github.com/prysmaticlabs/prysm@v1.4.4/beacon-chain/sync/initial-sync/blocks_fetcher_test.go (about)

     1  package initialsync
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"sort"
     7  	"sync"
     8  	"testing"
     9  	"time"
    10  
    11  	"github.com/kevinms/leakybucket-go"
    12  	core "github.com/libp2p/go-libp2p-core"
    13  	"github.com/libp2p/go-libp2p-core/network"
    14  	types "github.com/prysmaticlabs/eth2-types"
    15  	mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
    16  	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    17  	dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
    18  	p2pm "github.com/prysmaticlabs/prysm/beacon-chain/p2p"
    19  	p2pt "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
    20  	beaconsync "github.com/prysmaticlabs/prysm/beacon-chain/sync"
    21  	"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
    22  	p2ppb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
    23  	eth "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
    24  	"github.com/prysmaticlabs/prysm/proto/eth/v1alpha1/wrapper"
    25  	"github.com/prysmaticlabs/prysm/proto/interfaces"
    26  	"github.com/prysmaticlabs/prysm/shared/params"
    27  	"github.com/prysmaticlabs/prysm/shared/sliceutil"
    28  	"github.com/prysmaticlabs/prysm/shared/testutil"
    29  	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
    30  	"github.com/prysmaticlabs/prysm/shared/testutil/require"
    31  	"github.com/sirupsen/logrus"
    32  	logTest "github.com/sirupsen/logrus/hooks/test"
    33  )
    34  
    35  func TestBlocksFetcher_InitStartStop(t *testing.T) {
    36  	mc, p2p, _ := initializeTestServices(t, []types.Slot{}, []*peerData{})
    37  
    38  	ctx, cancel := context.WithCancel(context.Background())
    39  	defer cancel()
    40  	fetcher := newBlocksFetcher(
    41  		ctx,
    42  		&blocksFetcherConfig{
    43  			chain: mc,
    44  			p2p:   p2p,
    45  		},
    46  	)
    47  
    48  	t.Run("check for leaked goroutines", func(t *testing.T) {
    49  		err := fetcher.start()
    50  		require.NoError(t, err)
    51  		fetcher.stop() // should block up until all resources are reclaimed
    52  		select {
    53  		case <-fetcher.requestResponses():
    54  		default:
    55  			t.Error("fetchResponses channel is leaked")
    56  		}
    57  	})
    58  
    59  	t.Run("re-starting of stopped fetcher", func(t *testing.T) {
    60  		assert.ErrorContains(t, errFetcherCtxIsDone.Error(), fetcher.start())
    61  	})
    62  
    63  	t.Run("multiple stopping attempts", func(t *testing.T) {
    64  		fetcher := newBlocksFetcher(
    65  			context.Background(),
    66  			&blocksFetcherConfig{
    67  				chain: mc,
    68  				p2p:   p2p,
    69  			})
    70  		require.NoError(t, fetcher.start())
    71  		fetcher.stop()
    72  		fetcher.stop()
    73  	})
    74  
    75  	t.Run("cancellation", func(t *testing.T) {
    76  		ctx, cancel := context.WithCancel(context.Background())
    77  		fetcher := newBlocksFetcher(
    78  			ctx,
    79  			&blocksFetcherConfig{
    80  				chain: mc,
    81  				p2p:   p2p,
    82  			})
    83  		require.NoError(t, fetcher.start())
    84  		cancel()
    85  		fetcher.stop()
    86  	})
    87  
    88  	t.Run("peer filter capacity weight", func(t *testing.T) {
    89  		ctx, cancel := context.WithCancel(context.Background())
    90  		defer cancel()
    91  		fetcher := newBlocksFetcher(
    92  			ctx,
    93  			&blocksFetcherConfig{
    94  				chain:                    mc,
    95  				p2p:                      p2p,
    96  				peerFilterCapacityWeight: 2,
    97  			})
    98  		require.NoError(t, fetcher.start())
    99  		assert.Equal(t, peerFilterCapacityWeight, fetcher.capacityWeight)
   100  	})
   101  }
   102  
// TestBlocksFetcher_RoundRobin schedules batch requests against simulated
// peers and verifies that all expected blocks are fetched, across scenarios
// with a single peer, multiple peers, skipped slots, and failing peers.
func TestBlocksFetcher_RoundRobin(t *testing.T) {
	slotsInBatch := types.Slot(flags.Get().BlockBatchLimit)
	// requestsGenerator produces fetch requests covering [start, end] in
	// consecutive chunks of batchSize slots.
	requestsGenerator := func(start, end, batchSize types.Slot) []*fetchRequestParams {
		var requests []*fetchRequestParams
		for i := start; i <= end; i += batchSize {
			requests = append(requests, &fetchRequestParams{
				start: i,
				count: uint64(batchSize),
			})
		}
		return requests
	}
	tests := []struct {
		name               string
		expectedBlockSlots []types.Slot // slots that must all appear in the fetched blocks
		peers              []*peerData  // simulated peers and the blocks they serve
		requests           []*fetchRequestParams
	}{
		{
			name:               "Single peer with all blocks",
			expectedBlockSlots: makeSequence(1, 3*slotsInBatch),
			peers: []*peerData{
				{
					blocks:         makeSequence(1, 3*slotsInBatch),
					finalizedEpoch: helpers.SlotToEpoch(3 * slotsInBatch),
					headSlot:       3 * slotsInBatch,
				},
			},
			requests: requestsGenerator(1, 3*slotsInBatch, slotsInBatch),
		},
		{
			name:               "Single peer with all blocks (many small requests)",
			expectedBlockSlots: makeSequence(1, 3*slotsInBatch),
			peers: []*peerData{
				{
					blocks:         makeSequence(1, 3*slotsInBatch),
					finalizedEpoch: helpers.SlotToEpoch(3 * slotsInBatch),
					headSlot:       3 * slotsInBatch,
				},
			},
			requests: requestsGenerator(1, 3*slotsInBatch, slotsInBatch/4),
		},
		{
			name:               "Multiple peers with all blocks",
			expectedBlockSlots: makeSequence(1, 3*slotsInBatch),
			peers: []*peerData{
				{
					blocks:         makeSequence(1, 3*slotsInBatch),
					finalizedEpoch: helpers.SlotToEpoch(3 * slotsInBatch),
					headSlot:       3 * slotsInBatch,
				},
				{
					blocks:         makeSequence(1, 3*slotsInBatch),
					finalizedEpoch: helpers.SlotToEpoch(3 * slotsInBatch),
					headSlot:       3 * slotsInBatch,
				},
				{
					blocks:         makeSequence(1, 3*slotsInBatch),
					finalizedEpoch: helpers.SlotToEpoch(3 * slotsInBatch),
					headSlot:       3 * slotsInBatch,
				},
			},
			requests: requestsGenerator(1, 3*slotsInBatch, slotsInBatch),
		},
		{
			name:               "Multiple peers with skipped slots",
			expectedBlockSlots: append(makeSequence(1, 64), makeSequence(500, 640)...), // up to 18th epoch
			peers: []*peerData{
				{
					blocks:         append(makeSequence(1, 64), makeSequence(500, 640)...),
					finalizedEpoch: 18,
					headSlot:       640,
				},
				{
					blocks:         append(makeSequence(1, 64), makeSequence(500, 640)...),
					finalizedEpoch: 18,
					headSlot:       640,
				},
				{
					blocks:         append(makeSequence(1, 64), makeSequence(500, 640)...),
					finalizedEpoch: 18,
					headSlot:       640,
				},
				{
					blocks:         append(makeSequence(1, 64), makeSequence(500, 640)...),
					finalizedEpoch: 18,
					headSlot:       640,
				},
				{
					blocks:         append(makeSequence(1, 64), makeSequence(500, 640)...),
					finalizedEpoch: 18,
					headSlot:       640,
				},
			},
			requests: []*fetchRequestParams{
				{
					start: 1,
					count: uint64(slotsInBatch),
				},
				{
					start: slotsInBatch + 1,
					count: uint64(slotsInBatch),
				},
				{
					start: 2*slotsInBatch + 1,
					count: uint64(slotsInBatch),
				},
				{
					start: 500,
					count: 53,
				},
				{
					start: 553,
					count: 200,
				},
			},
		},
		{
			name:               "Multiple peers with failures",
			expectedBlockSlots: makeSequence(1, 2*slotsInBatch),
			peers: []*peerData{
				{
					blocks:         makeSequence(1, 320),
					finalizedEpoch: 8,
					headSlot:       320,
				},
				{
					// This peer fails on the first epoch; the fetcher is
					// expected to obtain those blocks from the other peers.
					blocks:         makeSequence(1, 320),
					finalizedEpoch: 8,
					headSlot:       320,
					failureSlots:   makeSequence(1, 32), // first epoch
				},
				{
					blocks:         makeSequence(1, 320),
					finalizedEpoch: 8,
					headSlot:       320,
				},
				{
					blocks:         makeSequence(1, 320),
					finalizedEpoch: 8,
					headSlot:       320,
				},
			},
			requests: []*fetchRequestParams{
				{
					start: 1,
					count: uint64(slotsInBatch),
				},
				{
					start: slotsInBatch + 1,
					count: uint64(slotsInBatch),
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cache.initializeRootCache(tt.expectedBlockSlots, t)

			beaconDB := dbtest.SetupDB(t)

			p := p2pt.NewTestP2P(t)
			connectPeers(t, p, tt.peers, p.Peers())
			cache.RLock()
			genesisRoot := cache.rootCache[0]
			cache.RUnlock()

			err := beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(testutil.NewBeaconBlock()))
			require.NoError(t, err)

			st, err := testutil.NewBeaconState()
			require.NoError(t, err)

			mc := &mock.ChainService{
				State: st,
				Root:  genesisRoot[:],
				DB:    beaconDB,
				FinalizedCheckPoint: &eth.Checkpoint{
					Epoch: 0,
				},
			}

			ctx, cancel := context.WithCancel(context.Background())
			fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
				chain: mc,
				p2p:   p,
			})
			require.NoError(t, fetcher.start())

			// Stop the fetcher only once every scheduled request has produced
			// a response (each response calls wg.Done below).
			var wg sync.WaitGroup
			wg.Add(len(tt.requests)) // how many block requests we are going to make
			go func() {
				wg.Wait()
				log.Debug("Stopping fetcher")
				fetcher.stop()
			}()

			// processFetchedBlocks drains the fetcher's response channel until
			// it is closed (by stop above) or the context is cancelled, and
			// aggregates all returned blocks.
			processFetchedBlocks := func() ([]interfaces.SignedBeaconBlock, error) {
				defer cancel()
				var unionRespBlocks []interfaces.SignedBeaconBlock

				for {
					select {
					case resp, ok := <-fetcher.requestResponses():
						if !ok { // channel closed, aggregate
							return unionRespBlocks, nil
						}

						if resp.err != nil {
							log.WithError(resp.err).Debug("Block fetcher returned error")
						} else {
							unionRespBlocks = append(unionRespBlocks, resp.blocks...)
							if len(resp.blocks) == 0 {
								log.WithFields(logrus.Fields{
									"start": resp.start,
									"count": resp.count,
								}).Debug("Received empty slot")
							}
						}

						wg.Done()
					case <-ctx.Done():
						log.Debug("Context closed, exiting goroutine")
						return unionRespBlocks, nil
					}
				}
			}

			maxExpectedBlocks := uint64(0)
			for _, requestParams := range tt.requests {
				err = fetcher.scheduleRequest(context.Background(), requestParams.start, requestParams.count)
				assert.NoError(t, err)
				maxExpectedBlocks += requestParams.count
			}

			blocks, err := processFetchedBlocks()
			assert.NoError(t, err)

			sort.Slice(blocks, func(i, j int) bool {
				return blocks[i].Block().Slot() < blocks[j].Block().Slot()
			})

			slots := make([]types.Slot, len(blocks))
			for i, block := range blocks {
				slots[i] = block.Block().Slot()
			}

			log.WithFields(logrus.Fields{
				"blocksLen": len(blocks),
				"slots":     slots,
			}).Debug("Finished block fetching")

			// Sanity bound: never more blocks than were requested in total.
			if len(blocks) > int(maxExpectedBlocks) {
				t.Errorf("Too many blocks returned. Wanted %d got %d", maxExpectedBlocks, len(blocks))
			}
			assert.Equal(t, len(tt.expectedBlockSlots), len(blocks), "Processes wrong number of blocks")
			// Every expected slot must be present in the fetched set.
			var receivedBlockSlots []types.Slot
			for _, blk := range blocks {
				receivedBlockSlots = append(receivedBlockSlots, blk.Block().Slot())
			}
			missing := sliceutil.NotSlot(sliceutil.IntersectionSlot(tt.expectedBlockSlots, receivedBlockSlots), tt.expectedBlockSlots)
			if len(missing) > 0 {
				t.Errorf("Missing blocks at slots %v", missing)
			}
		})
	}
}
   371  
   372  func TestBlocksFetcher_scheduleRequest(t *testing.T) {
   373  	blockBatchLimit := uint64(flags.Get().BlockBatchLimit)
   374  	t.Run("context cancellation", func(t *testing.T) {
   375  		ctx, cancel := context.WithCancel(context.Background())
   376  		fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{})
   377  		cancel()
   378  		assert.ErrorContains(t, "context canceled", fetcher.scheduleRequest(ctx, 1, blockBatchLimit))
   379  	})
   380  
   381  	t.Run("unblock on context cancellation", func(t *testing.T) {
   382  		fetcher := newBlocksFetcher(context.Background(), &blocksFetcherConfig{})
   383  		for i := 0; i < maxPendingRequests; i++ {
   384  			assert.NoError(t, fetcher.scheduleRequest(context.Background(), 1, blockBatchLimit))
   385  		}
   386  
   387  		// Will block on next request (and wait until requests are either processed or context is closed).
   388  		go func() {
   389  			fetcher.cancel()
   390  		}()
   391  		assert.ErrorContains(t, errFetcherCtxIsDone.Error(),
   392  			fetcher.scheduleRequest(context.Background(), 1, blockBatchLimit))
   393  	})
   394  }
// TestBlocksFetcher_handleRequest checks a single request/response round-trip:
// a cancelled context yields a response carrying the context error, while a
// live fetcher returns the full batch of requested blocks.
func TestBlocksFetcher_handleRequest(t *testing.T) {
	blockBatchLimit := flags.Get().BlockBatchLimit
	chainConfig := struct {
		expectedBlockSlots []types.Slot
		peers              []*peerData
	}{
		expectedBlockSlots: makeSequence(1, types.Slot(blockBatchLimit)),
		peers: []*peerData{
			{
				blocks:         makeSequence(1, 320),
				finalizedEpoch: 8,
				headSlot:       320,
			},
			{
				blocks:         makeSequence(1, 320),
				finalizedEpoch: 8,
				headSlot:       320,
			},
		},
	}

	mc, p2p, _ := initializeTestServices(t, chainConfig.expectedBlockSlots, chainConfig.peers)

	t.Run("context cancellation", func(t *testing.T) {
		ctx, cancel := context.WithCancel(context.Background())
		fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
			chain: mc,
			p2p:   p2p,
		})

		// Cancel before handling: the response must carry the context error.
		cancel()
		response := fetcher.handleRequest(ctx, 1, uint64(blockBatchLimit))
		assert.ErrorContains(t, "context canceled", response.err)
	})

	t.Run("receive blocks", func(t *testing.T) {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
			chain: mc,
			p2p:   p2p,
		})

		// Handle the request in the background (bounded by a 2s timeout) and
		// feed the response into the fetcher's response channel, bailing out
		// if the test context is cancelled first.
		requestCtx, reqCancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer reqCancel()
		go func() {
			response := fetcher.handleRequest(requestCtx, 1 /* start */, uint64(blockBatchLimit) /* count */)
			select {
			case <-ctx.Done():
			case fetcher.fetchResponses <- response:
			}
		}()

		var blocks []interfaces.SignedBeaconBlock
		select {
		case <-ctx.Done():
			t.Error(ctx.Err())
		case resp := <-fetcher.requestResponses():
			if resp.err != nil {
				t.Error(resp.err)
			} else {
				blocks = resp.blocks
			}
		}
		if uint64(len(blocks)) != uint64(blockBatchLimit) {
			t.Errorf("incorrect number of blocks returned, expected: %v, got: %v", blockBatchLimit, len(blocks))
		}

		// Every expected slot must be present in the returned batch.
		var receivedBlockSlots []types.Slot
		for _, blk := range blocks {
			receivedBlockSlots = append(receivedBlockSlots, blk.Block().Slot())
		}
		missing := sliceutil.NotSlot(sliceutil.IntersectionSlot(chainConfig.expectedBlockSlots, receivedBlockSlots), chainConfig.expectedBlockSlots)
		if len(missing) > 0 {
			t.Errorf("Missing blocks at slots %v", missing)
		}
	})
}
   473  
   474  func TestBlocksFetcher_requestBeaconBlocksByRange(t *testing.T) {
   475  	blockBatchLimit := flags.Get().BlockBatchLimit
   476  	chainConfig := struct {
   477  		expectedBlockSlots []types.Slot
   478  		peers              []*peerData
   479  	}{
   480  		expectedBlockSlots: makeSequence(1, 320),
   481  		peers: []*peerData{
   482  			{
   483  				blocks:         makeSequence(1, 320),
   484  				finalizedEpoch: 8,
   485  				headSlot:       320,
   486  			},
   487  			{
   488  				blocks:         makeSequence(1, 320),
   489  				finalizedEpoch: 8,
   490  				headSlot:       320,
   491  			},
   492  		},
   493  	}
   494  
   495  	mc, p2p, _ := initializeTestServices(t, chainConfig.expectedBlockSlots, chainConfig.peers)
   496  	ctx, cancel := context.WithCancel(context.Background())
   497  	defer cancel()
   498  
   499  	fetcher := newBlocksFetcher(
   500  		ctx,
   501  		&blocksFetcherConfig{
   502  			chain: mc,
   503  			p2p:   p2p,
   504  		})
   505  
   506  	_, peerIDs := p2p.Peers().BestFinalized(params.BeaconConfig().MaxPeersToSync, helpers.SlotToEpoch(mc.HeadSlot()))
   507  	req := &p2ppb.BeaconBlocksByRangeRequest{
   508  		StartSlot: 1,
   509  		Step:      1,
   510  		Count:     uint64(blockBatchLimit),
   511  	}
   512  	blocks, err := fetcher.requestBlocks(ctx, req, peerIDs[0])
   513  	assert.NoError(t, err)
   514  	assert.Equal(t, uint64(blockBatchLimit), uint64(len(blocks)), "Incorrect number of blocks returned")
   515  
   516  	// Test context cancellation.
   517  	ctx, cancel = context.WithCancel(context.Background())
   518  	cancel()
   519  	_, err = fetcher.requestBlocks(ctx, req, peerIDs[0])
   520  	assert.ErrorContains(t, "context canceled", err)
   521  }
   522  
// TestBlocksFetcher_RequestBlocksRateLimitingLocks ensures that rate limiting
// one peer (p2) does not block concurrent requests to another peer (p3), i.e.
// the rate limiter must not hold a fetcher-wide lock while a peer is being
// slowed down.
func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
	p1 := p2pt.NewTestP2P(t)
	p2 := p2pt.NewTestP2P(t)
	p3 := p2pt.NewTestP2P(t)
	p1.Connect(p2)
	p1.Connect(p3)
	require.Equal(t, 2, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
	req := &p2ppb.BeaconBlocksByRangeRequest{
		StartSlot: 100,
		Step:      1,
		Count:     64,
	}

	// Serving peers just close the stream; only rate limiting is under test.
	topic := p2pm.RPCBlocksByRangeTopicV1
	protocol := core.ProtocolID(topic + p2.Encoding().ProtocolSuffix())
	streamHandlerFn := func(stream network.Stream) {
		assert.NoError(t, stream.Close())
	}
	p2.BHost.SetStreamHandler(protocol, streamHandlerFn)
	p3.BHost.SetStreamHandler(protocol, streamHandlerFn)

	burstFactor := uint64(flags.Get().BlockBatchLimitBurstFactor)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{p2p: p1})
	// Collector sized so that exactly burstFactor requests of req.Count
	// exhaust a peer's capacity.
	fetcher.rateLimiter = leakybucket.NewCollector(float64(req.Count), int64(req.Count*burstFactor), false)

	hook := logTest.NewGlobal()
	wg := new(sync.WaitGroup)
	wg.Add(1)
	go func() {
		// Exhaust available rate for p2, so that rate limiting is triggered.
		for i := uint64(0); i <= burstFactor; i++ {
			if i == burstFactor {
				// The next request will trigger rate limiting for p2. Now, allow concurrent
				// p3 data request (p3 shouldn't be rate limited).
				time.AfterFunc(1*time.Second, func() {
					wg.Done()
				})
			}
			_, err := fetcher.requestBlocks(ctx, req, p2.PeerID())
			if err != nil {
				assert.ErrorContains(t, errFetcherCtxIsDone.Error(), err)
			}
		}
	}()

	// Wait until p2 exhausts its rate and is spinning on rate limiting timer.
	wg.Wait()

	// The next request should NOT trigger rate limiting as rate is exhausted for p2, not p3.
	ch := make(chan struct{}, 1)
	go func() {
		_, err := fetcher.requestBlocks(ctx, req, p3.PeerID())
		assert.NoError(t, err)
		ch <- struct{}{}
	}()
	timer := time.NewTimer(2 * time.Second)
	select {
	case <-timer.C:
		t.Error("p3 takes too long to respond: lock contention")
	case <-ch:
		// p3 responded w/o waiting for rate limiter's lock (on which p2 spins).
	}
	// Make sure that p2 has been rate limited.
	require.LogsContain(t, hook, fmt.Sprintf("msg=\"Slowing down for rate limit\" peer=%s", p2.PeerID()))
}
   591  
// TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks verifies that
// the fetcher rejects malformed responses from a peer: too many blocks,
// out-of-order or duplicate slots, slots outside the requested range, and
// slots that do not align with the requested step. Each case installs a
// custom stream handler that writes a crafted sequence of block chunks.
func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) {
	p1 := p2pt.NewTestP2P(t)
	tests := []struct {
		name         string
		req          *p2ppb.BeaconBlocksByRangeRequest
		handlerGenFn func(req *p2ppb.BeaconBlocksByRangeRequest) func(stream network.Stream) // builds the serving peer's response
		wantedErr    string
		validate     func(req *p2ppb.BeaconBlocksByRangeRequest, blocks []interfaces.SignedBeaconBlock)
	}{
		{
			name: "no error",
			req: &p2ppb.BeaconBlocksByRangeRequest{
				StartSlot: 100,
				Step:      4,
				Count:     64,
			},
			handlerGenFn: func(req *p2ppb.BeaconBlocksByRangeRequest) func(stream network.Stream) {
				return func(stream network.Stream) {
					// Serve exactly Count blocks at the requested step.
					for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += types.Slot(req.Step) {
						blk := testutil.NewBeaconBlock()
						blk.Block.Slot = i
						assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
					}
					assert.NoError(t, stream.Close())
				}
			},
			validate: func(req *p2ppb.BeaconBlocksByRangeRequest, blocks []interfaces.SignedBeaconBlock) {
				assert.Equal(t, req.Count, uint64(len(blocks)))
			},
		},
		{
			name: "too many blocks",
			req: &p2ppb.BeaconBlocksByRangeRequest{
				StartSlot: 100,
				Step:      1,
				Count:     64,
			},
			handlerGenFn: func(req *p2ppb.BeaconBlocksByRangeRequest) func(stream network.Stream) {
				return func(stream network.Stream) {
					// Serve one block more than requested (Count+1 chunks).
					for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step+1); i += types.Slot(req.Step) {
						blk := testutil.NewBeaconBlock()
						blk.Block.Slot = i
						assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
					}
					assert.NoError(t, stream.Close())
				}
			},
			validate: func(req *p2ppb.BeaconBlocksByRangeRequest, blocks []interfaces.SignedBeaconBlock) {
				assert.Equal(t, 0, len(blocks))
			},
			wantedErr: beaconsync.ErrInvalidFetchedData.Error(),
		},
		{
			name: "not in a consecutive order",
			req: &p2ppb.BeaconBlocksByRangeRequest{
				StartSlot: 100,
				Step:      1,
				Count:     64,
			},
			handlerGenFn: func(req *p2ppb.BeaconBlocksByRangeRequest) func(stream network.Stream) {
				return func(stream network.Stream) {
					// Serve slots 163 then 162 — descending order is invalid.
					blk := testutil.NewBeaconBlock()
					blk.Block.Slot = 163
					assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))

					blk = testutil.NewBeaconBlock()
					blk.Block.Slot = 162
					assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
					assert.NoError(t, stream.Close())
				}
			},
			validate: func(req *p2ppb.BeaconBlocksByRangeRequest, blocks []interfaces.SignedBeaconBlock) {
				assert.Equal(t, 0, len(blocks))
			},
			wantedErr: beaconsync.ErrInvalidFetchedData.Error(),
		},
		{
			name: "same slot number",
			req: &p2ppb.BeaconBlocksByRangeRequest{
				StartSlot: 100,
				Step:      1,
				Count:     64,
			},
			handlerGenFn: func(req *p2ppb.BeaconBlocksByRangeRequest) func(stream network.Stream) {
				return func(stream network.Stream) {
					// Serve the same slot (160) twice — duplicates are invalid.
					blk := testutil.NewBeaconBlock()
					blk.Block.Slot = 160
					assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))

					blk = testutil.NewBeaconBlock()
					blk.Block.Slot = 160
					assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
					assert.NoError(t, stream.Close())
				}
			},
			validate: func(req *p2ppb.BeaconBlocksByRangeRequest, blocks []interfaces.SignedBeaconBlock) {
				assert.Equal(t, 0, len(blocks))
			},
			wantedErr: beaconsync.ErrInvalidFetchedData.Error(),
		},
		{
			name: "slot is too low",
			req: &p2ppb.BeaconBlocksByRangeRequest{
				StartSlot: 100,
				Step:      1,
				Count:     64,
			},
			handlerGenFn: func(req *p2ppb.BeaconBlocksByRangeRequest) func(stream network.Stream) {
				return func(stream network.Stream) {
					defer func() {
						assert.NoError(t, stream.Close())
					}()
					for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += types.Slot(req.Step) {
						blk := testutil.NewBeaconBlock()
						// Patch mid block, with invalid slot number.
						if i == req.StartSlot.Add(req.Count * req.Step / 2) {
							blk.Block.Slot = req.StartSlot - 1
							assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
							break
						} else {
							blk.Block.Slot = i
							assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
						}
					}
				}
			},
			wantedErr: beaconsync.ErrInvalidFetchedData.Error(),
			validate: func(req *p2ppb.BeaconBlocksByRangeRequest, blocks []interfaces.SignedBeaconBlock) {
				assert.Equal(t, 0, len(blocks))
			},
		},
		{
			name: "slot is too high",
			req: &p2ppb.BeaconBlocksByRangeRequest{
				StartSlot: 100,
				Step:      1,
				Count:     64,
			},
			handlerGenFn: func(req *p2ppb.BeaconBlocksByRangeRequest) func(stream network.Stream) {
				return func(stream network.Stream) {
					defer func() {
						assert.NoError(t, stream.Close())
					}()
					for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += types.Slot(req.Step) {
						blk := testutil.NewBeaconBlock()
						// Patch mid block, with invalid slot number.
						if i == req.StartSlot.Add(req.Count * req.Step / 2) {
							blk.Block.Slot = req.StartSlot.Add(req.Count * req.Step)
							assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
							break
						} else {
							blk.Block.Slot = i
							assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
						}
					}
				}
			},
			wantedErr: beaconsync.ErrInvalidFetchedData.Error(),
			validate: func(req *p2ppb.BeaconBlocksByRangeRequest, blocks []interfaces.SignedBeaconBlock) {
				assert.Equal(t, 0, len(blocks))
			},
		},
		{
			name: "valid step increment",
			req: &p2ppb.BeaconBlocksByRangeRequest{
				StartSlot: 100,
				Step:      5,
				Count:     64,
			},
			handlerGenFn: func(req *p2ppb.BeaconBlocksByRangeRequest) func(stream network.Stream) {
				return func(stream network.Stream) {
					// Slots 100 and 105 align with step 5 — accepted.
					blk := testutil.NewBeaconBlock()
					blk.Block.Slot = 100
					assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))

					blk = testutil.NewBeaconBlock()
					blk.Block.Slot = 105
					assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
					assert.NoError(t, stream.Close())
				}
			},
			validate: func(req *p2ppb.BeaconBlocksByRangeRequest, blocks []interfaces.SignedBeaconBlock) {
				assert.Equal(t, 2, len(blocks))
			},
		},
		{
			name: "invalid step increment",
			req: &p2ppb.BeaconBlocksByRangeRequest{
				StartSlot: 100,
				Step:      5,
				Count:     64,
			},
			handlerGenFn: func(req *p2ppb.BeaconBlocksByRangeRequest) func(stream network.Stream) {
				return func(stream network.Stream) {
					// Slot 103 does not align with step 5 from slot 100 — rejected.
					blk := testutil.NewBeaconBlock()
					blk.Block.Slot = 100
					assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))

					blk = testutil.NewBeaconBlock()
					blk.Block.Slot = 103
					assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
					assert.NoError(t, stream.Close())
				}
			},
			validate: func(req *p2ppb.BeaconBlocksByRangeRequest, blocks []interfaces.SignedBeaconBlock) {
				assert.Equal(t, 0, len(blocks))
			},
			wantedErr: beaconsync.ErrInvalidFetchedData.Error(),
		},
	}

	topic := p2pm.RPCBlocksByRangeTopicV1
	protocol := core.ProtocolID(topic + p1.Encoding().ProtocolSuffix())

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{p2p: p1})
	// Tiny refill rate with a large capacity: effectively no rate limiting
	// within the lifetime of this test.
	fetcher.rateLimiter = leakybucket.NewCollector(0.000001, 640, false)

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// A fresh serving peer per case, wired with the crafted handler.
			p2 := p2pt.NewTestP2P(t)
			p1.Connect(p2)

			p2.BHost.SetStreamHandler(protocol, tt.handlerGenFn(tt.req))
			blocks, err := fetcher.requestBlocks(ctx, tt.req, p2.PeerID())
			if tt.wantedErr != "" {
				assert.ErrorContains(t, tt.wantedErr, err)
			} else {
				assert.NoError(t, err)
				tt.validate(tt.req, blocks)
			}
		})
	}
}