github.com/prysmaticlabs/prysm@v1.4.4/beacon-chain/sync/initial-sync/blocks_queue_test.go

     1  package initialsync
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"testing"
     7  	"time"
     8  
     9  	"github.com/kevinms/leakybucket-go"
    10  	"github.com/libp2p/go-libp2p-core/peer"
    11  	types "github.com/prysmaticlabs/eth2-types"
    12  	mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
    13  	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    14  	dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
    15  	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
    16  	p2pt "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
    17  	beaconsync "github.com/prysmaticlabs/prysm/beacon-chain/sync"
    18  	"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
    19  	eth "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
    20  	"github.com/prysmaticlabs/prysm/proto/eth/v1alpha1/wrapper"
    21  	"github.com/prysmaticlabs/prysm/proto/interfaces"
    22  	"github.com/prysmaticlabs/prysm/shared/bytesutil"
    23  	"github.com/prysmaticlabs/prysm/shared/sliceutil"
    24  	"github.com/prysmaticlabs/prysm/shared/testutil"
    25  	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
    26  	"github.com/prysmaticlabs/prysm/shared/testutil/require"
    27  	"github.com/prysmaticlabs/prysm/shared/timeutils"
    28  	logTest "github.com/sirupsen/logrus/hooks/test"
    29  )
    30  
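        // TestBlocksQueue_InitStartStop exercises the queue's start/stop lifecycle:
        // stopping before starting, re-starting a stopped queue, repeated stop calls,
        // and behaviour on context cancellation.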
    31  func TestBlocksQueue_InitStartStop(t *testing.T) {
    32  	blockBatchLimit := flags.Get().BlockBatchLimit
    33  	mc, p2p, _ := initializeTestServices(t, []types.Slot{}, []*peerData{})
    34  
    35  	ctx, cancel := context.WithCancel(context.Background())
    36  	defer cancel()
    37  	fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
    38  		chain: mc,
    39  		p2p:   p2p,
    40  	})
    41  
    42  	t.Run("stop without start", func(t *testing.T) {
    43  		ctx, cancel := context.WithCancel(ctx)
    44  		defer cancel()
    45  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
    46  			chain:               mc,
    47  			highestExpectedSlot: types.Slot(blockBatchLimit),
    48  		})
    49  		assert.ErrorContains(t, errQueueTakesTooLongToStop.Error(), queue.stop())
    50  	})
    51  
    52  	t.Run("use default fetcher", func(t *testing.T) {
    53  		ctx, cancel := context.WithCancel(ctx)
    54  		defer cancel()
    55  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
    56  			chain:               mc,
    57  			highestExpectedSlot: types.Slot(blockBatchLimit),
    58  		})
    59  		assert.NoError(t, queue.start())
    60  	})
    61  
    62  	t.Run("stop timeout", func(t *testing.T) {
    63  		ctx, cancel := context.WithCancel(ctx)
    64  		defer cancel()
    65  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
    66  			chain:               mc,
    67  			highestExpectedSlot: types.Slot(blockBatchLimit),
    68  		})
    69  		assert.NoError(t, queue.start())
    70  		assert.ErrorContains(t, errQueueTakesTooLongToStop.Error(), queue.stop())
    71  	})
    72  
    73  	t.Run("check for leaked goroutines", func(t *testing.T) {
    74  		ctx, cancel := context.WithCancel(ctx)
    75  		defer cancel()
    76  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
    77  			blocksFetcher:       fetcher,
    78  			chain:               mc,
    79  			highestExpectedSlot: types.Slot(blockBatchLimit),
    80  		})
    81  
    82  		assert.NoError(t, queue.start())
    83  		// Blocks until all resources are reclaimed (or the stop timeout expires).
    84  		assert.NoError(t, queue.stop())
    85  		select {
    86  		case <-queue.fetchedData:
    87  		default:
    88  			t.Error("queue.fetchedData channel is leaked")
    89  		}
    90  		select {
    91  		case <-fetcher.fetchResponses:
    92  		default:
    93  			t.Error("fetcher.fetchResponses channel is leaked")
    94  		}
    95  	})
    96  
    97  	t.Run("re-starting of stopped queue", func(t *testing.T) {
    98  		ctx, cancel := context.WithCancel(ctx)
    99  		defer cancel()
   100  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   101  			blocksFetcher:       fetcher,
   102  			chain:               mc,
   103  			highestExpectedSlot: types.Slot(blockBatchLimit),
   104  		})
   105  		assert.NoError(t, queue.start())
   106  		assert.NoError(t, queue.stop())
   107  		assert.ErrorContains(t, errQueueCtxIsDone.Error(), queue.start())
   108  	})
   109  
   110  	t.Run("multiple stopping attempts", func(t *testing.T) {
   111  		ctx, cancel := context.WithCancel(ctx)
   112  		defer cancel()
   113  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   114  			blocksFetcher:       fetcher,
   115  			chain:               mc,
   116  			highestExpectedSlot: types.Slot(blockBatchLimit),
   117  		})
   118  		assert.NoError(t, queue.start())
   119  		assert.NoError(t, queue.stop())
   120  		assert.NoError(t, queue.stop())
   121  	})
   122  
   123  	t.Run("cancellation", func(t *testing.T) {
   124  		ctx, cancel := context.WithCancel(context.Background())
   125  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   126  			blocksFetcher:       fetcher,
   127  			chain:               mc,
   128  			highestExpectedSlot: types.Slot(blockBatchLimit),
   129  		})
   130  		assert.NoError(t, queue.start())
   131  		cancel()
   132  		assert.NoError(t, queue.stop())
   133  	})
   134  }
   135  
   136  func TestBlocksQueue_Loop(t *testing.T) {
   137  	tests := []struct {
   138  		name                string
   139  		highestExpectedSlot types.Slot
   140  		expectedBlockSlots  []types.Slot
   141  		peers               []*peerData
   142  	}{
   143  		{
   144  			name:                "Single peer with all blocks",
   145  			highestExpectedSlot: 251, // will be auto-adjusted up to 256 (epoch boundary) by the queue
   146  			expectedBlockSlots:  makeSequence(1, 256),
   147  			peers: []*peerData{
   148  				{
   149  					blocks:         makeSequence(1, 320),
   150  					finalizedEpoch: 8,
   151  					headSlot:       320,
   152  				},
   153  			},
   154  		},
   155  		{
   156  			name:                "Multiple peers with all blocks",
   157  			highestExpectedSlot: 256,
   158  			expectedBlockSlots:  makeSequence(1, 256),
   159  			peers: []*peerData{
   160  				{
   161  					blocks:         makeSequence(1, 320),
   162  					finalizedEpoch: 8,
   163  					headSlot:       320,
   164  				},
   165  				{
   166  					blocks:         makeSequence(1, 320),
   167  					finalizedEpoch: 8,
   168  					headSlot:       320,
   169  				},
   170  				{
   171  					blocks:         makeSequence(1, 320),
   172  					finalizedEpoch: 8,
   173  					headSlot:       320,
   174  				},
   175  				{
   176  					blocks:         makeSequence(1, 320),
   177  					finalizedEpoch: 8,
   178  					headSlot:       320,
   179  				},
   180  				{
   181  					blocks:         makeSequence(1, 320),
   182  					finalizedEpoch: 8,
   183  					headSlot:       320,
   184  				},
   185  			},
   186  		},
   187  		{
   188  			name:                "Multiple peers with skipped slots",
   189  			highestExpectedSlot: 576,
   190  			expectedBlockSlots:  append(makeSequence(1, 64), makeSequence(500, 576)...), // up to 18th epoch
   191  			peers: []*peerData{
   192  				{
   193  					blocks:         append(makeSequence(1, 64), makeSequence(500, 640)...),
   194  					finalizedEpoch: 18,
   195  					headSlot:       640,
   196  				},
   197  				{
   198  					blocks:         append(makeSequence(1, 64), makeSequence(500, 640)...),
   199  					finalizedEpoch: 18,
   200  					headSlot:       640,
   201  				},
   202  				{
   203  					blocks:         append(makeSequence(1, 64), makeSequence(500, 640)...),
   204  					finalizedEpoch: 18,
   205  					headSlot:       640,
   206  				},
   207  			},
   208  		},
   209  		{
   210  			name:                "Multiple peers with failures",
   211  			highestExpectedSlot: 128,
   212  			expectedBlockSlots:  makeSequence(1, 256),
   213  			peers: []*peerData{
   214  				{
   215  					blocks:         makeSequence(1, 320),
   216  					finalizedEpoch: 8,
   217  					headSlot:       320,
   218  					failureSlots:   makeSequence(32*3+1, 32*3+32),
   219  				},
   220  				{
   221  					blocks:         makeSequence(1, 320),
   222  					finalizedEpoch: 8,
   223  					headSlot:       320,
   224  					failureSlots:   makeSequence(1, 32*3),
   225  				},
   226  				{
   227  					blocks:         makeSequence(1, 320),
   228  					finalizedEpoch: 8,
   229  					headSlot:       320,
   230  				},
   231  				{
   232  					blocks:         makeSequence(1, 320),
   233  					finalizedEpoch: 8,
   234  					headSlot:       320,
   235  				},
   236  			},
   237  		},
   238  	}
   239  
   240  	for _, tt := range tests {
   241  		t.Run(tt.name, func(t *testing.T) {
   242  			mc, p2p, beaconDB := initializeTestServices(t, tt.expectedBlockSlots, tt.peers)
   243  
   244  			ctx, cancel := context.WithCancel(context.Background())
   245  			defer cancel()
   246  
   247  			fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
   248  				chain: mc,
   249  				p2p:   p2p,
   250  			})
   251  			queue := newBlocksQueue(ctx, &blocksQueueConfig{
   252  				blocksFetcher:       fetcher,
   253  				chain:               mc,
   254  				highestExpectedSlot: tt.highestExpectedSlot,
   255  			})
   256  			assert.NoError(t, queue.start())
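        			// processBlock mimics regular block processing: a block is accepted only when its
        			// parent is already in the database, and is then passed on to the mock chain service.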
   257  			processBlock := func(block interfaces.SignedBeaconBlock) error {
   258  				if !beaconDB.HasBlock(ctx, bytesutil.ToBytes32(block.Block().ParentRoot())) {
   259  					return fmt.Errorf("%w: %#x", errParentDoesNotExist, block.Block().ParentRoot())
   260  				}
   261  				root, err := block.Block().HashTreeRoot()
   262  				if err != nil {
   263  					return err
   264  				}
   265  				return mc.ReceiveBlock(ctx, block, root)
   266  			}
   267  
   268  			var blocks []interfaces.SignedBeaconBlock
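        			// Drain the queue's output channel; the loop ends once the queue closes fetchedData.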
   269  			for data := range queue.fetchedData {
   270  				for _, block := range data.blocks {
   271  					if err := processBlock(block); err != nil {
   272  						continue
   273  					}
   274  					blocks = append(blocks, block)
   275  				}
   276  			}
   277  
   278  			assert.NoError(t, queue.stop())
   279  
   280  			if queue.chain.HeadSlot() < tt.highestExpectedSlot {
   281  				t.Errorf("Not enough slots synced, want: %v, got: %v",
   282  					len(tt.expectedBlockSlots), queue.chain.HeadSlot())
   283  			}
   284  			assert.Equal(t, len(tt.expectedBlockSlots), len(blocks), "Processed wrong number of blocks")
   285  			var receivedBlockSlots []types.Slot
   286  			for _, blk := range blocks {
   287  				receivedBlockSlots = append(receivedBlockSlots, blk.Block().Slot())
   288  			}
   289  			missing := sliceutil.NotSlot(sliceutil.IntersectionSlot(tt.expectedBlockSlots, receivedBlockSlots), tt.expectedBlockSlots)
   290  			if len(missing) > 0 {
   291  				t.Errorf("Missing blocks at slots %v", missing)
   292  			}
   293  		})
   294  	}
   295  }
   296  
   297  func TestBlocksQueue_onScheduleEvent(t *testing.T) {
   298  	blockBatchLimit := flags.Get().BlockBatchLimit
   299  	mc, p2p, _ := initializeTestServices(t, []types.Slot{}, []*peerData{})
   300  
   301  	ctx, cancel := context.WithCancel(context.Background())
   302  	defer cancel()
   303  	fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
   304  		chain: mc,
   305  		p2p:   p2p,
   306  	})
   307  
   308  	t.Run("expired context", func(t *testing.T) {
   309  		ctx, cancel := context.WithCancel(ctx)
   310  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   311  			blocksFetcher:       fetcher,
   312  			chain:               mc,
   313  			highestExpectedSlot: types.Slot(blockBatchLimit),
   314  		})
   315  		handlerFn := queue.onScheduleEvent(ctx)
   316  		cancel()
   317  		updatedState, err := handlerFn(&stateMachine{
   318  			state: stateNew,
   319  		}, nil)
   320  		assert.ErrorContains(t, context.Canceled.Error(), err)
   321  		assert.Equal(t, stateNew, updatedState)
   322  	})
   323  
   324  	t.Run("invalid input state", func(t *testing.T) {
   325  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   326  			blocksFetcher:       fetcher,
   327  			chain:               mc,
   328  			highestExpectedSlot: types.Slot(blockBatchLimit),
   329  		})
   330  
   331  		invalidStates := []stateID{stateScheduled, stateDataParsed, stateSkipped, stateSent}
   332  		for _, state := range invalidStates {
   333  			t.Run(state.String(), func(t *testing.T) {
   334  				handlerFn := queue.onScheduleEvent(ctx)
   335  				updatedState, err := handlerFn(&stateMachine{
   336  					state: state,
   337  				}, nil)
   338  				assert.ErrorContains(t, errInvalidInitialState.Error(), err)
   339  				assert.Equal(t, state, updatedState)
   340  			})
   341  		}
   342  	})
   343  
   344  	t.Run("slot is too high", func(t *testing.T) {
   345  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   346  			blocksFetcher:       fetcher,
   347  			chain:               mc,
   348  			highestExpectedSlot: types.Slot(blockBatchLimit),
   349  		})
   350  
   351  		handlerFn := queue.onScheduleEvent(ctx)
   352  		updatedState, err := handlerFn(&stateMachine{
   353  			state: stateNew,
   354  			start: queue.highestExpectedSlot + 1,
   355  		}, nil)
   356  		assert.ErrorContains(t, errSlotIsTooHigh.Error(), err)
   357  		assert.Equal(t, stateSkipped, updatedState)
   358  	})
   359  
   360  	t.Run("fetcher fails scheduling", func(t *testing.T) {
   361  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   362  			blocksFetcher:       fetcher,
   363  			chain:               mc,
   364  			highestExpectedSlot: types.Slot(blockBatchLimit),
   365  		})
   366  		// Cancel the context so that the fetcher returns an error when scheduling the next FSM.
   367  		requestCtx, requestCtxCancel := context.WithCancel(context.Background())
   368  		requestCtxCancel()
   369  		handlerFn := queue.onScheduleEvent(requestCtx)
   370  		updatedState, err := handlerFn(&stateMachine{
   371  			state: stateNew,
   372  		}, nil)
   373  		assert.ErrorContains(t, context.Canceled.Error(), err)
   374  		assert.Equal(t, stateNew, updatedState)
   375  	})
   376  
   377  	t.Run("schedule next fetch ok", func(t *testing.T) {
   378  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   379  			blocksFetcher:       fetcher,
   380  			chain:               mc,
   381  			highestExpectedSlot: types.Slot(blockBatchLimit),
   382  		})
   383  		handlerFn := queue.onScheduleEvent(ctx)
   384  		updatedState, err := handlerFn(&stateMachine{
   385  			state: stateNew,
   386  		}, nil)
   387  		assert.NoError(t, err)
   388  		assert.Equal(t, stateScheduled, updatedState)
   389  	})
   390  }
   391  
   392  func TestBlocksQueue_onDataReceivedEvent(t *testing.T) {
   393  	blockBatchLimit := flags.Get().BlockBatchLimit
   394  	mc, p2p, _ := initializeTestServices(t, []types.Slot{}, []*peerData{})
   395  
   396  	ctx, cancel := context.WithCancel(context.Background())
   397  	defer cancel()
   398  	fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
   399  		chain: mc,
   400  		p2p:   p2p,
   401  	})
   402  
   403  	t.Run("expired context", func(t *testing.T) {
   404  		ctx, cancel := context.WithCancel(ctx)
   405  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   406  			blocksFetcher:       fetcher,
   407  			chain:               mc,
   408  			highestExpectedSlot: types.Slot(blockBatchLimit),
   409  		})
   410  		handlerFn := queue.onDataReceivedEvent(ctx)
   411  		cancel()
   412  		updatedState, err := handlerFn(&stateMachine{
   413  			state: stateScheduled,
   414  		}, nil)
   415  		assert.ErrorContains(t, context.Canceled.Error(), err)
   416  		assert.Equal(t, stateScheduled, updatedState)
   417  	})
   418  
   419  	t.Run("invalid input state", func(t *testing.T) {
   420  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   421  			blocksFetcher:       fetcher,
   422  			chain:               mc,
   423  			highestExpectedSlot: types.Slot(blockBatchLimit),
   424  		})
   425  
   426  		invalidStates := []stateID{stateNew, stateDataParsed, stateSkipped, stateSent}
   427  		for _, state := range invalidStates {
   428  			t.Run(state.String(), func(t *testing.T) {
   429  				handlerFn := queue.onDataReceivedEvent(ctx)
   430  				updatedState, err := handlerFn(&stateMachine{
   431  					state: state,
   432  				}, nil)
   433  				assert.ErrorContains(t, errInvalidInitialState.Error(), err)
   434  				assert.Equal(t, state, updatedState)
   435  			})
   436  		}
   437  	})
   438  
   439  	t.Run("invalid input param", func(t *testing.T) {
   440  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   441  			blocksFetcher:       fetcher,
   442  			chain:               mc,
   443  			highestExpectedSlot: types.Slot(blockBatchLimit),
   444  		})
   445  
   446  		handlerFn := queue.onDataReceivedEvent(ctx)
   447  		updatedState, err := handlerFn(&stateMachine{
   448  			state: stateScheduled,
   449  		}, nil)
   450  		assert.ErrorContains(t, errInputNotFetchRequestParams.Error(), err)
   451  		assert.Equal(t, stateScheduled, updatedState)
   452  	})
   453  
   454  	t.Run("slot is too high do nothing", func(t *testing.T) {
   455  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   456  			blocksFetcher:       fetcher,
   457  			chain:               mc,
   458  			highestExpectedSlot: types.Slot(blockBatchLimit),
   459  		})
   460  
   461  		handlerFn := queue.onDataReceivedEvent(ctx)
   462  		updatedState, err := handlerFn(&stateMachine{
   463  			state: stateScheduled,
   464  		}, &fetchRequestResponse{
   465  			pid: "abc",
   466  			err: errSlotIsTooHigh,
   467  		})
   468  		assert.ErrorContains(t, errSlotIsTooHigh.Error(), err)
   469  		assert.Equal(t, stateScheduled, updatedState)
   470  	})
   471  
   472  	t.Run("slot is too high force re-request on previous epoch", func(t *testing.T) {
   473  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   474  			blocksFetcher:       fetcher,
   475  			chain:               mc,
   476  			highestExpectedSlot: types.Slot(blockBatchLimit),
   477  		})
   478  
   479  		// Mark the previous machine as skipped to test the effect of re-requesting.
   480  		queue.smm.addStateMachine(250)
   481  		queue.smm.machines[250].state = stateSkipped
   482  		assert.Equal(t, stateSkipped, queue.smm.machines[250].state)
   483  
   484  		handlerFn := queue.onDataReceivedEvent(ctx)
   485  		updatedState, err := handlerFn(&stateMachine{
   486  			state: stateScheduled,
   487  		}, &fetchRequestResponse{
   488  			pid:   "abc",
   489  			err:   errSlotIsTooHigh,
   490  			start: 256,
   491  		})
   492  		assert.ErrorContains(t, errSlotIsTooHigh.Error(), err)
   493  		assert.Equal(t, stateScheduled, updatedState)
   494  		assert.Equal(t, stateNew, queue.smm.machines[250].state)
   495  	})
   496  
   497  	t.Run("invalid data returned", func(t *testing.T) {
   498  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   499  			blocksFetcher:       fetcher,
   500  			chain:               mc,
   501  			highestExpectedSlot: types.Slot(blockBatchLimit),
   502  		})
   503  
   504  		hook := logTest.NewGlobal()
   505  		defer hook.Reset()
   506  		handlerFn := queue.onDataReceivedEvent(ctx)
   507  		updatedState, err := handlerFn(&stateMachine{
   508  			state: stateScheduled,
   509  		}, &fetchRequestResponse{
   510  			pid: "abc",
   511  			err: beaconsync.ErrInvalidFetchedData,
   512  		})
   513  		assert.ErrorContains(t, beaconsync.ErrInvalidFetchedData.Error(), err)
   514  		assert.Equal(t, stateScheduled, updatedState)
   515  		assert.LogsContain(t, hook, "msg=\"Peer is penalized for invalid blocks\" pid=ZiCa")
   516  	})
   517  
   518  	t.Run("transition ok", func(t *testing.T) {
   519  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   520  			blocksFetcher:       fetcher,
   521  			chain:               mc,
   522  			highestExpectedSlot: types.Slot(blockBatchLimit),
   523  		})
   524  
   525  		handlerFn := queue.onDataReceivedEvent(ctx)
   526  		response := &fetchRequestResponse{
   527  			pid: "abc",
   528  			blocks: []interfaces.SignedBeaconBlock{
   529  				wrapper.WrappedPhase0SignedBeaconBlock(testutil.NewBeaconBlock()),
   530  				wrapper.WrappedPhase0SignedBeaconBlock(testutil.NewBeaconBlock()),
   531  			},
   532  		}
   533  		fsm := &stateMachine{
   534  			state: stateScheduled,
   535  		}
   536  		assert.Equal(t, peer.ID(""), fsm.pid)
   537  		assert.DeepSSZEqual(t, []interfaces.SignedBeaconBlock(nil), fsm.blocks)
   538  		updatedState, err := handlerFn(fsm, response)
   539  		assert.NoError(t, err)
   540  		assert.Equal(t, stateDataParsed, updatedState)
   541  		assert.Equal(t, response.pid, fsm.pid)
   542  		assert.DeepSSZEqual(t, response.blocks, fsm.blocks)
   543  	})
   544  }
   545  
   546  func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
   547  	blockBatchLimit := flags.Get().BlockBatchLimit
   548  	mc, p2p, _ := initializeTestServices(t, []types.Slot{}, []*peerData{})
   549  
   550  	ctx, cancel := context.WithCancel(context.Background())
   551  	defer cancel()
   552  	fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
   553  		chain: mc,
   554  		p2p:   p2p,
   555  	})
   556  
   557  	t.Run("expired context", func(t *testing.T) {
   558  		ctx, cancel := context.WithCancel(ctx)
   559  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   560  			blocksFetcher:       fetcher,
   561  			chain:               mc,
   562  			highestExpectedSlot: types.Slot(blockBatchLimit),
   563  		})
   564  		handlerFn := queue.onReadyToSendEvent(ctx)
   565  		cancel()
   566  		updatedState, err := handlerFn(&stateMachine{
   567  			state: stateNew,
   568  		}, nil)
   569  		assert.ErrorContains(t, context.Canceled.Error(), err)
   570  		assert.Equal(t, stateNew, updatedState)
   571  	})
   572  
   573  	t.Run("invalid input state", func(t *testing.T) {
   574  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   575  			blocksFetcher:       fetcher,
   576  			chain:               mc,
   577  			highestExpectedSlot: types.Slot(blockBatchLimit),
   578  		})
   579  
   580  		invalidStates := []stateID{stateNew, stateScheduled, stateSkipped, stateSent}
   581  		for _, state := range invalidStates {
   582  			t.Run(state.String(), func(t *testing.T) {
   583  				handlerFn := queue.onReadyToSendEvent(ctx)
   584  				updatedState, err := handlerFn(&stateMachine{
   585  					state: state,
   586  				}, nil)
   587  				assert.ErrorContains(t, errInvalidInitialState.Error(), err)
   588  				assert.Equal(t, state, updatedState)
   589  			})
   590  		}
   591  	})
   592  
   593  	t.Run("no blocks to send", func(t *testing.T) {
   594  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   595  			blocksFetcher:       fetcher,
   596  			chain:               mc,
   597  			highestExpectedSlot: types.Slot(blockBatchLimit),
   598  		})
   599  
   600  		handlerFn := queue.onReadyToSendEvent(ctx)
   601  		updatedState, err := handlerFn(&stateMachine{
   602  			state: stateDataParsed,
   603  		}, nil)
   604  		// No error, but the state is marked as skipped, since no blocks were produced for the range.
   605  		assert.NoError(t, err)
   606  		assert.Equal(t, stateSkipped, updatedState)
   607  	})
   608  
   609  	const pidDataParsed = "abc"
   610  	t.Run("send from the first machine", func(t *testing.T) {
   611  		fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
   612  			chain: mc,
   613  			p2p:   p2p,
   614  		})
   615  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   616  			blocksFetcher:       fetcher,
   617  			chain:               mc,
   618  			highestExpectedSlot: types.Slot(blockBatchLimit),
   619  		})
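        		// The machine at slot 256 is the earliest one and already holds parsed blocks,
        		// so the handler is expected to send them.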
   620  		queue.smm.addStateMachine(256)
   621  		queue.smm.addStateMachine(320)
   622  		queue.smm.machines[256].state = stateDataParsed
   623  		queue.smm.machines[256].pid = pidDataParsed
   624  		queue.smm.machines[256].blocks = []interfaces.SignedBeaconBlock{
   625  			wrapper.WrappedPhase0SignedBeaconBlock(testutil.NewBeaconBlock()),
   626  		}
   627  
   628  		handlerFn := queue.onReadyToSendEvent(ctx)
   629  		updatedState, err := handlerFn(queue.smm.machines[256], nil)
   630  		// The machine is the first one and has blocks, so they should be sent.
   631  		assert.NoError(t, err)
   632  		assert.Equal(t, stateSent, updatedState)
   633  	})
   634  
   635  	t.Run("previous machines are not processed - do not send", func(t *testing.T) {
   636  		fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
   637  			chain: mc,
   638  			p2p:   p2p,
   639  		})
   640  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   641  			blocksFetcher:       fetcher,
   642  			chain:               mc,
   643  			highestExpectedSlot: types.Slot(blockBatchLimit),
   644  		})
   645  		queue.smm.addStateMachine(128)
   646  		queue.smm.machines[128].state = stateNew
   647  		queue.smm.addStateMachine(192)
   648  		queue.smm.machines[192].state = stateScheduled
   649  		queue.smm.addStateMachine(256)
   650  		queue.smm.machines[256].state = stateDataParsed
   651  		queue.smm.addStateMachine(320)
   652  		queue.smm.machines[320].state = stateDataParsed
   653  		queue.smm.machines[320].pid = pidDataParsed
   654  		queue.smm.machines[320].blocks = []interfaces.SignedBeaconBlock{
   655  			wrapper.WrappedPhase0SignedBeaconBlock(testutil.NewBeaconBlock()),
   656  		}
   657  
   658  		handlerFn := queue.onReadyToSendEvent(ctx)
   659  		updatedState, err := handlerFn(queue.smm.machines[320], nil)
   660  		// Previous machines are in stateNew, stateScheduled, and stateDataParsed, so the current
   661  		// machine should wait before sending anything; hence, no state change.
   662  		assert.NoError(t, err)
   663  		assert.Equal(t, stateDataParsed, updatedState)
   664  	})
   665  
   666  	t.Run("previous machines are processed - send", func(t *testing.T) {
   667  		fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
   668  			chain: mc,
   669  			p2p:   p2p,
   670  		})
   671  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   672  			blocksFetcher:       fetcher,
   673  			chain:               mc,
   674  			highestExpectedSlot: types.Slot(blockBatchLimit),
   675  		})
   676  		queue.smm.addStateMachine(256)
   677  		queue.smm.machines[256].state = stateSkipped
   678  		queue.smm.addStateMachine(320)
   679  		queue.smm.machines[320].state = stateDataParsed
   680  		queue.smm.machines[320].pid = pidDataParsed
   681  		queue.smm.machines[320].blocks = []interfaces.SignedBeaconBlock{
   682  			wrapper.WrappedPhase0SignedBeaconBlock(testutil.NewBeaconBlock()),
   683  		}
   684  
   685  		handlerFn := queue.onReadyToSendEvent(ctx)
   686  		updatedState, err := handlerFn(queue.smm.machines[320], nil)
   687  		assert.NoError(t, err)
   688  		assert.Equal(t, stateSent, updatedState)
   689  	})
   690  }
   691  
   692  func TestBlocksQueue_onProcessSkippedEvent(t *testing.T) {
   693  	blockBatchLimit := flags.Get().BlockBatchLimit
   694  	mc, p2p, _ := initializeTestServices(t, []types.Slot{}, []*peerData{})
   695  
   696  	ctx, cancel := context.WithCancel(context.Background())
   697  	defer cancel()
   698  	fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
   699  		chain: mc,
   700  		p2p:   p2p,
   701  	})
   702  
   703  	t.Run("expired context", func(t *testing.T) {
   704  		ctx, cancel := context.WithCancel(ctx)
   705  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   706  			blocksFetcher:       fetcher,
   707  			chain:               mc,
   708  			highestExpectedSlot: types.Slot(blockBatchLimit),
   709  		})
   710  		handlerFn := queue.onProcessSkippedEvent(ctx)
   711  		cancel()
   712  		updatedState, err := handlerFn(&stateMachine{
   713  			state: stateSkipped,
   714  		}, nil)
   715  		assert.ErrorContains(t, context.Canceled.Error(), err)
   716  		assert.Equal(t, stateSkipped, updatedState)
   717  	})
   718  
   719  	t.Run("invalid input state", func(t *testing.T) {
   720  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   721  			blocksFetcher:       fetcher,
   722  			chain:               mc,
   723  			highestExpectedSlot: types.Slot(blockBatchLimit),
   724  		})
   725  
   726  		invalidStates := []stateID{stateNew, stateScheduled, stateDataParsed, stateSent}
   727  		for _, state := range invalidStates {
   728  			t.Run(state.String(), func(t *testing.T) {
   729  				handlerFn := queue.onProcessSkippedEvent(ctx)
   730  				updatedState, err := handlerFn(&stateMachine{
   731  					state: state,
   732  				}, nil)
   733  				assert.ErrorContains(t, errInvalidInitialState.Error(), err)
   734  				assert.Equal(t, state, updatedState)
   735  			})
   736  		}
   737  	})
   738  
   739  	t.Run("not the last machine - do nothing", func(t *testing.T) {
   740  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   741  			blocksFetcher:       fetcher,
   742  			chain:               mc,
   743  			highestExpectedSlot: types.Slot(blockBatchLimit),
   744  		})
   745  
   746  		queue.smm.addStateMachine(256)
   747  		// The machine has not been skipped for too long, so do not mark it as new just yet.
   748  		queue.smm.machines[256].updated = timeutils.Now().Add(-1 * (skippedMachineTimeout / 2))
   749  		queue.smm.machines[256].state = stateSkipped
   750  		queue.smm.addStateMachine(320)
   751  		queue.smm.machines[320].state = stateScheduled
   752  		handlerFn := queue.onProcessSkippedEvent(ctx)
   753  		updatedState, err := handlerFn(queue.smm.machines[256], nil)
   754  		assert.NoError(t, err)
   755  		assert.Equal(t, stateSkipped, updatedState)
   756  	})
   757  
   758  	t.Run("not the last machine - reset", func(t *testing.T) {
   759  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   760  			blocksFetcher:       fetcher,
   761  			chain:               mc,
   762  			highestExpectedSlot: types.Slot(blockBatchLimit),
   763  		})
   764  
   765  		queue.smm.addStateMachine(256)
   766  		// The machine has been skipped for too long, so reset it.
   767  		queue.smm.machines[256].updated = timeutils.Now().Add(-1 * skippedMachineTimeout)
   768  		queue.smm.machines[256].state = stateSkipped
   769  		queue.smm.addStateMachine(320)
   770  		queue.smm.machines[320].state = stateScheduled
   771  		handlerFn := queue.onProcessSkippedEvent(ctx)
   772  		updatedState, err := handlerFn(queue.smm.machines[256], nil)
   773  		assert.NoError(t, err)
   774  		assert.Equal(t, stateNew, updatedState)
   775  	})
   776  
   777  	t.Run("not all machines are skipped", func(t *testing.T) {
   778  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   779  			blocksFetcher:       fetcher,
   780  			chain:               mc,
   781  			highestExpectedSlot: types.Slot(blockBatchLimit),
   782  		})
   783  
   784  		queue.smm.addStateMachine(192)
   785  		queue.smm.machines[192].state = stateSkipped
   786  		queue.smm.addStateMachine(256)
   787  		queue.smm.machines[256].state = stateScheduled
   788  		queue.smm.addStateMachine(320)
   789  		queue.smm.machines[320].state = stateSkipped
   790  		handlerFn := queue.onProcessSkippedEvent(ctx)
   791  		updatedState, err := handlerFn(queue.smm.machines[320], nil)
   792  		assert.NoError(t, err)
   793  		assert.Equal(t, stateSkipped, updatedState)
   794  	})
   795  
   796  	t.Run("not enough peers", func(t *testing.T) {
   797  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   798  			blocksFetcher:       fetcher,
   799  			chain:               mc,
   800  			highestExpectedSlot: types.Slot(blockBatchLimit),
   801  		})
   802  
   803  		queue.smm.addStateMachine(192)
   804  		queue.smm.machines[192].state = stateSkipped
   805  		queue.smm.addStateMachine(256)
   806  		queue.smm.machines[256].state = stateSkipped
   807  		queue.smm.addStateMachine(320)
   808  		queue.smm.machines[320].state = stateSkipped
   809  		// Mode 1: Stop on finalized epoch.
   810  		handlerFn := queue.onProcessSkippedEvent(ctx)
   811  		updatedState, err := handlerFn(queue.smm.machines[320], nil)
   812  		assert.ErrorContains(t, errNoRequiredPeers.Error(), err)
   813  		assert.Equal(t, stateSkipped, updatedState)
   814  		// Mode 2: Do not stop on finalized epoch.
   815  		queue.mode = modeNonConstrained
   816  		handlerFn = queue.onProcessSkippedEvent(ctx)
   817  		updatedState, err = handlerFn(queue.smm.machines[320], nil)
   818  		assert.ErrorContains(t, errNoRequiredPeers.Error(), err)
   819  		assert.Equal(t, stateSkipped, updatedState)
   820  	})
   821  
   822  	t.Run("ready to update machines - non-skipped slot not found", func(t *testing.T) {
   823  		p := p2pt.NewTestP2P(t)
   824  		connectPeers(t, p, []*peerData{
   825  			{blocks: makeSequence(1, 160), finalizedEpoch: 5, headSlot: 128},
   826  		}, p.Peers())
   827  		fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
   828  			chain: mc,
   829  			p2p:   p,
   830  		})
   831  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   832  			blocksFetcher:       fetcher,
   833  			chain:               mc,
   834  			highestExpectedSlot: types.Slot(blockBatchLimit),
   835  		})
   836  
   837  		startSlot := queue.chain.HeadSlot()
   838  		blocksPerRequest := queue.blocksFetcher.blocksPerSecond
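        		// Mark every lookahead machine as skipped, so that the handler must search
        		// for the first non-skipped slot (which the connected peer cannot provide).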
   839  		for i := startSlot; i < startSlot.Add(blocksPerRequest*lookaheadSteps); i += types.Slot(blocksPerRequest) {
   840  			queue.smm.addStateMachine(i).setState(stateSkipped)
   841  		}
   842  
   843  		handlerFn := queue.onProcessSkippedEvent(ctx)
   844  		updatedState, err := handlerFn(queue.smm.machines[types.Slot(blocksPerRequest*(lookaheadSteps-1))], nil)
   845  		assert.ErrorContains(t, "invalid range for non-skipped slot", err)
   846  		assert.Equal(t, stateSkipped, updatedState)
   847  	})
   848  
   849  	t.Run("ready to update machines - constrained mode", func(t *testing.T) {
   850  		p := p2pt.NewTestP2P(t)
   851  		connectPeers(t, p, []*peerData{
   852  			{blocks: makeSequence(500, 628), finalizedEpoch: 16, headSlot: 600},
   853  		}, p.Peers())
   854  		fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
   855  			chain: mc,
   856  			p2p:   p,
   857  		})
   858  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   859  			blocksFetcher:       fetcher,
   860  			chain:               mc,
   861  			highestExpectedSlot: types.Slot(blockBatchLimit),
   862  		})
   863  		assert.Equal(t, types.Slot(blockBatchLimit), queue.highestExpectedSlot)
   864  
   865  		startSlot := queue.chain.HeadSlot()
   866  		blocksPerRequest := queue.blocksFetcher.blocksPerSecond
   867  		var machineSlots []types.Slot
   868  		for i := startSlot; i < startSlot.Add(blocksPerRequest*lookaheadSteps); i += types.Slot(blocksPerRequest) {
   869  			queue.smm.addStateMachine(i).setState(stateSkipped)
   870  			machineSlots = append(machineSlots, i)
   871  		}
   872  		for _, slot := range machineSlots {
   873  			_, ok := queue.smm.findStateMachine(slot)
   874  			assert.Equal(t, true, ok)
   875  		}
   876  		// Update the head slot so that machines are re-arranged starting from the next slot, i.e.
   877  		// there is no point in resetting machines for a slot that has already been processed.
   878  		updatedSlot := types.Slot(100)
   879  		defer func() {
   880  			require.NoError(t, mc.State.SetSlot(0))
   881  		}()
   882  		require.NoError(t, mc.State.SetSlot(updatedSlot))
   883  
   884  		handlerFn := queue.onProcessSkippedEvent(ctx)
   885  		updatedState, err := handlerFn(queue.smm.machines[types.Slot(blocksPerRequest*(lookaheadSteps-1))], nil)
   886  		assert.NoError(t, err)
   887  		assert.Equal(t, stateSkipped, updatedState)
   888  		// Assert that machines have been re-arranged.
   889  		for i, slot := range machineSlots {
   890  			_, ok := queue.smm.findStateMachine(slot)
   891  			assert.Equal(t, false, ok)
   892  			_, ok = queue.smm.findStateMachine(updatedSlot.Add(1 + uint64(i)*blocksPerRequest))
   893  			assert.Equal(t, true, ok)
   894  		}
   895  		// Assert highest expected slot is extended.
   896  		assert.Equal(t, types.Slot(blocksPerRequest*lookaheadSteps), queue.highestExpectedSlot)
   897  	})
   898  
   899  	t.Run("ready to update machines - unconstrained mode", func(t *testing.T) {
   900  		p := p2pt.NewTestP2P(t)
   901  		connectPeers(t, p, []*peerData{
   902  			{blocks: makeSequence(500, 628), finalizedEpoch: 16, headSlot: 600},
   903  		}, p.Peers())
   904  		fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
   905  			chain: mc,
   906  			p2p:   p,
   907  		})
   908  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   909  			blocksFetcher:       fetcher,
   910  			chain:               mc,
   911  			highestExpectedSlot: types.Slot(blockBatchLimit),
   912  		})
   913  		queue.mode = modeNonConstrained
   914  		assert.Equal(t, types.Slot(blockBatchLimit), queue.highestExpectedSlot)
   915  
   916  		startSlot := queue.chain.HeadSlot()
   917  		blocksPerRequest := queue.blocksFetcher.blocksPerSecond
   918  		var machineSlots []types.Slot
   919  		for i := startSlot; i < startSlot.Add(blocksPerRequest*lookaheadSteps); i += types.Slot(blocksPerRequest) {
   920  			queue.smm.addStateMachine(i).setState(stateSkipped)
   921  			machineSlots = append(machineSlots, i)
   922  		}
   923  		for _, slot := range machineSlots {
   924  			_, ok := queue.smm.findStateMachine(slot)
   925  			assert.Equal(t, true, ok)
   926  		}
   927  		// Update the head slot so that machines are re-arranged starting from the next slot, i.e.
   928  		// there is no point in resetting machines for a slot that has already been processed.
   929  		updatedSlot := types.Slot(100)
   930  		require.NoError(t, mc.State.SetSlot(updatedSlot))
   931  
   932  		handlerFn := queue.onProcessSkippedEvent(ctx)
   933  		updatedState, err := handlerFn(queue.smm.machines[types.Slot(blocksPerRequest*(lookaheadSteps-1))], nil)
   934  		assert.NoError(t, err)
   935  		assert.Equal(t, stateSkipped, updatedState)
   936  		// Assert that machines have been re-arranged.
   937  		for i, slot := range machineSlots {
   938  			_, ok := queue.smm.findStateMachine(slot)
   939  			assert.Equal(t, false, ok)
   940  			_, ok = queue.smm.findStateMachine(updatedSlot.Add(1 + uint64(i)*blocksPerRequest))
   941  			assert.Equal(t, true, ok)
   942  		}
   943  		// Assert highest expected slot is extended.
   944  		assert.Equal(t, types.Slot(blocksPerRequest*(lookaheadSteps+1)), queue.highestExpectedSlot)
   945  	})
   946  }
   947  
   948  func TestBlocksQueue_onCheckStaleEvent(t *testing.T) {
   949  	blockBatchLimit := flags.Get().BlockBatchLimit
   950  	mc, p2p, _ := initializeTestServices(t, []types.Slot{}, []*peerData{})
   951  
   952  	ctx, cancel := context.WithCancel(context.Background())
   953  	defer cancel()
   954  	fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
   955  		chain: mc,
   956  		p2p:   p2p,
   957  	})
   958  
   959  	t.Run("expired context", func(t *testing.T) {
   960  		ctx, cancel := context.WithCancel(ctx)
   961  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   962  			blocksFetcher:       fetcher,
   963  			chain:               mc,
   964  			highestExpectedSlot: types.Slot(blockBatchLimit),
   965  		})
   966  		handlerFn := queue.onCheckStaleEvent(ctx)
   967  		cancel()
   968  		updatedState, err := handlerFn(&stateMachine{
   969  			state: stateSkipped,
   970  		}, nil)
   971  		assert.ErrorContains(t, context.Canceled.Error(), err)
   972  		assert.Equal(t, stateSkipped, updatedState)
   973  	})
   974  
   975  	t.Run("invalid input state", func(t *testing.T) {
   976  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   977  			blocksFetcher:       fetcher,
   978  			chain:               mc,
   979  			highestExpectedSlot: types.Slot(blockBatchLimit),
   980  		})
   981  
   982  		invalidStates := []stateID{stateNew, stateScheduled, stateDataParsed, stateSkipped}
   983  		for _, state := range invalidStates {
   984  			t.Run(state.String(), func(t *testing.T) {
   985  				handlerFn := queue.onCheckStaleEvent(ctx)
   986  				updatedState, err := handlerFn(&stateMachine{
   987  					state: state,
   988  				}, nil)
   989  				assert.ErrorContains(t, errInvalidInitialState.Error(), err)
   990  				assert.Equal(t, state, updatedState)
   991  			})
   992  		}
   993  	})
   994  
   995  	t.Run("process non stale machine", func(t *testing.T) {
   996  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
   997  			blocksFetcher:       fetcher,
   998  			chain:               mc,
   999  			highestExpectedSlot: types.Slot(blockBatchLimit),
  1000  		})
  1001  		handlerFn := queue.onCheckStaleEvent(ctx)
  1002  		updatedState, err := handlerFn(&stateMachine{
  1003  			state:   stateSent,
  1004  			updated: timeutils.Now().Add(-staleEpochTimeout / 2),
  1005  		}, nil)
  1006  		// State should not change, as machine is not yet stale.
  1007  		assert.NoError(t, err)
  1008  		assert.Equal(t, stateSent, updatedState)
  1009  	})
  1010  
  1011  	t.Run("process stale machine", func(t *testing.T) {
  1012  		queue := newBlocksQueue(ctx, &blocksQueueConfig{
  1013  			blocksFetcher:       fetcher,
  1014  			chain:               mc,
  1015  			highestExpectedSlot: types.Slot(blockBatchLimit),
  1016  		})
  1017  		handlerFn := queue.onCheckStaleEvent(ctx)
  1018  		updatedState, err := handlerFn(&stateMachine{
  1019  			state:   stateSent,
  1020  			updated: timeutils.Now().Add(-staleEpochTimeout),
  1021  		}, nil)
  1022  		// State should change, as machine is stale.
  1023  		assert.NoError(t, err)
  1024  		assert.Equal(t, stateSkipped, updatedState)
  1025  	})
  1026  }
  1027  
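        // TestBlocksQueue_stuckInUnfavourableFork checks that when the locally saved chain is a
        // dead-end fork, the queue can backtrack and, given a peer serving an alternative branch,
        // fetch the forked blocks.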
  1028  func TestBlocksQueue_stuckInUnfavourableFork(t *testing.T) {
  1029  	beaconDB := dbtest.SetupDB(t)
  1030  	p2p := p2pt.NewTestP2P(t)
  1031  
  1032  	// The chain1 contains 250 blocks and is a dead end.
  1033  	// The chain2 forks from chain1 at slot 201 (forkedSlot) and extends it by another 100 blocks.
  1034  	chain1 := extendBlockSequence(t, []*eth.SignedBeaconBlock{}, 250)
  1035  	forkedSlot := types.Slot(201)
  1036  	chain2 := extendBlockSequence(t, chain1[:forkedSlot], 100)
  1037  	finalizedSlot := types.Slot(63)
  1038  	finalizedEpoch := helpers.SlotToEpoch(finalizedSlot)
  1039  
  1040  	genesisBlock := chain1[0]
  1041  	require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(genesisBlock)))
  1042  	genesisRoot, err := genesisBlock.Block.HashTreeRoot()
  1043  	require.NoError(t, err)
  1044  
  1045  	st, err := testutil.NewBeaconState()
  1046  	require.NoError(t, err)
  1047  	mc := &mock.ChainService{
  1048  		State: st,
  1049  		Root:  genesisRoot[:],
  1050  		DB:    beaconDB,
  1051  		FinalizedCheckPoint: &eth.Checkpoint{
  1052  			Epoch: finalizedEpoch,
  1053  			Root:  []byte(fmt.Sprintf("finalized_root %d", finalizedEpoch)),
  1054  		},
  1055  	}
  1056  
  1057  	ctx, cancel := context.WithCancel(context.Background())
  1058  	defer cancel()
  1059  	fetcher := newBlocksFetcher(
  1060  		ctx,
  1061  		&blocksFetcherConfig{
  1062  			chain: mc,
  1063  			p2p:   p2p,
  1064  			db:    beaconDB,
  1065  		},
  1066  	)
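        	// Use a generous rate limit so that request throttling does not interfere with the test.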
  1067  	fetcher.rateLimiter = leakybucket.NewCollector(6400, 6400, false)
  1068  
  1069  	queue := newBlocksQueue(ctx, &blocksQueueConfig{
  1070  		blocksFetcher:       fetcher,
  1071  		chain:               mc,
  1072  		highestExpectedSlot: types.Slot(len(chain2) - 1),
  1073  		mode:                modeNonConstrained,
  1074  	})
  1075  
  1076  	// Populate the database with blocks from the unfavourable fork, i.e. the branch that leads to a dead end.
  1077  	for _, blk := range chain1[1:] {
  1078  		parentRoot := bytesutil.ToBytes32(blk.Block.ParentRoot)
  1079  		// Save block only if parent root is already in database or cache.
  1080  		if beaconDB.HasBlock(ctx, parentRoot) || mc.HasInitSyncBlock(parentRoot) {
  1081  			require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
  1082  			require.NoError(t, st.SetSlot(blk.Block.Slot))
  1083  		}
  1084  	}
  1085  	require.Equal(t, types.Slot(len(chain1)-1), mc.HeadSlot())
  1086  	hook := logTest.NewGlobal()
  1087  
  1088  	t.Run("unfavourable fork and no alternative branches", func(t *testing.T) {
  1089  		defer hook.Reset()
  1090  		// Reset all machines.
  1091  		require.NoError(t, queue.smm.removeAllStateMachines())
  1092  
  1093  		// Add a peer that will advertise a high non-finalized slot, but will not be able to support
  1094  		// its claims with actual blocks.
  1095  		emptyPeer := connectPeerHavingBlocks(t, p2p, chain1, finalizedSlot, p2p.Peers())
  1096  		defer func() {
  1097  			p2p.Peers().SetConnectionState(emptyPeer, peers.PeerDisconnected)
  1098  		}()
  1099  		chainState, err := p2p.Peers().ChainState(emptyPeer)
  1100  		require.NoError(t, err)
  1101  		chainState.HeadSlot = 500
  1102  		p2p.Peers().SetChainState(emptyPeer, chainState)
  1103  
  1104  		startSlot := mc.HeadSlot() + 1
  1105  		blocksPerRequest := queue.blocksFetcher.blocksPerSecond
  1106  		machineSlots := make([]types.Slot, 0)
  1107  		for i := startSlot; i < startSlot.Add(blocksPerRequest*lookaheadSteps); i += types.Slot(blocksPerRequest) {
  1108  			queue.smm.addStateMachine(i).setState(stateSkipped)
  1109  			machineSlots = append(machineSlots, i)
  1110  		}
  1111  		for _, slot := range machineSlots {
  1112  			_, ok := queue.smm.findStateMachine(slot)
  1113  			assert.Equal(t, true, ok)
  1114  		}
  1115  		// Since the stale-epochs counter hasn't exceeded the threshold, backtracking is not triggered.
  1116  		handlerFn := queue.onProcessSkippedEvent(ctx)
  1117  		assert.Equal(t, lookaheadSteps, len(queue.smm.machines))
  1118  		updatedState, err := handlerFn(queue.smm.machines[machineSlots[len(machineSlots)-1]], nil)
  1119  		assert.ErrorContains(t, "invalid range for non-skipped slot", err)
  1120  		assert.Equal(t, stateSkipped, updatedState)
  1121  		assert.Equal(t, lookaheadSteps-1, len(queue.smm.machines))
  1122  		assert.LogsDoNotContain(t, hook, "Searching for alternative blocks")
  1123  		assert.LogsDoNotContain(t, hook, "No alternative blocks found for peer")
  1124  		hook.Reset()
  1125  
  1126  		// The last machine was removed (it was for the non-skipped slot, which fails).
  1127  		queue.smm.addStateMachine(machineSlots[len(machineSlots)-1])
  1128  		assert.Equal(t, lookaheadSteps, len(queue.smm.machines))
  1129  		for _, slot := range machineSlots {
  1130  			fsm, ok := queue.smm.findStateMachine(slot)
  1131  			require.Equal(t, true, ok)
  1132  			fsm.setState(stateSkipped)
  1133  		}
  1134  
  1135  		// Update counter, and trigger backtracking.
  1136  		queue.staleEpochs[helpers.SlotToEpoch(machineSlots[0])] = maxResetAttempts
  1137  		handlerFn = queue.onProcessSkippedEvent(ctx)
  1138  		updatedState, err = handlerFn(queue.smm.machines[machineSlots[len(machineSlots)-1]], nil)
  1139  		assert.ErrorContains(t, "invalid range for non-skipped slot", err)
  1140  		assert.Equal(t, stateSkipped, updatedState)
  1141  		assert.Equal(t, lookaheadSteps-1, len(queue.smm.machines))
  1142  		assert.LogsContain(t, hook, "Searching for alternative blocks")
  1143  		assert.LogsContain(t, hook, "No alternative blocks found for peer")
  1144  	})
  1145  
  1146  	t.Run("unfavourable fork and alternative branches exist", func(t *testing.T) {
  1147  		defer hook.Reset()
  1148  		// Reset all machines.
  1149  		require.NoError(t, queue.smm.removeAllStateMachines())
  1150  
  1151  		// Add a peer that serves the alternative (forked) chain, so that backtracking has
  1152  		// actual blocks to fall back on.
  1153  		forkedPeer := connectPeerHavingBlocks(t, p2p, chain2, finalizedSlot, p2p.Peers())
  1154  		startSlot := mc.HeadSlot() + 1
  1155  		blocksPerRequest := queue.blocksFetcher.blocksPerSecond
  1156  		machineSlots := make([]types.Slot, 0)
  1157  		for i := startSlot; i < startSlot.Add(blocksPerRequest*lookaheadSteps); i += types.Slot(blocksPerRequest) {
  1158  			queue.smm.addStateMachine(i).setState(stateSkipped)
  1159  			machineSlots = append(machineSlots, i)
  1160  		}
  1161  		for _, slot := range machineSlots {
  1162  			_, ok := queue.smm.findStateMachine(slot)
  1163  			assert.Equal(t, true, ok)
  1164  		}
  1165  		// Since the stale-epochs counter hasn't exceeded the threshold, backtracking is not triggered.
  1166  		handlerFn := queue.onProcessSkippedEvent(ctx)
  1167  		assert.Equal(t, lookaheadSteps, len(queue.smm.machines))
  1168  		updatedState, err := handlerFn(queue.smm.machines[machineSlots[len(machineSlots)-1]], nil)
  1169  		assert.ErrorContains(t, "invalid range for non-skipped slot", err)
  1170  		assert.Equal(t, stateSkipped, updatedState)
  1171  		assert.Equal(t, lookaheadSteps-1, len(queue.smm.machines))
  1172  		assert.LogsDoNotContain(t, hook, "Searching for alternative blocks")
  1173  		assert.LogsDoNotContain(t, hook, "No alternative blocks found for peer")
  1174  		hook.Reset()
  1175  
  1176  		// The last machine was removed (it was for the non-skipped slot, which fails).
  1177  		queue.smm.addStateMachine(machineSlots[len(machineSlots)-1])
  1178  		assert.Equal(t, lookaheadSteps, len(queue.smm.machines))
  1179  		for _, slot := range machineSlots {
  1180  			fsm, ok := queue.smm.findStateMachine(slot)
  1181  			require.Equal(t, true, ok)
  1182  			fsm.setState(stateSkipped)
  1183  		}
  1184  
  1185  		// Update counter, and trigger backtracking.
  1186  		queue.staleEpochs[helpers.SlotToEpoch(machineSlots[0])] = maxResetAttempts
  1187  		handlerFn = queue.onProcessSkippedEvent(ctx)
  1188  		updatedState, err = handlerFn(queue.smm.machines[machineSlots[len(machineSlots)-1]], nil)
  1189  		require.NoError(t, err)
  1190  		assert.Equal(t, stateSkipped, updatedState)
  1191  		assert.LogsContain(t, hook, "Searching for alternative blocks")
  1192  		assert.LogsDoNotContain(t, hook, "No alternative blocks found for peer")
  1193  		require.Equal(t, lookaheadSteps, len(queue.smm.machines))
  1194  
  1195  		// The alternative fork should start at slot 201; make sure that the first machine contains all
  1196  		// required forked data, including data at and after slot 201.
  1197  		forkedEpochStartSlot, err := helpers.StartSlot(helpers.SlotToEpoch(forkedSlot))
  1198  		require.NoError(t, err)
  1199  		firstFSM, ok := queue.smm.findStateMachine(forkedEpochStartSlot + 1)
  1200  		require.Equal(t, true, ok)
  1201  		require.Equal(t, stateDataParsed, firstFSM.state)
  1202  		require.Equal(t, forkedPeer, firstFSM.pid)
  1203  		require.Equal(t, 64, len(firstFSM.blocks))
  1204  		require.Equal(t, forkedEpochStartSlot+1, firstFSM.blocks[0].Block().Slot())
  1205  
  1206  		// Assert that forked data from chain2 is available (within 64 fetched blocks).
  1207  		for i, blk := range chain2[forkedEpochStartSlot+1:] {
  1208  			if i >= len(firstFSM.blocks) {
  1209  				break
  1210  			}
  1211  			rootFromFSM, err := firstFSM.blocks[i].Block().HashTreeRoot()
  1212  			require.NoError(t, err)
  1213  			blkRoot, err := blk.Block.HashTreeRoot()
  1214  			require.NoError(t, err)
  1215  			assert.Equal(t, blkRoot, rootFromFSM)
  1216  		}
  1217  
  1218  		// Assert that machines are in the expected state.
  1219  		startSlot = forkedEpochStartSlot.Add(1 + uint64(len(firstFSM.blocks)))
  1220  		for i := startSlot; i < startSlot.Add(blocksPerRequest*(lookaheadSteps-1)); i += types.Slot(blocksPerRequest) {
  1221  			fsm, ok := queue.smm.findStateMachine(i)
  1222  			require.Equal(t, true, ok)
  1223  			assert.Equal(t, stateSkipped, fsm.state)
  1224  		}
  1225  	})
  1226  }
  1227  
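        // TestBlocksQueue_stuckWhenHeadIsSetToOrphanedBlock checks that the queue can still fetch
        // the canonical chain when the current head points to an orphaned (forked) block.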
  1228  func TestBlocksQueue_stuckWhenHeadIsSetToOrphanedBlock(t *testing.T) {
  1229  	ctx, cancel := context.WithCancel(context.Background())
  1230  	defer cancel()
  1231  
  1232  	beaconDB := dbtest.SetupDB(t)
  1233  	p2p := p2pt.NewTestP2P(t)
  1234  
  1235  	chain := extendBlockSequence(t, []*eth.SignedBeaconBlock{}, 128)
  1236  	finalizedSlot := types.Slot(82)
  1237  	finalizedEpoch := helpers.SlotToEpoch(finalizedSlot)
  1238  
  1239  	genesisBlock := chain[0]
  1240  	require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(genesisBlock)))
  1241  	genesisRoot, err := genesisBlock.Block.HashTreeRoot()
  1242  	require.NoError(t, err)
  1243  
  1244  	st, err := testutil.NewBeaconState()
  1245  	require.NoError(t, err)
  1246  	mc := &mock.ChainService{
  1247  		State: st,
  1248  		Root:  genesisRoot[:],
  1249  		DB:    beaconDB,
  1250  		FinalizedCheckPoint: &eth.Checkpoint{
  1251  			Epoch: finalizedEpoch,
  1252  			Root:  []byte(fmt.Sprintf("finalized_root %d", finalizedEpoch)),
  1253  		},
  1254  	}
  1255  
  1256  	// Populate the database with part of the chain; an orphaned block will be added on top.
  1257  	for _, blk := range chain[1:84] {
  1258  		parentRoot := bytesutil.ToBytes32(blk.Block.ParentRoot)
  1259  		// Save block only if parent root is already in database or cache.
  1260  		if beaconDB.HasBlock(ctx, parentRoot) || mc.HasInitSyncBlock(parentRoot) {
  1261  			require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
  1262  			require.NoError(t, st.SetSlot(blk.Block.Slot))
  1263  		}
  1264  	}
  1265  	require.Equal(t, types.Slot(83), mc.HeadSlot())
  1266  	require.Equal(t, chain[83].Block.Slot, mc.HeadSlot())
  1267  
  1268  	// Set the head to slot 85 while the block at slot 84 is not in the DB, so the head block is orphaned.
  1269  	// Moreover, the block at slot 85 is a forked block and should be replaced with a block from a peer.
  1270  	orphanedBlock := testutil.NewBeaconBlock()
  1271  	orphanedBlock.Block.Slot = 85
  1272  	orphanedBlock.Block.StateRoot = testutil.Random32Bytes(t)
  1273  	require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(orphanedBlock)))
  1274  	require.NoError(t, st.SetSlot(orphanedBlock.Block.Slot))
  1275  	require.Equal(t, types.Slot(85), mc.HeadSlot())
  1276  
  1277  	fetcher := newBlocksFetcher(
  1278  		ctx,
  1279  		&blocksFetcherConfig{
  1280  			chain: mc,
  1281  			p2p:   p2p,
  1282  			db:    beaconDB,
  1283  		},
  1284  	)
  1285  	fetcher.rateLimiter = leakybucket.NewCollector(6400, 6400, false)
  1286  
  1287  	// Connect a peer that has all the blocks available.
  1288  	allBlocksPeer := connectPeerHavingBlocks(t, p2p, chain, finalizedSlot, p2p.Peers())
  1289  	defer func() {
  1290  		p2p.Peers().SetConnectionState(allBlocksPeer, peers.PeerDisconnected)
  1291  	}()
  1292  
  1293  	// The queue should be able to fetch the whole chain (including slots that come before the currently set head).
  1294  	queue := newBlocksQueue(ctx, &blocksQueueConfig{
  1295  		blocksFetcher:       fetcher,
  1296  		chain:               mc,
  1297  		highestExpectedSlot: types.Slot(len(chain) - 1),
  1298  		mode:                modeNonConstrained,
  1299  	})
  1300  
  1301  	require.NoError(t, queue.start())
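        	// isProcessedBlock treats a block as already processed when it is at or below the
        	// finalized slot, or already known to the database or the init-sync block cache.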
  1302  	isProcessedBlock := func(ctx context.Context, blk interfaces.SignedBeaconBlock, blkRoot [32]byte) bool {
  1303  		finalizedSlot, err := helpers.StartSlot(mc.FinalizedCheckpt().Epoch)
  1304  		if err != nil {
  1305  			return false
  1306  		}
  1307  		if blk.Block().Slot() <= finalizedSlot || (beaconDB.HasBlock(ctx, blkRoot) || mc.HasInitSyncBlock(blkRoot)) {
  1308  			return true
  1309  		}
  1310  		return false
  1311  	}
  1312  
  1313  	select {
  1314  	case <-time.After(3 * time.Second):
  1315  		t.Fatal("test takes too long to complete")
  1316  	case data := <-queue.fetchedData:
  1317  		for _, blk := range data.blocks {
  1318  			blkRoot, err := blk.Block().HashTreeRoot()
  1319  			require.NoError(t, err)
  1320  			if isProcessedBlock(ctx, blk, blkRoot) {
  1321  				log.Errorf("slot: %d , root %#x: %v", blk.Block().Slot(), blkRoot, errBlockAlreadyProcessed)
  1322  				continue
  1323  			}
  1324  
  1325  			parentRoot := bytesutil.ToBytes32(blk.Block().ParentRoot())
  1326  			if !beaconDB.HasBlock(ctx, parentRoot) && !mc.HasInitSyncBlock(parentRoot) {
  1327  				log.Errorf("%v: %#x", errParentDoesNotExist, blk.Block().ParentRoot())
  1328  				continue
  1329  			}
  1330  
  1331  			// The block has not been processed yet and its parent exists in the database, so process it.
  1332  			require.NoError(t, beaconDB.SaveBlock(ctx, blk))
  1333  			require.NoError(t, st.SetSlot(blk.Block().Slot()))
  1334  		}
  1335  	}
  1336  	require.NoError(t, queue.stop())
  1337  
  1338  	// Check that all blocks available in the chain were produced by the queue.
  1339  	for _, blk := range chain[:orphanedBlock.Block.Slot+32] {
  1340  		blkRoot, err := blk.Block.HashTreeRoot()
  1341  		require.NoError(t, err)
  1342  		require.Equal(t, true, beaconDB.HasBlock(ctx, blkRoot) || mc.HasInitSyncBlock(blkRoot), "slot %d", blk.Block.Slot)
  1343  	}
  1344  }