github.com/MetalBlockchain/metalgo@v1.11.9/snow/networking/handler/handler_test.go

// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package handler

import (
	"context"
	"errors"
	"sync"
	"testing"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/stretchr/testify/require"
	"go.uber.org/mock/gomock"

	"github.com/MetalBlockchain/metalgo/ids"
	"github.com/MetalBlockchain/metalgo/message"
	"github.com/MetalBlockchain/metalgo/network/p2p"
	"github.com/MetalBlockchain/metalgo/snow"
	"github.com/MetalBlockchain/metalgo/snow/engine/common"
	"github.com/MetalBlockchain/metalgo/snow/networking/tracker"
	"github.com/MetalBlockchain/metalgo/snow/snowtest"
	"github.com/MetalBlockchain/metalgo/snow/validators"
	"github.com/MetalBlockchain/metalgo/subnets"
	"github.com/MetalBlockchain/metalgo/utils/logging"
	"github.com/MetalBlockchain/metalgo/utils/math/meter"
	"github.com/MetalBlockchain/metalgo/utils/resource"
	"github.com/MetalBlockchain/metalgo/utils/set"
	"github.com/MetalBlockchain/metalgo/version"

	p2ppb "github.com/MetalBlockchain/metalgo/proto/pb/p2p"
	commontracker "github.com/MetalBlockchain/metalgo/snow/engine/common/tracker"
)

const testThreadPoolSize = 2

var errFatal = errors.New("error should cause handler to close")

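// TestHandlerDropsTimedOutMessages verifies that a message whose deadline has
// already passed is dropped, while a message with a live deadline is delivered
// to the engine.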
func TestHandlerDropsTimedOutMessages(t *testing.T) {
	require := require.New(t)

	called := make(chan struct{})

	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)

	vdrs := validators.NewManager()
	vdr0 := ids.GenerateTestNodeID()
	require.NoError(vdrs.AddStaker(ctx.SubnetID, vdr0, nil, ids.Empty, 1))

	resourceTracker, err := tracker.NewResourceTracker(
		prometheus.NewRegistry(),
		resource.NoUsage,
		meter.ContinuousFactory{},
		time.Second,
	)
	require.NoError(err)

	peerTracker, err := p2p.NewPeerTracker(
		logging.NoLog{},
		"",
		prometheus.NewRegistry(),
		nil,
		version.CurrentApp,
	)
	require.NoError(err)

	handlerIntf, err := New(
		ctx,
		vdrs,
		nil,
		time.Second,
		testThreadPoolSize,
		resourceTracker,
		validators.UnhandledSubnetConnector,
		subnets.New(ctx.NodeID, subnets.Config{}),
		commontracker.NewPeers(),
		peerTracker,
		prometheus.NewRegistry(),
	)
	require.NoError(err)
	handler := handlerIntf.(*handler)

	bootstrapper := &common.BootstrapperTest{
		EngineTest: common.EngineTest{
			T: t,
		},
	}
	bootstrapper.Default(false)
	bootstrapper.ContextF = func() *snow.ConsensusContext {
		return ctx
	}
	bootstrapper.GetAcceptedFrontierF = func(context.Context, ids.NodeID, uint32) error {
		require.FailNow("GetAcceptedFrontier message should have timed out")
		return nil
	}
	bootstrapper.GetAcceptedF = func(context.Context, ids.NodeID, uint32, set.Set[ids.ID]) error {
		called <- struct{}{}
		return nil
	}
	handler.SetEngineManager(&EngineManager{
		Snowman: &Engine{
			Bootstrapper: bootstrapper,
		},
	})
	ctx.State.Set(snow.EngineState{
		Type:  p2ppb.EngineType_ENGINE_TYPE_SNOWMAN,
		State: snow.Bootstrapping, // assume bootstrapping is ongoing
	})

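	// The handler's clock starts at the present. The GetAcceptedFrontier
	// message pushed below expires immediately (0s deadline).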
	pastTime := time.Now()
	handler.clock.Set(pastTime)

	nodeID := ids.EmptyNodeID
	reqID := uint32(1)
	chainID := ids.Empty
	msg := Message{
		InboundMessage: message.InboundGetAcceptedFrontier(chainID, reqID, 0*time.Second, nodeID),
		EngineType:     p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED,
	}
	handler.Push(context.Background(), msg)

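	// Advance the handler's clock past the first message's expiration so it
	// is dropped once the handler starts; the GetAccepted message below has a
	// 1s deadline and should still be delivered.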
	currentTime := time.Now().Add(time.Second)
	handler.clock.Set(currentTime)

	reqID++
	msg = Message{
		InboundMessage: message.InboundGetAccepted(chainID, reqID, 1*time.Second, nil, nodeID),
		EngineType:     p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED,
	}
	handler.Push(context.Background(), msg)

	bootstrapper.StartF = func(context.Context, uint32) error {
		return nil
	}

	handler.Start(context.Background(), false)

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	select {
	case <-ticker.C:
		require.FailNow("Calling engine function timed out")
	case <-called:
	}
}

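// TestHandlerClosesOnError verifies that a fatal error returned by the engine
// shuts the handler down and fires the callback registered with SetOnStopped.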
func TestHandlerClosesOnError(t *testing.T) {
	require := require.New(t)

	closed := make(chan struct{}, 1)
	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)

	vdrs := validators.NewManager()
	require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1))

	resourceTracker, err := tracker.NewResourceTracker(
		prometheus.NewRegistry(),
		resource.NoUsage,
		meter.ContinuousFactory{},
		time.Second,
	)
	require.NoError(err)

	peerTracker, err := p2p.NewPeerTracker(
		logging.NoLog{},
		"",
		prometheus.NewRegistry(),
		nil,
		version.CurrentApp,
	)
	require.NoError(err)

	handlerIntf, err := New(
		ctx,
		vdrs,
		nil,
		time.Second,
		testThreadPoolSize,
		resourceTracker,
		validators.UnhandledSubnetConnector,
		subnets.New(ctx.NodeID, subnets.Config{}),
		commontracker.NewPeers(),
		peerTracker,
		prometheus.NewRegistry(),
	)
	require.NoError(err)
	handler := handlerIntf.(*handler)

	handler.clock.Set(time.Now())
	handler.SetOnStopped(func() {
		closed <- struct{}{}
	})

	bootstrapper := &common.BootstrapperTest{
		EngineTest: common.EngineTest{
			T: t,
		},
	}
	bootstrapper.Default(false)
	bootstrapper.ContextF = func() *snow.ConsensusContext {
		return ctx
	}
	bootstrapper.GetAcceptedFrontierF = func(context.Context, ids.NodeID, uint32) error {
		return errFatal
	}

	engine := &common.EngineTest{T: t}
	engine.Default(false)
	engine.ContextF = func() *snow.ConsensusContext {
		return ctx
	}

	handler.SetEngineManager(&EngineManager{
		Snowman: &Engine{
			Bootstrapper: bootstrapper,
			Consensus:    engine,
		},
	})

	// assume bootstrapping is ongoing so that InboundGetAcceptedFrontier
	// should normally be handled
	ctx.State.Set(snow.EngineState{
		Type:  p2ppb.EngineType_ENGINE_TYPE_SNOWMAN,
		State: snow.Bootstrapping,
	})

	bootstrapper.StartF = func(context.Context, uint32) error {
		return nil
	}

	handler.Start(context.Background(), false)

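	// Pushing a GetAcceptedFrontier message makes the bootstrapper return
	// errFatal, which should shut the handler down and fire the callback
	// above.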
	nodeID := ids.EmptyNodeID
	reqID := uint32(1)
	deadline := time.Nanosecond
	msg := Message{
		InboundMessage: message.InboundGetAcceptedFrontier(ids.Empty, reqID, deadline, nodeID),
		EngineType:     p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED,
	}
	handler.Push(context.Background(), msg)

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	select {
	case <-ticker.C:
		require.FailNow("Handler shutdown timed out")
	case <-closed:
	}
}

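// TestHandlerDropsGossipDuringBootstrapping verifies that gossip is dropped
// while bootstrapping is ongoing and that other internal messages, such as
// GetFailed, are still delivered to the bootstrapper.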
func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) {
	require := require.New(t)

	closed := make(chan struct{}, 1)
	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	vdrs := validators.NewManager()
	require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1))

	resourceTracker, err := tracker.NewResourceTracker(
		prometheus.NewRegistry(),
		resource.NoUsage,
		meter.ContinuousFactory{},
		time.Second,
	)
	require.NoError(err)

	peerTracker, err := p2p.NewPeerTracker(
		logging.NoLog{},
		"",
		prometheus.NewRegistry(),
		nil,
		version.CurrentApp,
	)
	require.NoError(err)

	handlerIntf, err := New(
		ctx,
		vdrs,
		nil,
		1, // minimal gossip frequency, so gossip fires effectively immediately
		testThreadPoolSize,
		resourceTracker,
		validators.UnhandledSubnetConnector,
		subnets.New(ctx.NodeID, subnets.Config{}),
		commontracker.NewPeers(),
		peerTracker,
		prometheus.NewRegistry(),
	)
	require.NoError(err)
	handler := handlerIntf.(*handler)

	handler.clock.Set(time.Now())

	bootstrapper := &common.BootstrapperTest{
		EngineTest: common.EngineTest{
			T: t,
		},
	}
	bootstrapper.Default(false)
	bootstrapper.ContextF = func() *snow.ConsensusContext {
		return ctx
	}
	bootstrapper.GetFailedF = func(context.Context, ids.NodeID, uint32) error {
		closed <- struct{}{}
		return nil
	}
	handler.SetEngineManager(&EngineManager{
		Snowman: &Engine{
			Bootstrapper: bootstrapper,
		},
	})
	ctx.State.Set(snow.EngineState{
		Type:  p2ppb.EngineType_ENGINE_TYPE_SNOWMAN,
		State: snow.Bootstrapping, // assume bootstrapping is ongoing
	})

	bootstrapper.StartF = func(context.Context, uint32) error {
		return nil
	}

	handler.Start(context.Background(), false)

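	// While gossip ticks are dropped during bootstrapping, an internal
	// GetFailed message should still be delivered to the bootstrapper.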
	nodeID := ids.EmptyNodeID
	chainID := ids.Empty
	reqID := uint32(1)
	inInboundMessage := Message{
		InboundMessage: message.InternalGetFailed(nodeID, chainID, reqID),
		EngineType:     p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED,
	}
	handler.Push(context.Background(), inInboundMessage)

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	select {
	case <-ticker.C:
		require.FailNow("Calling GetFailed timed out")
	case <-closed:
	}
}

// TestHandlerDispatchInternal tests that messages from the VM are dispatched
// to the consensus engine.
func TestHandlerDispatchInternal(t *testing.T) {
	require := require.New(t)

	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	msgFromVMChan := make(chan common.Message)
	vdrs := validators.NewManager()
	require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1))

	resourceTracker, err := tracker.NewResourceTracker(
		prometheus.NewRegistry(),
		resource.NoUsage,
		meter.ContinuousFactory{},
		time.Second,
	)
	require.NoError(err)

	peerTracker, err := p2p.NewPeerTracker(
		logging.NoLog{},
		"",
		prometheus.NewRegistry(),
		nil,
		version.CurrentApp,
	)
	require.NoError(err)

	handler, err := New(
		ctx,
		vdrs,
		msgFromVMChan,
		time.Second,
		testThreadPoolSize,
		resourceTracker,
		validators.UnhandledSubnetConnector,
		subnets.New(ctx.NodeID, subnets.Config{}),
		commontracker.NewPeers(),
		peerTracker,
		prometheus.NewRegistry(),
	)
	require.NoError(err)

	bootstrapper := &common.BootstrapperTest{
		EngineTest: common.EngineTest{
			T: t,
		},
	}
	bootstrapper.Default(false)

	engine := &common.EngineTest{T: t}
	engine.Default(false)
	engine.ContextF = func() *snow.ConsensusContext {
		return ctx
	}

	wg := &sync.WaitGroup{}
	engine.NotifyF = func(context.Context, common.Message) error {
		wg.Done()
		return nil
	}

	handler.SetEngineManager(&EngineManager{
		Snowman: &Engine{
			Bootstrapper: bootstrapper,
			Consensus:    engine,
		},
	})

	ctx.State.Set(snow.EngineState{
		Type:  p2ppb.EngineType_ENGINE_TYPE_SNOWMAN,
		State: snow.NormalOp, // assume bootstrapping is done
	})

	bootstrapper.StartF = func(context.Context, uint32) error {
		return nil
	}

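	// Sending on msgFromVMChan should be routed to the consensus engine's
	// Notify, which releases the WaitGroup.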
	wg.Add(1)
	handler.Start(context.Background(), false)
	msgFromVMChan <- 0
	wg.Wait()
}

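// TestHandlerSubnetConnector verifies that the handler calls the subnet
// connector when an internal ConnectedSubnet message is received.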
func TestHandlerSubnetConnector(t *testing.T) {
	require := require.New(t)

	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	vdrs := validators.NewManager()
	require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1))

	resourceTracker, err := tracker.NewResourceTracker(
		prometheus.NewRegistry(),
		resource.NoUsage,
		meter.ContinuousFactory{},
		time.Second,
	)
	require.NoError(err)
	ctrl := gomock.NewController(t)
	connector := validators.NewMockSubnetConnector(ctrl)

	nodeID := ids.GenerateTestNodeID()
	subnetID := ids.GenerateTestID()

	peerTracker, err := p2p.NewPeerTracker(
		logging.NoLog{},
		"",
		prometheus.NewRegistry(),
		nil,
		version.CurrentApp,
	)
	require.NoError(err)

	handler, err := New(
		ctx,
		vdrs,
		nil,
		time.Second,
		testThreadPoolSize,
		resourceTracker,
		connector,
		subnets.New(ctx.NodeID, subnets.Config{}),
		commontracker.NewPeers(),
		peerTracker,
		prometheus.NewRegistry(),
	)
	require.NoError(err)

	bootstrapper := &common.BootstrapperTest{
		EngineTest: common.EngineTest{
			T: t,
		},
	}
	bootstrapper.Default(false)

	engine := &common.EngineTest{T: t}
	engine.Default(false)
	engine.ContextF = func() *snow.ConsensusContext {
		return ctx
	}

	handler.SetEngineManager(&EngineManager{
		Snowman: &Engine{
			Bootstrapper: bootstrapper,
			Consensus:    engine,
		},
	})
	ctx.State.Set(snow.EngineState{
		Type:  p2ppb.EngineType_ENGINE_TYPE_SNOWMAN,
		State: snow.NormalOp, // assume bootstrapping is done
	})

	bootstrapper.StartF = func(context.Context, uint32) error {
		return nil
	}

	handler.Start(context.Background(), false)

	// The handler should call the subnet connector when a ConnectedSubnet
	// message is received.
	var wg sync.WaitGroup
	connector.EXPECT().ConnectedSubnet(gomock.Any(), nodeID, subnetID).Do(
		func(context.Context, ids.NodeID, ids.ID) {
			wg.Done()
		})

	wg.Add(1)
	defer wg.Wait()

	subnetInboundMessage := Message{
		InboundMessage: message.InternalConnectedSubnet(nodeID, subnetID),
		EngineType:     p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED,
	}
	handler.Push(context.Background(), subnetInboundMessage)
}

// TestDynamicEngineTypeDispatch tests that messages are routed to the correct
// engine type.
func TestDynamicEngineTypeDispatch(t *testing.T) {
	tests := []struct {
		name                string
		currentEngineType   p2ppb.EngineType
		requestedEngineType p2ppb.EngineType
		setup               func(
			h Handler,
			b common.BootstrapableEngine,
			e common.Engine,
		)
	}{
		{
			name:                "current - avalanche, requested - unspecified",
			currentEngineType:   p2ppb.EngineType_ENGINE_TYPE_AVALANCHE,
			requestedEngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED,
			setup: func(h Handler, b common.BootstrapableEngine, e common.Engine) {
				h.SetEngineManager(&EngineManager{
					Avalanche: &Engine{
						StateSyncer:  nil,
						Bootstrapper: b,
						Consensus:    e,
					},
					Snowman: nil,
				})
			},
		},
		{
			name:                "current - avalanche, requested - avalanche",
			currentEngineType:   p2ppb.EngineType_ENGINE_TYPE_AVALANCHE,
			requestedEngineType: p2ppb.EngineType_ENGINE_TYPE_AVALANCHE,
			setup: func(h Handler, b common.BootstrapableEngine, e common.Engine) {
				h.SetEngineManager(&EngineManager{
					Avalanche: &Engine{
						StateSyncer:  nil,
						Bootstrapper: b,
						Consensus:    e,
					},
					Snowman: nil,
				})
			},
		},
		{
			name:                "current - snowman, requested - unspecified",
			currentEngineType:   p2ppb.EngineType_ENGINE_TYPE_SNOWMAN,
			requestedEngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED,
			setup: func(h Handler, b common.BootstrapableEngine, e common.Engine) {
				h.SetEngineManager(&EngineManager{
					Avalanche: nil,
					Snowman: &Engine{
						StateSyncer:  nil,
						Bootstrapper: b,
						Consensus:    e,
					},
				})
			},
		},
		{
			name:                "current - snowman, requested - avalanche",
			currentEngineType:   p2ppb.EngineType_ENGINE_TYPE_SNOWMAN,
			requestedEngineType: p2ppb.EngineType_ENGINE_TYPE_AVALANCHE,
			setup: func(h Handler, b common.BootstrapableEngine, e common.Engine) {
				h.SetEngineManager(&EngineManager{
					Avalanche: &Engine{
						StateSyncer:  nil,
						Bootstrapper: nil,
						Consensus:    e,
					},
					Snowman: &Engine{
						StateSyncer:  nil,
						Bootstrapper: b,
						Consensus:    nil,
					},
				})
			},
		},
		{
			name:                "current - snowman, requested - snowman",
			currentEngineType:   p2ppb.EngineType_ENGINE_TYPE_SNOWMAN,
			requestedEngineType: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN,
			setup: func(h Handler, b common.BootstrapableEngine, e common.Engine) {
				h.SetEngineManager(&EngineManager{
					Avalanche: nil,
					Snowman: &Engine{
						StateSyncer:  nil,
						Bootstrapper: b,
						Consensus:    e,
					},
				})
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			require := require.New(t)

			messageReceived := make(chan struct{})
			snowCtx := snowtest.Context(t, snowtest.CChainID)
			ctx := snowtest.ConsensusContext(snowCtx)
			vdrs := validators.NewManager()
			require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1))

			resourceTracker, err := tracker.NewResourceTracker(
				prometheus.NewRegistry(),
				resource.NoUsage,
				meter.ContinuousFactory{},
				time.Second,
			)
			require.NoError(err)

			peerTracker, err := p2p.NewPeerTracker(
				logging.NoLog{},
				"",
				prometheus.NewRegistry(),
				nil,
				version.CurrentApp,
			)
			require.NoError(err)

			handler, err := New(
				ctx,
				vdrs,
				nil,
				time.Second,
				testThreadPoolSize,
				resourceTracker,
				validators.UnhandledSubnetConnector,
				subnets.New(ids.EmptyNodeID, subnets.Config{}),
				commontracker.NewPeers(),
				peerTracker,
				prometheus.NewRegistry(),
			)
			require.NoError(err)

			bootstrapper := &common.BootstrapperTest{
				EngineTest: common.EngineTest{
					T: t,
				},
			}
			bootstrapper.Default(false)

			engine := &common.EngineTest{T: t}
			engine.Default(false)
			engine.ContextF = func() *snow.ConsensusContext {
				return ctx
			}
			engine.ChitsF = func(context.Context, ids.NodeID, uint32, ids.ID, ids.ID, ids.ID) error {
				close(messageReceived)
				return nil
			}

			test.setup(handler, bootstrapper, engine)

			ctx.State.Set(snow.EngineState{
				Type:  test.currentEngineType,
				State: snow.NormalOp, // assume bootstrapping is done
			})

			bootstrapper.StartF = func(context.Context, uint32) error {
				return nil
			}

			handler.Start(context.Background(), false)
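			// Push a Chits message tagged with the requested engine type; the
			// engine registered by setup should receive it.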
			handler.Push(context.Background(), Message{
				InboundMessage: message.InboundChits(
					ids.Empty,
					uint32(0),
					ids.Empty,
					ids.Empty,
					ids.Empty,
					ids.EmptyNodeID,
				),
				EngineType: test.requestedEngineType,
			})

			<-messageReceived
		})
	}
}

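// TestHandlerStartError verifies that starting a handler that has no engine
// provided shuts the handler down immediately.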
func TestHandlerStartError(t *testing.T) {
	require := require.New(t)

	snowCtx := snowtest.Context(t, snowtest.CChainID)
	ctx := snowtest.ConsensusContext(snowCtx)
	resourceTracker, err := tracker.NewResourceTracker(
		prometheus.NewRegistry(),
		resource.NoUsage,
		meter.ContinuousFactory{},
		time.Second,
	)
	require.NoError(err)

	peerTracker, err := p2p.NewPeerTracker(
		logging.NoLog{},
		"",
		prometheus.NewRegistry(),
		nil,
		version.CurrentApp,
	)
	require.NoError(err)

	handler, err := New(
		ctx,
		validators.NewManager(),
		nil,
		time.Second,
		testThreadPoolSize,
		resourceTracker,
		nil,
		subnets.New(ctx.NodeID, subnets.Config{}),
		commontracker.NewPeers(),
		peerTracker,
		prometheus.NewRegistry(),
	)
	require.NoError(err)

	// Starting a handler without providing an engine should immediately cause
	// the handler to shut down.
	handler.SetEngineManager(&EngineManager{})
	ctx.State.Set(snow.EngineState{
		Type:  p2ppb.EngineType_ENGINE_TYPE_SNOWMAN,
		State: snow.Initializing,
	})
	handler.Start(context.Background(), false)

	_, err = handler.AwaitStopped(context.Background())
	require.NoError(err)
}