github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/consensus/integration/nodes_test.go

     1  package integration_test
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"os"
     7  	"sort"
     8  	"testing"
     9  	"time"
    10  
    11  	"github.com/dgraph-io/badger/v2"
    12  	"github.com/gammazero/workerpool"
    13  	"github.com/onflow/crypto"
    14  	"github.com/rs/zerolog"
    15  	"github.com/stretchr/testify/mock"
    16  	"github.com/stretchr/testify/require"
    17  
    18  	bootstrapDKG "github.com/onflow/flow-go/cmd/bootstrap/dkg"
    19  	"github.com/onflow/flow-go/cmd/bootstrap/run"
    20  	"github.com/onflow/flow-go/consensus"
    21  	"github.com/onflow/flow-go/consensus/hotstuff"
    22  	"github.com/onflow/flow-go/consensus/hotstuff/committees"
    23  	"github.com/onflow/flow-go/consensus/hotstuff/notifications"
    24  	"github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub"
    25  	"github.com/onflow/flow-go/consensus/hotstuff/persister"
    26  	hsig "github.com/onflow/flow-go/consensus/hotstuff/signature"
    27  	"github.com/onflow/flow-go/consensus/hotstuff/timeoutaggregator"
    28  	"github.com/onflow/flow-go/consensus/hotstuff/timeoutcollector"
    29  	"github.com/onflow/flow-go/consensus/hotstuff/verification"
    30  	"github.com/onflow/flow-go/consensus/hotstuff/voteaggregator"
    31  	"github.com/onflow/flow-go/consensus/hotstuff/votecollector"
    32  	synceng "github.com/onflow/flow-go/engine/common/synchronization"
    33  	"github.com/onflow/flow-go/engine/consensus/compliance"
    34  	"github.com/onflow/flow-go/engine/consensus/message_hub"
    35  	"github.com/onflow/flow-go/model/bootstrap"
    36  	"github.com/onflow/flow-go/model/flow"
    37  	"github.com/onflow/flow-go/model/flow/filter"
    38  	"github.com/onflow/flow-go/module"
    39  	"github.com/onflow/flow-go/module/buffer"
    40  	builder "github.com/onflow/flow-go/module/builder/consensus"
    41  	synccore "github.com/onflow/flow-go/module/chainsync"
    42  	modulecompliance "github.com/onflow/flow-go/module/compliance"
    43  	finalizer "github.com/onflow/flow-go/module/finalizer/consensus"
    44  	"github.com/onflow/flow-go/module/id"
    45  	"github.com/onflow/flow-go/module/irrecoverable"
    46  	"github.com/onflow/flow-go/module/local"
    47  	consensusMempools "github.com/onflow/flow-go/module/mempool/consensus"
    48  	"github.com/onflow/flow-go/module/mempool/stdmap"
    49  	"github.com/onflow/flow-go/module/metrics"
    50  	mockmodule "github.com/onflow/flow-go/module/mock"
    51  	msig "github.com/onflow/flow-go/module/signature"
    52  	"github.com/onflow/flow-go/module/trace"
    53  	"github.com/onflow/flow-go/state/protocol"
    54  	bprotocol "github.com/onflow/flow-go/state/protocol/badger"
    55  	"github.com/onflow/flow-go/state/protocol/blocktimer"
    56  	"github.com/onflow/flow-go/state/protocol/events"
    57  	"github.com/onflow/flow-go/state/protocol/inmem"
    58  	"github.com/onflow/flow-go/state/protocol/protocol_state/kvstore"
    59  	protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state/state"
    60  	"github.com/onflow/flow-go/state/protocol/util"
    61  	storage "github.com/onflow/flow-go/storage/badger"
    62  	storagemock "github.com/onflow/flow-go/storage/mock"
    63  	"github.com/onflow/flow-go/utils/unittest"
    64  )
    65  
    66  const hotstuffTimeout = 500 * time.Millisecond
    67  
    68  // RandomBeaconNodeInfo stores a consensus node's participation data for the DKG process:
    69  // its random beacon private key and its DKG participant record (index and public key share).
    70  // Each node has its own unique instance of this structure.
    71  type RandomBeaconNodeInfo struct {
    72  	RandomBeaconPrivKey crypto.PrivateKey
    73  	DKGParticipant      flow.DKGParticipant
    74  }
    75  
    76  // ConsensusParticipant stores a node's information that stays fixed across epoch changes
    77  // (staking key, role, network key) together with its random beacon info, which changes every epoch.
    78  // The per-epoch DKG info is kept in a mapping indexed by epoch counter.
    79  type ConsensusParticipant struct {
    80  	nodeInfo          bootstrap.NodeInfo
    81  	beaconInfoByEpoch map[uint64]RandomBeaconNodeInfo
    82  }
    83  
    84  // ConsensusParticipants is a cache storing information about consensus participants across multiple epochs.
    85  // It is used to launch nodes in our integration test setup.
    86  type ConsensusParticipants struct {
    87  	lookup map[flow.Identifier]ConsensusParticipant // nodeID -> ConsensusParticipant
    88  }
    89  
    90  func NewConsensusParticipants(data *run.ParticipantData) *ConsensusParticipants {
    91  	lookup := make(map[flow.Identifier]ConsensusParticipant)
    92  	for _, participant := range data.Participants {
    93  		lookup[participant.NodeID] = ConsensusParticipant{
    94  			nodeInfo: participant.NodeInfo,
    95  			beaconInfoByEpoch: map[uint64]RandomBeaconNodeInfo{
    96  				1: {
    97  					RandomBeaconPrivKey: participant.RandomBeaconPrivKey,
    98  					DKGParticipant:      data.Lookup[participant.NodeID],
    99  				},
   100  			},
   101  		}
   102  	}
   103  	return &ConsensusParticipants{
   104  		lookup: lookup,
   105  	}
   106  }
   107  
   108  // Lookup returns the consensus participant with the given nodeID, or nil if it is not known.
   109  func (p *ConsensusParticipants) Lookup(nodeID flow.Identifier) *ConsensusParticipant {
   110  	participant, ok := p.lookup[nodeID]
   111  	if ok {
   112  		return &participant
   113  	}
   114  	return nil
   115  }
   116  
   117  // Update stores the consensus participants' random beacon info for the given epoch.
   118  // Nodes that were already known are updated; previously unknown nodes are added.
   119  func (p *ConsensusParticipants) Update(epochCounter uint64, data *run.ParticipantData) {
   120  	for _, participant := range data.Participants {
   121  		dkgParticipant := data.Lookup[participant.NodeID]
   122  		entry, ok := p.lookup[participant.NodeID]
   123  		if !ok {
   124  			entry = ConsensusParticipant{
   125  				nodeInfo:          participant.NodeInfo,
   126  				beaconInfoByEpoch: map[uint64]RandomBeaconNodeInfo{},
   127  			}
   128  		}
   129  
   130  		entry.beaconInfoByEpoch[epochCounter] = RandomBeaconNodeInfo{
   131  			RandomBeaconPrivKey: participant.RandomBeaconPrivKey,
   132  			DKGParticipant:      dkgParticipant,
   133  		}
   134  		p.lookup[participant.NodeID] = entry
   135  	}
   136  }
   137  
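        // Node bundles all components of a single consensus node under test: its Badger database,
        // protocol state, and the compliance, synchronization, HotStuff, vote/timeout aggregation
        // and message hub components, plus the mock network it is attached to.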
   138  type Node struct {
   139  	db                *badger.DB
   140  	dbDir             string
   141  	index             int
   142  	log               zerolog.Logger
   143  	id                *flow.Identity
   144  	compliance        *compliance.Engine
   145  	sync              *synceng.Engine
   146  	hot               module.HotStuff
   147  	committee         *committees.Consensus
   148  	voteAggregator    hotstuff.VoteAggregator
   149  	timeoutAggregator hotstuff.TimeoutAggregator
   150  	messageHub        *message_hub.MessageHub
   151  	state             *bprotocol.ParticipantState
   152  	headers           *storage.Headers
   153  	net               *Network
   154  }
   155  
   156  // epochInfo is a helper structure for storing epoch information such as counter and final view
   157  type epochInfo struct {
   158  	finalView uint64
   159  	counter   uint64
   160  }
   161  
   162  // buildEpochLookupList is a helper function which builds an auxiliary list of epoch infos sorted by final view.
   163  func buildEpochLookupList(epochs ...protocol.Epoch) []epochInfo {
   164  	infos := make([]epochInfo, 0)
   165  	for _, epoch := range epochs {
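        		// skip epochs whose final view or counter cannot be retrieved (e.g. a next epoch that is not yet committed)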
   166  		finalView, err := epoch.FinalView()
   167  		if err != nil {
   168  			continue
   169  		}
   170  		counter, err := epoch.Counter()
   171  		if err != nil {
   172  			continue
   173  		}
   174  		infos = append(infos, epochInfo{
   175  			finalView: finalView,
   176  			counter:   counter,
   177  		})
   178  	}
   179  	sort.Slice(infos, func(i, j int) bool {
   180  		return infos[i].finalView < infos[j].finalView
   181  	})
   182  	return infos
   183  }
   184  
   185  // createNodes creates consensus nodes based on the input ConsensusParticipants info.
   186  // All nodes will be started using a common parent context.
   187  // Each node is connected to the Stopper, which will cancel the context when the
   188  // stopping condition is reached.
   189  // It returns the list of created nodes, the common network hub, and a function which runs
   190  // all the nodes together for at most a given duration (see the usage sketch after this function).
   191  func createNodes(t *testing.T, participants *ConsensusParticipants, rootSnapshot protocol.Snapshot, stopper *Stopper) (nodes []*Node, hub *Hub, runFor func(time.Duration)) {
   192  	consensus, err := rootSnapshot.Identities(filter.HasRole[flow.Identity](flow.RoleConsensus))
   193  	require.NoError(t, err)
   194  
   195  	epochViewLookup := buildEpochLookupList(rootSnapshot.Epochs().Current(),
   196  		rootSnapshot.Epochs().Next())
   197  
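        	// mock the epoch lookup: EpochForViewWithFallback returns the counter of the epoch containing
        	// the given view, and errors for views beyond the final view of the last known epoch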
   198  	epochLookup := &mockmodule.EpochLookup{}
   199  	epochLookup.On("EpochForViewWithFallback", mock.Anything).Return(
   200  		func(view uint64) uint64 {
   201  			for _, info := range epochViewLookup {
   202  				if view <= info.finalView {
   203  					return info.counter
   204  				}
   205  			}
   206  			return 0
   207  		}, func(view uint64) error {
   208  			if view > epochViewLookup[len(epochViewLookup)-1].finalView {
   209  				return fmt.Errorf("unexpected epoch transition")
   210  			} else {
   211  				return nil
   212  			}
   213  		})
   214  
   215  	hub = NewNetworkHub()
   216  	nodes = make([]*Node, 0, len(consensus))
   217  	for i, identity := range consensus {
   218  		consensusParticipant := participants.Lookup(identity.NodeID)
   219  		require.NotNil(t, consensusParticipant)
   220  		node := createNode(t, consensusParticipant, i, identity, rootSnapshot, hub, stopper, epochLookup)
   221  		nodes = append(nodes, node)
   222  	}
   223  
   224  	// create a context which will be used for all nodes
   225  	ctx, cancel := context.WithCancel(context.Background())
   226  	signalerCtx, _ := irrecoverable.WithSignaler(ctx)
   227  
   228  	// create a function the test case can use to run the nodes for at most the given duration
   229  	// and then stop them gracefully.
   230  	runFor = func(maxDuration time.Duration) {
   231  		runNodes(signalerCtx, nodes)
   232  		unittest.RequireCloseBefore(t, stopper.stopped, maxDuration, "expect to get signal from stopper before timeout")
   233  		stopNodes(t, cancel, nodes)
   234  	}
   235  
   236  	stopper.WithStopFunc(func() {
   237  
   238  	})
   239  
   240  	return nodes, hub, runFor
   241  }
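        
        // A typical test flow wires these helpers together roughly as follows (a sketch only; the
        // Stopper is constructed per the test's stopping condition, defined elsewhere in this package):
        //
        //	participantData := createConsensusIdentities(t, 3)
        //	rootSnapshot := createRootSnapshot(t, participantData)
        //	participants := NewConsensusParticipants(participantData)
        //	nodes, hub, runFor := createNodes(t, participants, rootSnapshot, stopper)
        //	runFor(30 * time.Second)
        //	cleanupNodes(nodes)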
   242  
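        // createRootQC generates a root quorum certificate from the consensus participants' votes on the root block.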
   243  func createRootQC(t *testing.T, root *flow.Block, participantData *run.ParticipantData) *flow.QuorumCertificate {
   244  	consensusCluster := participantData.Identities()
   245  	votes, err := run.GenerateRootBlockVotes(root, participantData)
   246  	require.NoError(t, err)
   247  	qc, invalidVotes, err := run.GenerateRootQC(root, votes, participantData, consensusCluster)
   248  	require.NoError(t, err)
   249  	require.Len(t, invalidVotes, 0)
   250  	return qc
   251  }
   252  
   253  // createRootBlockData creates a genesis block for the first epoch with real node identities and DKG data.
   254  // This function requires all consensus participants to have completed the DKG process.
   255  func createRootBlockData(participantData *run.ParticipantData) (*flow.Block, *flow.ExecutionResult, *flow.Seal) {
   256  	root := unittest.GenesisFixture()
   257  	consensusParticipants := participantData.Identities()
   258  
   259  	// add other roles to create a complete identity list
   260  	participants := unittest.CompleteIdentitySet(consensusParticipants...)
   261  	participants.Sort(flow.Canonical[flow.Identity])
   262  
   263  	dkgParticipantsKeys := make([]crypto.PublicKey, 0, len(consensusParticipants))
   264  	for _, participant := range participants.Filter(filter.HasRole[flow.Identity](flow.RoleConsensus)) {
   265  		dkgParticipantsKeys = append(dkgParticipantsKeys, participantData.Lookup[participant.NodeID].KeyShare)
   266  	}
   267  
   268  	counter := uint64(1)
   269  	setup := unittest.EpochSetupFixture(
   270  		unittest.WithParticipants(participants.ToSkeleton()),
   271  		unittest.SetupWithCounter(counter),
   272  		unittest.WithFirstView(root.Header.View),
   273  		unittest.WithFinalView(root.Header.View+1000),
   274  	)
   275  	commit := unittest.EpochCommitFixture(
   276  		unittest.CommitWithCounter(counter),
   277  		unittest.WithClusterQCsFromAssignments(setup.Assignments),
   278  		func(commit *flow.EpochCommit) {
   279  			commit.DKGGroupKey = participantData.GroupKey
   280  			commit.DKGParticipantKeys = dkgParticipantsKeys
   281  		},
   282  	)
   283  
   284  	epochProtocolStateID := inmem.ProtocolStateFromEpochServiceEvents(setup, commit).ID()
   285  	root.SetPayload(flow.Payload{ProtocolStateID: kvstore.NewDefaultKVStore(epochProtocolStateID).ID()})
   286  	result := unittest.BootstrapExecutionResultFixture(root, unittest.GenesisStateCommitment)
   287  	result.ServiceEvents = []flow.ServiceEvent{setup.ServiceEvent(), commit.ServiceEvent()}
   288  
   289  	seal := unittest.Seal.Fixture(unittest.Seal.WithResult(result))
   290  
   291  	return root, result, seal
   292  }
   293  
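        // createPrivateNodeIdentities creates n consensus-node identities, generating fresh networking and staking private keys for each.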
   294  func createPrivateNodeIdentities(n int) []bootstrap.NodeInfo {
   295  	consensus := unittest.IdentityListFixture(n, unittest.WithRole(flow.RoleConsensus)).Sort(flow.Canonical[flow.Identity])
   296  	infos := make([]bootstrap.NodeInfo, 0, n)
   297  	for _, node := range consensus {
   298  		networkPrivKey := unittest.NetworkingPrivKeyFixture()
   299  		stakingPrivKey := unittest.StakingPrivKeyFixture()
   300  		nodeInfo := bootstrap.NewPrivateNodeInfo(
   301  			node.NodeID,
   302  			node.Role,
   303  			node.Address,
   304  			node.InitialWeight,
   305  			networkPrivKey,
   306  			stakingPrivKey,
   307  		)
   308  		infos = append(infos, nodeInfo)
   309  	}
   310  	return infos
   311  }
   312  
   313  func createConsensusIdentities(t *testing.T, n int) *run.ParticipantData {
   314  	// create n consensus node participants
   315  	consensus := createPrivateNodeIdentities(n)
   316  	return completeConsensusIdentities(t, consensus)
   317  }
   318  
   319  // completeConsensusIdentities runs the DKG process and fills nodeInfos with the missing random beacon keys
   320  func completeConsensusIdentities(t *testing.T, nodeInfos []bootstrap.NodeInfo) *run.ParticipantData {
   321  	dkgData, err := bootstrapDKG.RandomBeaconKG(len(nodeInfos), unittest.RandomBytes(48))
   322  	require.NoError(t, err)
   323  
   324  	participantData := &run.ParticipantData{
   325  		Participants: make([]run.Participant, 0, len(nodeInfos)),
   326  		Lookup:       make(map[flow.Identifier]flow.DKGParticipant),
   327  		GroupKey:     dkgData.PubGroupKey,
   328  	}
   329  	for index, node := range nodeInfos {
   330  		participant := run.Participant{
   331  			NodeInfo:            node,
   332  			RandomBeaconPrivKey: dkgData.PrivKeyShares[index],
   333  		}
   334  		participantData.Participants = append(participantData.Participants, participant)
   335  		participantData.Lookup[node.NodeID] = flow.DKGParticipant{
   336  			Index:    uint(index),
   337  			KeyShare: dkgData.PubKeyShares[index],
   338  		}
   339  	}
   340  
   341  	return participantData
   342  }
   343  
   344  // createRootSnapshot creates the root block, generates the root QC and builds a root snapshot for
   345  // bootstrapping a node.
   346  func createRootSnapshot(t *testing.T, participantData *run.ParticipantData) *inmem.Snapshot {
   347  	root, result, seal := createRootBlockData(participantData)
   348  	rootQC := createRootQC(t, root, participantData)
   349  
   350  	rootSnapshot, err := inmem.SnapshotFromBootstrapState(root, result, seal, rootQC)
   351  	require.NoError(t, err)
   352  	return rootSnapshot
   353  }
   354  
   355  func createNode(
   356  	t *testing.T,
   357  	participant *ConsensusParticipant,
   358  	index int,
   359  	identity *flow.Identity,
   360  	rootSnapshot protocol.Snapshot,
   361  	hub *Hub,
   362  	stopper *Stopper,
   363  	epochLookup module.EpochLookup,
   364  ) *Node {
   365  
   366  	db, dbDir := unittest.TempBadgerDB(t)
   367  	metricsCollector := metrics.NewNoopCollector()
   368  	tracer := trace.NewNoopTracer()
   369  
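        	// initialize all storage abstractions, backed by this node's temporary Badger database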
   370  	headersDB := storage.NewHeaders(metricsCollector, db)
   371  	guaranteesDB := storage.NewGuarantees(metricsCollector, db, storage.DefaultCacheSize)
   372  	sealsDB := storage.NewSeals(metricsCollector, db)
   373  	indexDB := storage.NewIndex(metricsCollector, db)
   374  	resultsDB := storage.NewExecutionResults(metricsCollector, db)
   375  	receiptsDB := storage.NewExecutionReceipts(metricsCollector, db, resultsDB, storage.DefaultCacheSize)
   376  	payloadsDB := storage.NewPayloads(db, indexDB, guaranteesDB, sealsDB, receiptsDB, resultsDB)
   377  	blocksDB := storage.NewBlocks(db, headersDB, payloadsDB)
   378  	qcsDB := storage.NewQuorumCertificates(metricsCollector, db, storage.DefaultCacheSize)
   379  	setupsDB := storage.NewEpochSetups(metricsCollector, db)
   380  	commitsDB := storage.NewEpochCommits(metricsCollector, db)
   381  	protocolStateDB := storage.NewProtocolState(metricsCollector, setupsDB, commitsDB, db,
   382  		storage.DefaultProtocolStateCacheSize, storage.DefaultProtocolStateByBlockIDCacheSize)
   383  	protocolKVStoreDB := storage.NewProtocolKVStore(metricsCollector, db,
   384  		storage.DefaultProtocolKVStoreCacheSize, storage.DefaultProtocolKVStoreByBlockIDCacheSize)
   385  	versionBeaconDB := storage.NewVersionBeacons(db)
   386  	protocolStateEvents := events.NewDistributor()
   387  
   388  	localID := identity.ID()
   389  
   390  	log := unittest.Logger().With().
   391  		Int("index", index).
   392  		Hex("node_id", localID[:]).
   393  		Logger()
   394  
   395  	state, err := bprotocol.Bootstrap(
   396  		metricsCollector,
   397  		db,
   398  		headersDB,
   399  		sealsDB,
   400  		resultsDB,
   401  		blocksDB,
   402  		qcsDB,
   403  		setupsDB,
   404  		commitsDB,
   405  		protocolStateDB,
   406  		protocolKVStoreDB,
   407  		versionBeaconDB,
   408  		rootSnapshot,
   409  	)
   410  	require.NoError(t, err)
   411  
   412  	blockTimer, err := blocktimer.NewBlockTimer(1*time.Millisecond, 90*time.Second)
   413  	require.NoError(t, err)
   414  
   415  	fullState, err := bprotocol.NewFullConsensusState(
   416  		log,
   417  		tracer,
   418  		protocolStateEvents,
   419  		state,
   420  		indexDB,
   421  		payloadsDB,
   422  		blockTimer,
   423  		util.MockReceiptValidator(),
   424  		util.MockSealValidator(sealsDB),
   425  	)
   426  	require.NoError(t, err)
   427  
   428  	node := &Node{
   429  		db:    db,
   430  		dbDir: dbDir,
   431  		index: index,
   432  		id:    identity,
   433  	}
   434  
   435  	stopper.AddNode(node)
   436  
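        	// report the running total of finalized blocks to the Stopper, which uses it to detect the stopping condition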
   437  	counterConsumer := &CounterConsumer{
   438  		finalized: func(total uint) {
   439  			stopper.onFinalizedTotal(node.id.ID(), total)
   440  		},
   441  	}
   442  
   443  	// log HotStuff notifications, tagged with the node index
   444  	logConsumer := notifications.NewLogConsumer(log)
   445  	hotstuffDistributor := pubsub.NewDistributor()
   446  	hotstuffDistributor.AddConsumer(counterConsumer)
   447  	hotstuffDistributor.AddConsumer(logConsumer)
   448  
   449  	require.Equal(t, participant.nodeInfo.NodeID, localID)
   450  	privateKeys, err := participant.nodeInfo.PrivateKeys()
   451  	require.NoError(t, err)
   452  
   453  	// make local
   454  	me, err := local.New(identity.IdentitySkeleton, privateKeys.StakingKey)
   455  	require.NoError(t, err)
   456  
   457  	// add a network for this node to the hub
   458  	net := hub.AddNetwork(localID, node)
   459  
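        	// mempools used by the block builder: collection guarantees, execution receipts, and incorporated-result seals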
   460  	guaranteeLimit, sealLimit := uint(1000), uint(1000)
   461  	guarantees, err := stdmap.NewGuarantees(guaranteeLimit)
   462  	require.NoError(t, err)
   463  
   464  	receipts := consensusMempools.NewExecutionTree()
   465  
   466  	seals := stdmap.NewIncorporatedResultSeals(sealLimit)
   467  
   468  	mutableProtocolState := protocol_state.NewMutableProtocolState(
   469  		protocolStateDB,
   470  		protocolKVStoreDB,
   471  		state.Params(),
   472  		headersDB,
   473  		resultsDB,
   474  		setupsDB,
   475  		commitsDB,
   476  	)
   477  
   478  	// initialize the block builder
   479  	build, err := builder.NewBuilder(
   480  		metricsCollector,
   481  		db,
   482  		fullState,
   483  		headersDB,
   484  		sealsDB,
   485  		indexDB,
   486  		blocksDB,
   487  		resultsDB,
   488  		receiptsDB,
   489  		mutableProtocolState,
   490  		guarantees,
   491  		consensusMempools.NewIncorporatedResultSeals(seals, receiptsDB),
   492  		receipts,
   493  		tracer,
   494  	)
   495  	require.NoError(t, err)
   496  
   497  	// initialize the pending blocks cache
   498  	cache := buffer.NewPendingBlocks()
   499  
   500  	rootHeader, err := rootSnapshot.Head()
   501  	require.NoError(t, err)
   502  
   503  	rootQC, err := rootSnapshot.QuorumCertificate()
   504  	require.NoError(t, err)
   505  
   506  	committee, err := committees.NewConsensusCommittee(state, localID)
   507  	require.NoError(t, err)
   508  	protocolStateEvents.AddConsumer(committee)
   509  
   510  	// initialize the block finalizer
   511  	final := finalizer.NewFinalizer(db, headersDB, fullState, trace.NewNoopTracer())
   512  
   513  	syncCore, err := synccore.New(log, synccore.DefaultConfig(), metricsCollector, rootHeader.ChainID)
   514  	require.NoError(t, err)
   515  
   516  	voteAggregationDistributor := pubsub.NewVoteAggregationDistributor()
   517  	voteAggregationDistributor.AddVoteAggregationConsumer(logConsumer)
   518  
   519  	forks, err := consensus.NewForks(rootHeader, headersDB, final, hotstuffDistributor, rootHeader, rootQC)
   520  	require.NoError(t, err)
   521  
   522  	validator := consensus.NewValidator(metricsCollector, committee)
   523  	require.NoError(t, err)
   524  
   525  	keys := &storagemock.SafeBeaconKeys{}
   526  	// a random beacon private key is available only for epochs in which this node took part in the DKG
   527  	keys.On("RetrieveMyBeaconPrivateKey", mock.Anything).Return(
   528  		func(epochCounter uint64) crypto.PrivateKey {
   529  			dkgInfo, ok := participant.beaconInfoByEpoch[epochCounter]
   530  			if !ok {
   531  				return nil
   532  			}
   533  			return dkgInfo.RandomBeaconPrivKey
   534  		},
   535  		func(epochCounter uint64) bool {
   536  			_, ok := participant.beaconInfoByEpoch[epochCounter]
   537  			return ok
   538  		},
   539  		nil)
   540  
   541  	// use an epoch-aware store to support test scenarios with epoch transitions
   542  	beaconKeyStore := hsig.NewEpochAwareRandomBeaconKeyStore(epochLookup, keys)
   543  
   544  	signer := verification.NewCombinedSigner(me, beaconKeyStore)
   545  
   546  	persist := persister.New(db, rootHeader.ChainID)
   547  
   548  	livenessData, err := persist.GetLivenessData()
   549  	require.NoError(t, err)
   550  
   551  	voteProcessorFactory := votecollector.NewCombinedVoteProcessorFactory(committee, voteAggregationDistributor.OnQcConstructedFromVotes)
   552  
   553  	createCollectorFactoryMethod := votecollector.NewStateMachineFactory(log, voteAggregationDistributor, voteProcessorFactory.Create)
   554  	voteCollectors := voteaggregator.NewVoteCollectors(log, livenessData.CurrentView, workerpool.New(2), createCollectorFactoryMethod)
   555  
   556  	voteAggregator, err := voteaggregator.NewVoteAggregator(
   557  		log,
   558  		metricsCollector,
   559  		metricsCollector,
   560  		metricsCollector,
   561  		voteAggregationDistributor,
   562  		livenessData.CurrentView,
   563  		voteCollectors,
   564  	)
   565  	require.NoError(t, err)
   566  
   567  	timeoutAggregationDistributor := pubsub.NewTimeoutAggregationDistributor()
   568  	timeoutAggregationDistributor.AddTimeoutCollectorConsumer(logConsumer)
   569  
   570  	timeoutProcessorFactory := timeoutcollector.NewTimeoutProcessorFactory(
   571  		log,
   572  		timeoutAggregationDistributor,
   573  		committee,
   574  		validator,
   575  		msig.ConsensusTimeoutTag,
   576  	)
   577  	timeoutCollectorsFactory := timeoutcollector.NewTimeoutCollectorFactory(
   578  		log,
   579  		timeoutAggregationDistributor,
   580  		timeoutProcessorFactory,
   581  	)
   582  	timeoutCollectors := timeoutaggregator.NewTimeoutCollectors(
   583  		log,
   584  		metricsCollector,
   585  		livenessData.CurrentView,
   586  		timeoutCollectorsFactory,
   587  	)
   588  
   589  	timeoutAggregator, err := timeoutaggregator.NewTimeoutAggregator(
   590  		log,
   591  		metricsCollector,
   592  		metricsCollector,
   593  		metricsCollector,
   594  		livenessData.CurrentView,
   595  		timeoutCollectors,
   596  	)
   597  	require.NoError(t, err)
   598  
   599  	hotstuffModules := &consensus.HotstuffModules{
   600  		Forks:                       forks,
   601  		Validator:                   validator,
   602  		Notifier:                    hotstuffDistributor,
   603  		Committee:                   committee,
   604  		Signer:                      signer,
   605  		Persist:                     persist,
   606  		VoteCollectorDistributor:    voteAggregationDistributor.VoteCollectorDistributor,
   607  		TimeoutCollectorDistributor: timeoutAggregationDistributor.TimeoutCollectorDistributor,
   608  		VoteAggregator:              voteAggregator,
   609  		TimeoutAggregator:           timeoutAggregator,
   610  	}
   611  
   612  	// initialize hotstuff
   613  	hot, err := consensus.NewParticipant(
   614  		log,
   615  		metricsCollector,
   616  		metricsCollector,
   617  		build,
   618  		rootHeader,
   619  		[]*flow.Header{},
   620  		hotstuffModules,
   621  		consensus.WithMinTimeout(hotstuffTimeout),
   622  		func(cfg *consensus.ParticipantConfig) {
   623  			cfg.MaxTimeoutObjectRebroadcastInterval = hotstuffTimeout
   624  		},
   625  	)
   626  	require.NoError(t, err)
   627  
   628  	// initialize the compliance engine
   629  	compCore, err := compliance.NewCore(
   630  		log,
   631  		metricsCollector,
   632  		metricsCollector,
   633  		metricsCollector,
   634  		metricsCollector,
   635  		hotstuffDistributor,
   636  		tracer,
   637  		headersDB,
   638  		payloadsDB,
   639  		fullState,
   640  		cache,
   641  		syncCore,
   642  		validator,
   643  		hot,
   644  		voteAggregator,
   645  		timeoutAggregator,
   646  		modulecompliance.DefaultConfig(),
   647  	)
   648  	require.NoError(t, err)
   649  
   650  	comp, err := compliance.NewEngine(log, me, compCore)
   651  	require.NoError(t, err)
   652  
   653  	identities, err := state.Final().Identities(filter.And(
   654  		filter.HasRole[flow.Identity](flow.RoleConsensus),
   655  		filter.Not(filter.HasNodeID[flow.Identity](me.NodeID())),
   656  	))
   657  	require.NoError(t, err)
   658  	idProvider := id.NewFixedIdentifierProvider(identities.NodeIDs())
   659  
   660  	spamConfig, err := synceng.NewSpamDetectionConfig()
   661  	require.NoError(t, err, "could not initialize spam detection config")
   662  
   663  	// initialize the synchronization engine
   664  	sync, err := synceng.New(
   665  		log,
   666  		metricsCollector,
   667  		net,
   668  		me,
   669  		state,
   670  		blocksDB,
   671  		comp,
   672  		syncCore,
   673  		idProvider,
   674  		spamConfig,
   675  		func(cfg *synceng.Config) {
   676  			// use a small scan interval and poll interval for the sync engine
   677  			cfg.ScanInterval = 500 * time.Millisecond
   678  			cfg.PollInterval = time.Second
   679  		},
   680  	)
   681  	require.NoError(t, err)
   682  
   683  	messageHub, err := message_hub.NewMessageHub(
   684  		log,
   685  		metricsCollector,
   686  		net,
   687  		me,
   688  		comp,
   689  		hot,
   690  		voteAggregator,
   691  		timeoutAggregator,
   692  		state,
   693  		payloadsDB,
   694  	)
   695  	require.NoError(t, err)
   696  
   697  	hotstuffDistributor.AddConsumer(messageHub)
   698  
   699  	node.compliance = comp
   700  	node.sync = sync
   701  	node.state = fullState
   702  	node.hot = hot
   703  	node.committee = committee
   704  	node.voteAggregator = hotstuffModules.VoteAggregator
   705  	node.timeoutAggregator = hotstuffModules.TimeoutAggregator
   706  	node.messageHub = messageHub
   707  	node.headers = headersDB
   708  	node.net = net
   709  	node.log = log
   710  
   711  	return node
   712  }
   713  
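        // cleanupNodes closes every node's database and removes its temporary database directory.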
   714  func cleanupNodes(nodes []*Node) {
   715  	for _, n := range nodes {
   716  		_ = n.db.Close()
   717  		_ = os.RemoveAll(n.dbDir)
   718  	}
   719  }