github.com/onflow/flow-go@v0.33.17/consensus/integration/nodes_test.go

package integration_test

import (
	"context"
	"fmt"
	"os"
	"sort"
	"testing"
	"time"

	"github.com/dgraph-io/badger/v2"
	"github.com/gammazero/workerpool"
	"github.com/rs/zerolog"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	bootstrapDKG "github.com/onflow/flow-go/cmd/bootstrap/dkg"
	"github.com/onflow/flow-go/cmd/bootstrap/run"
	"github.com/onflow/flow-go/consensus"
	"github.com/onflow/flow-go/consensus/hotstuff"
	"github.com/onflow/flow-go/consensus/hotstuff/committees"
	"github.com/onflow/flow-go/consensus/hotstuff/notifications"
	"github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub"
	"github.com/onflow/flow-go/consensus/hotstuff/persister"
	hsig "github.com/onflow/flow-go/consensus/hotstuff/signature"
	"github.com/onflow/flow-go/consensus/hotstuff/timeoutaggregator"
	"github.com/onflow/flow-go/consensus/hotstuff/timeoutcollector"
	"github.com/onflow/flow-go/consensus/hotstuff/verification"
	"github.com/onflow/flow-go/consensus/hotstuff/voteaggregator"
	"github.com/onflow/flow-go/consensus/hotstuff/votecollector"
	"github.com/onflow/flow-go/crypto"
	synceng "github.com/onflow/flow-go/engine/common/synchronization"
	"github.com/onflow/flow-go/engine/consensus/compliance"
	"github.com/onflow/flow-go/engine/consensus/message_hub"
	"github.com/onflow/flow-go/model/bootstrap"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/model/flow/filter"
	"github.com/onflow/flow-go/module"
	"github.com/onflow/flow-go/module/buffer"
	builder "github.com/onflow/flow-go/module/builder/consensus"
	synccore "github.com/onflow/flow-go/module/chainsync"
	modulecompliance "github.com/onflow/flow-go/module/compliance"
	finalizer "github.com/onflow/flow-go/module/finalizer/consensus"
	"github.com/onflow/flow-go/module/id"
	"github.com/onflow/flow-go/module/irrecoverable"
	"github.com/onflow/flow-go/module/local"
	consensusMempools "github.com/onflow/flow-go/module/mempool/consensus"
	"github.com/onflow/flow-go/module/mempool/stdmap"
	"github.com/onflow/flow-go/module/metrics"
	mockmodule "github.com/onflow/flow-go/module/mock"
	msig "github.com/onflow/flow-go/module/signature"
	"github.com/onflow/flow-go/module/trace"
	"github.com/onflow/flow-go/state/protocol"
	bprotocol "github.com/onflow/flow-go/state/protocol/badger"
	"github.com/onflow/flow-go/state/protocol/blocktimer"
	"github.com/onflow/flow-go/state/protocol/events"
	"github.com/onflow/flow-go/state/protocol/inmem"
	"github.com/onflow/flow-go/state/protocol/util"
	storage "github.com/onflow/flow-go/storage/badger"
	storagemock "github.com/onflow/flow-go/storage/mock"
	"github.com/onflow/flow-go/utils/unittest"
)

const hotstuffTimeout = 500 * time.Millisecond

// RandomBeaconNodeInfo stores a consensus node's DKG participation data:
// the node's private random beacon key share plus its public DKG participant
// info (participant index and public key share).
// Each node has its own unique instance of this structure.
type RandomBeaconNodeInfo struct {
	RandomBeaconPrivKey crypto.PrivateKey
	DKGParticipant      flow.DKGParticipant
}

// ConsensusParticipant stores a node's information that remains fixed across
// epoch changes (staking key, role, network key), together with the random
// beacon info, which changes every epoch.
// It contains a mapping of DKG info per epoch.
type ConsensusParticipant struct {
	nodeInfo          bootstrap.NodeInfo
	beaconInfoByEpoch map[uint64]RandomBeaconNodeInfo
}

// ConsensusParticipants is a cache that stores information about consensus
// participants across multiple epochs.
// This structure is used to launch nodes in our integration test setup.
type ConsensusParticipants struct {
	lookup map[flow.Identifier]ConsensusParticipant // nodeID -> ConsensusParticipant
}

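// NewConsensusParticipants builds the participant cache from the bootstrap
// participant data, seeding each node's random beacon info for epoch 1.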
func NewConsensusParticipants(data *run.ParticipantData) *ConsensusParticipants {
	lookup := make(map[flow.Identifier]ConsensusParticipant)
	for _, participant := range data.Participants {
		lookup[participant.NodeID] = ConsensusParticipant{
			nodeInfo: participant.NodeInfo,
			beaconInfoByEpoch: map[uint64]RandomBeaconNodeInfo{
				1: {
					RandomBeaconPrivKey: participant.RandomBeaconPrivKey,
					DKGParticipant:      data.Lookup[participant.NodeID],
				},
			},
		}
	}
	return &ConsensusParticipants{
		lookup: lookup,
	}
}

// Lookup performs a lookup of a participant by nodeID.
func (p *ConsensusParticipants) Lookup(nodeID flow.Identifier) *ConsensusParticipant {
	participant, ok := p.lookup[nodeID]
	if ok {
		return &participant
	}
	return nil
}

// Update stores the consensus participants' random beacon info for the given epoch.
// If a node was already part of a previous epoch, its entry is updated; otherwise a new entry is created.
func (p *ConsensusParticipants) Update(epochCounter uint64, data *run.ParticipantData) {
	for _, participant := range data.Participants {
		dkgParticipant := data.Lookup[participant.NodeID]
		entry, ok := p.lookup[participant.NodeID]
		if !ok {
			entry = ConsensusParticipant{
				nodeInfo:          participant.NodeInfo,
				beaconInfoByEpoch: map[uint64]RandomBeaconNodeInfo{},
			}
		}

		entry.beaconInfoByEpoch[epochCounter] = RandomBeaconNodeInfo{
			RandomBeaconPrivKey: participant.RandomBeaconPrivKey,
			DKGParticipant:      dkgParticipant,
		}
		p.lookup[participant.NodeID] = entry
	}
}

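// Node is a test helper that bundles all components of a single consensus
// node participating in the integration test.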
type Node struct {
	db                *badger.DB
	dbDir             string
	index             int
	log               zerolog.Logger
	id                *flow.Identity
	compliance        *compliance.Engine
	sync              *synceng.Engine
	hot               module.HotStuff
	committee         *committees.Consensus
	voteAggregator    hotstuff.VoteAggregator
	timeoutAggregator hotstuff.TimeoutAggregator
	messageHub        *message_hub.MessageHub
	state             *bprotocol.ParticipantState
	headers           *storage.Headers
	net               *Network
}

// epochInfo is a helper structure for storing epoch information such as counter and final view
type epochInfo struct {
	finalView uint64
	counter   uint64
}

// buildEpochLookupList is a helper function which builds an auxiliary list of epochs
// sorted by final view; epochs whose counter or final view cannot be retrieved are skipped.
func buildEpochLookupList(epochs ...protocol.Epoch) []epochInfo {
	infos := make([]epochInfo, 0)
	for _, epoch := range epochs {
		finalView, err := epoch.FinalView()
		if err != nil {
			continue
		}
		counter, err := epoch.Counter()
		if err != nil {
			continue
		}
		infos = append(infos, epochInfo{
			finalView: finalView,
			counter:   counter,
		})
	}
	sort.Slice(infos, func(i, j int) bool {
		return infos[i].finalView < infos[j].finalView
	})
	return infos
}

// createNodes creates consensus nodes based on the input ConsensusParticipants info.
// All nodes will be started using a common parent context.
// Each node is connected to the Stopper, which will cancel the context when the
// stopping condition is reached.
// The list of created nodes, the common network hub, and a function which runs
// all the nodes together are returned.
func createNodes(t *testing.T, participants *ConsensusParticipants, rootSnapshot protocol.Snapshot, stopper *Stopper) (nodes []*Node, hub *Hub, runFor func(time.Duration)) {
	consensus, err := rootSnapshot.Identities(filter.HasRole(flow.RoleConsensus))
	require.NoError(t, err)

	epochViewLookup := buildEpochLookupList(rootSnapshot.Epochs().Current(),
		rootSnapshot.Epochs().Next())

	epochLookup := &mockmodule.EpochLookup{}
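	// mock the epoch lookup with two function return values: the first maps a
	// view to the counter of the epoch containing it, the second returns an
	// error for views beyond the final view of the last known epoch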
	epochLookup.On("EpochForViewWithFallback", mock.Anything).Return(
		func(view uint64) uint64 {
			for _, info := range epochViewLookup {
				if view <= info.finalView {
					return info.counter
				}
			}
			return 0
		}, func(view uint64) error {
			if view > epochViewLookup[len(epochViewLookup)-1].finalView {
				return fmt.Errorf("unexpected epoch transition")
			}
			return nil
		})

	hub = NewNetworkHub()
	nodes = make([]*Node, 0, len(consensus))
	for i, identity := range consensus {
		consensusParticipant := participants.Lookup(identity.NodeID)
		require.NotNil(t, consensusParticipant)
		node := createNode(t, consensusParticipant, i, identity, rootSnapshot, hub, stopper, epochLookup)
		nodes = append(nodes, node)
	}

	// create a context which will be used for all nodes
	ctx, cancel := context.WithCancel(context.Background())
	signalerCtx, _ := irrecoverable.WithSignaler(ctx)

	// create a function the test case can use to run the nodes for some maximum
	// duration and gracefully stop them after
	runFor = func(maxDuration time.Duration) {
		runNodes(signalerCtx, nodes)
		unittest.RequireCloseBefore(t, stopper.stopped, maxDuration, "expect to get signal from stopper before timeout")
		stopNodes(t, cancel, nodes)
	}

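	// register a no-op stop callback; the nodes themselves are stopped
	// explicitly via stopNodes inside runFor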
	stopper.WithStopFunc(func() {

	})

	return nodes, hub, runFor
}

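// createRootQC creates a root quorum certificate by generating root block votes
// from all consensus participants and combining them into a QC.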
func createRootQC(t *testing.T, root *flow.Block, participantData *run.ParticipantData) *flow.QuorumCertificate {
	consensusCluster := participantData.Identities()
	votes, err := run.GenerateRootBlockVotes(root, participantData)
	require.NoError(t, err)
	qc, invalidVotes, err := run.GenerateRootQC(root, votes, participantData, consensusCluster)
	require.NoError(t, err)
	require.Len(t, invalidVotes, 0)
	return qc
}

// createRootBlockData creates the genesis block for the first epoch, together with
// the corresponding execution result and seal, using real node identities.
// This function requires all consensus participants to have passed the DKG process.
func createRootBlockData(participantData *run.ParticipantData) (*flow.Block, *flow.ExecutionResult, *flow.Seal) {
	root := unittest.GenesisFixture()
	consensusParticipants := participantData.Identities()

	// add other roles to create a complete identity list
	participants := unittest.CompleteIdentitySet(consensusParticipants...)
	participants.Sort(flow.Canonical)

	dkgParticipantsKeys := make([]crypto.PublicKey, 0, len(consensusParticipants))
	for _, participant := range participants.Filter(filter.HasRole(flow.RoleConsensus)) {
		dkgParticipantsKeys = append(dkgParticipantsKeys, participantData.Lookup[participant.NodeID].KeyShare)
	}

	counter := uint64(1)
	setup := unittest.EpochSetupFixture(
		unittest.WithParticipants(participants),
		unittest.SetupWithCounter(counter),
		unittest.WithFirstView(root.Header.View),
		unittest.WithFinalView(root.Header.View+1000),
	)
	commit := unittest.EpochCommitFixture(
		unittest.CommitWithCounter(counter),
		unittest.WithClusterQCsFromAssignments(setup.Assignments),
		func(commit *flow.EpochCommit) {
			commit.DKGGroupKey = participantData.GroupKey
			commit.DKGParticipantKeys = dkgParticipantsKeys
		},
	)

	result := unittest.BootstrapExecutionResultFixture(root, unittest.GenesisStateCommitment)
	result.ServiceEvents = []flow.ServiceEvent{setup.ServiceEvent(), commit.ServiceEvent()}

	seal := unittest.Seal.Fixture(unittest.Seal.WithResult(result))

	return root, result, seal
}

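// createPrivateNodeIdentities creates n consensus node identities, in canonical
// order, each with freshly generated networking and staking private keys.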
func createPrivateNodeIdentities(n int) []bootstrap.NodeInfo {
	consensus := unittest.IdentityListFixture(n, unittest.WithRole(flow.RoleConsensus)).Sort(flow.Canonical)
	infos := make([]bootstrap.NodeInfo, 0, n)
	for _, node := range consensus {
		networkPrivKey := unittest.NetworkingPrivKeyFixture()
		stakingPrivKey := unittest.StakingPrivKeyFixture()
		nodeInfo := bootstrap.NewPrivateNodeInfo(
			node.NodeID,
			node.Role,
			node.Address,
			node.Weight,
			networkPrivKey,
			stakingPrivKey,
		)
		infos = append(infos, nodeInfo)
	}
	return infos
}

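// createConsensusIdentities creates n consensus node identities and completes
// them with random beacon keys from a local DKG run.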
func createConsensusIdentities(t *testing.T, n int) *run.ParticipantData {
	// create n consensus node participants
	consensus := createPrivateNodeIdentities(n)
	return completeConsensusIdentities(t, consensus)
}

// completeConsensusIdentities runs the DKG process and fills nodeInfos with the missing random beacon keys
func completeConsensusIdentities(t *testing.T, nodeInfos []bootstrap.NodeInfo) *run.ParticipantData {
	dkgData, err := bootstrapDKG.RandomBeaconKG(len(nodeInfos), unittest.RandomBytes(48))
	require.NoError(t, err)

	participantData := &run.ParticipantData{
		Participants: make([]run.Participant, 0, len(nodeInfos)),
		Lookup:       make(map[flow.Identifier]flow.DKGParticipant),
		GroupKey:     dkgData.PubGroupKey,
	}
	for index, node := range nodeInfos {
		participant := run.Participant{
			NodeInfo:            node,
			RandomBeaconPrivKey: dkgData.PrivKeyShares[index],
		}
		participantData.Participants = append(participantData.Participants, participant)
		participantData.Lookup[node.NodeID] = flow.DKGParticipant{
			Index:    uint(index),
			KeyShare: dkgData.PubKeyShares[index],
		}
	}

	return participantData
}

// createRootSnapshot creates the root block, generates the root QC, and builds
// a root snapshot for bootstrapping a node.
func createRootSnapshot(t *testing.T, participantData *run.ParticipantData) *inmem.Snapshot {
	root, result, seal := createRootBlockData(participantData)
	rootQC := createRootQC(t, root, participantData)

	rootSnapshot, err := inmem.SnapshotFromBootstrapState(root, result, seal, rootQC)
	require.NoError(t, err)
	return rootSnapshot
}

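// createNode bootstraps a single consensus node: it sets up storage and protocol
// state, HotStuff with its vote and timeout aggregators, and the compliance,
// synchronization, and message hub engines, and registers the node with the
// stopper and the network hub.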
func createNode(
	t *testing.T,
	participant *ConsensusParticipant,
	index int,
	identity *flow.Identity,
	rootSnapshot protocol.Snapshot,
	hub *Hub,
	stopper *Stopper,
	epochLookup module.EpochLookup,
) *Node {

	db, dbDir := unittest.TempBadgerDB(t)
	metricsCollector := metrics.NewNoopCollector()
	tracer := trace.NewNoopTracer()

	headersDB := storage.NewHeaders(metricsCollector, db)
	guaranteesDB := storage.NewGuarantees(metricsCollector, db, storage.DefaultCacheSize)
	sealsDB := storage.NewSeals(metricsCollector, db)
	indexDB := storage.NewIndex(metricsCollector, db)
	resultsDB := storage.NewExecutionResults(metricsCollector, db)
	receiptsDB := storage.NewExecutionReceipts(metricsCollector, db, resultsDB, storage.DefaultCacheSize)
	payloadsDB := storage.NewPayloads(db, indexDB, guaranteesDB, sealsDB, receiptsDB, resultsDB)
	blocksDB := storage.NewBlocks(db, headersDB, payloadsDB)
	qcsDB := storage.NewQuorumCertificates(metricsCollector, db, storage.DefaultCacheSize)
	setupsDB := storage.NewEpochSetups(metricsCollector, db)
	commitsDB := storage.NewEpochCommits(metricsCollector, db)
	statusesDB := storage.NewEpochStatuses(metricsCollector, db)
	versionBeaconDB := storage.NewVersionBeacons(db)
	protocolStateEvents := events.NewDistributor()

	localID := identity.ID()

	log := unittest.Logger().With().
		Int("index", index).
		Hex("node_id", localID[:]).
		Logger()

	state, err := bprotocol.Bootstrap(
		metricsCollector,
		db,
		headersDB,
		sealsDB,
		resultsDB,
		blocksDB,
		qcsDB,
		setupsDB,
		commitsDB,
		statusesDB,
		versionBeaconDB,
		rootSnapshot,
	)
	require.NoError(t, err)

	blockTimer, err := blocktimer.NewBlockTimer(1*time.Millisecond, 90*time.Second)
	require.NoError(t, err)

	fullState, err := bprotocol.NewFullConsensusState(
		log,
		tracer,
		protocolStateEvents,
		state,
		indexDB,
		payloadsDB,
		blockTimer,
		util.MockReceiptValidator(),
		util.MockSealValidator(sealsDB),
	)
	require.NoError(t, err)

	node := &Node{
		db:    db,
		dbDir: dbDir,
		index: index,
		id:    identity,
	}

	stopper.AddNode(node)

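	// consumer that reports this node's total number of finalized blocks to the
	// Stopper, which triggers shutdown once the stopping condition is reached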
	counterConsumer := &CounterConsumer{
		finalized: func(total uint) {
			stopper.onFinalizedTotal(node.id.ID(), total)
		},
	}

	// consumer that logs all notifications, tagged with the node index
	logConsumer := notifications.NewLogConsumer(log)
	hotstuffDistributor := pubsub.NewDistributor()
	hotstuffDistributor.AddConsumer(counterConsumer)
	hotstuffDistributor.AddConsumer(logConsumer)

	require.Equal(t, participant.nodeInfo.NodeID, localID)
	privateKeys, err := participant.nodeInfo.PrivateKeys()
	require.NoError(t, err)

	// make local
	me, err := local.New(identity, privateKeys.StakingKey)
	require.NoError(t, err)

	// add a network for this node to the hub
	net := hub.AddNetwork(localID, node)

	guaranteeLimit, sealLimit := uint(1000), uint(1000)
	guarantees, err := stdmap.NewGuarantees(guaranteeLimit)
	require.NoError(t, err)

	receipts := consensusMempools.NewExecutionTree()

	seals := stdmap.NewIncorporatedResultSeals(sealLimit)

	// initialize the block builder
	build, err := builder.NewBuilder(metricsCollector, db, fullState, headersDB, sealsDB, indexDB, blocksDB, resultsDB, receiptsDB,
		guarantees, consensusMempools.NewIncorporatedResultSeals(seals, receiptsDB), receipts, tracer)
	require.NoError(t, err)

	// initialize the pending blocks cache
	cache := buffer.NewPendingBlocks()

	rootHeader, err := rootSnapshot.Head()
	require.NoError(t, err)

	rootQC, err := rootSnapshot.QuorumCertificate()
	require.NoError(t, err)

	committee, err := committees.NewConsensusCommittee(state, localID)
	require.NoError(t, err)
	protocolStateEvents.AddConsumer(committee)

	// initialize the block finalizer
	final := finalizer.NewFinalizer(db, headersDB, fullState, trace.NewNoopTracer())

	syncCore, err := synccore.New(log, synccore.DefaultConfig(), metricsCollector, rootHeader.ChainID)
	require.NoError(t, err)

	voteAggregationDistributor := pubsub.NewVoteAggregationDistributor()
	voteAggregationDistributor.AddVoteAggregationConsumer(logConsumer)

	forks, err := consensus.NewForks(rootHeader, headersDB, final, hotstuffDistributor, rootHeader, rootQC)
	require.NoError(t, err)

	validator := consensus.NewValidator(metricsCollector, committee)

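	// mock the safe beacon key storage: a beacon key is reported as available
	// exactly for those epochs present in the participant's beaconInfoByEpoch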
	keys := &storagemock.SafeBeaconKeys{}
	keys.On("RetrieveMyBeaconPrivateKey", mock.Anything).Return(
		func(epochCounter uint64) crypto.PrivateKey {
			dkgInfo, ok := participant.beaconInfoByEpoch[epochCounter]
			if !ok {
				return nil
			}
			return dkgInfo.RandomBeaconPrivKey
		},
		func(epochCounter uint64) bool {
			_, ok := participant.beaconInfoByEpoch[epochCounter]
			return ok
		},
		nil)

	// use an epoch-aware store to support test scenarios that cross epoch boundaries
	beaconKeyStore := hsig.NewEpochAwareRandomBeaconKeyStore(epochLookup, keys)

	signer := verification.NewCombinedSigner(me, beaconKeyStore)

	persist := persister.New(db, rootHeader.ChainID)

	livenessData, err := persist.GetLivenessData()
	require.NoError(t, err)

	voteProcessorFactory := votecollector.NewCombinedVoteProcessorFactory(committee, voteAggregationDistributor.OnQcConstructedFromVotes)

	createCollectorFactoryMethod := votecollector.NewStateMachineFactory(log, voteAggregationDistributor, voteProcessorFactory.Create)
	voteCollectors := voteaggregator.NewVoteCollectors(log, livenessData.CurrentView, workerpool.New(2), createCollectorFactoryMethod)

	voteAggregator, err := voteaggregator.NewVoteAggregator(
		log,
		metricsCollector,
		metricsCollector,
		metricsCollector,
		voteAggregationDistributor,
		livenessData.CurrentView,
		voteCollectors,
	)
	require.NoError(t, err)

	timeoutAggregationDistributor := pubsub.NewTimeoutAggregationDistributor()
	timeoutAggregationDistributor.AddTimeoutCollectorConsumer(logConsumer)

	timeoutProcessorFactory := timeoutcollector.NewTimeoutProcessorFactory(
		log,
		timeoutAggregationDistributor,
		committee,
		validator,
		msig.ConsensusTimeoutTag,
	)
	timeoutCollectorsFactory := timeoutcollector.NewTimeoutCollectorFactory(
		log,
		timeoutAggregationDistributor,
		timeoutProcessorFactory,
	)
	timeoutCollectors := timeoutaggregator.NewTimeoutCollectors(
		log,
		metricsCollector,
		livenessData.CurrentView,
		timeoutCollectorsFactory,
	)

	timeoutAggregator, err := timeoutaggregator.NewTimeoutAggregator(
		log,
		metricsCollector,
		metricsCollector,
		metricsCollector,
		livenessData.CurrentView,
		timeoutCollectors,
	)
	require.NoError(t, err)

	hotstuffModules := &consensus.HotstuffModules{
		Forks:                       forks,
		Validator:                   validator,
		Notifier:                    hotstuffDistributor,
		Committee:                   committee,
		Signer:                      signer,
		Persist:                     persist,
		VoteCollectorDistributor:    voteAggregationDistributor.VoteCollectorDistributor,
		TimeoutCollectorDistributor: timeoutAggregationDistributor.TimeoutCollectorDistributor,
		VoteAggregator:              voteAggregator,
		TimeoutAggregator:           timeoutAggregator,
	}

	// initialize hotstuff
	hot, err := consensus.NewParticipant(
		log,
		metricsCollector,
		metricsCollector,
		build,
		rootHeader,
		[]*flow.Header{},
		hotstuffModules,
		consensus.WithMinTimeout(hotstuffTimeout),
		func(cfg *consensus.ParticipantConfig) {
			cfg.MaxTimeoutObjectRebroadcastInterval = hotstuffTimeout
		},
	)
	require.NoError(t, err)

	// initialize the compliance engine
	compCore, err := compliance.NewCore(
		log,
		metricsCollector,
		metricsCollector,
		metricsCollector,
		metricsCollector,
		hotstuffDistributor,
		tracer,
		headersDB,
		payloadsDB,
		fullState,
		cache,
		syncCore,
		validator,
		hot,
		voteAggregator,
		timeoutAggregator,
		modulecompliance.DefaultConfig(),
	)
	require.NoError(t, err)

	comp, err := compliance.NewEngine(log, me, compCore)
	require.NoError(t, err)

	identities, err := state.Final().Identities(filter.And(
		filter.HasRole(flow.RoleConsensus),
		filter.Not(filter.HasNodeID(me.NodeID())),
	))
	require.NoError(t, err)
	idProvider := id.NewFixedIdentifierProvider(identities.NodeIDs())

	spamConfig, err := synceng.NewSpamDetectionConfig()
	require.NoError(t, err, "could not initialize spam detection config")

	// initialize the synchronization engine
	sync, err := synceng.New(
		log,
		metricsCollector,
		net,
		me,
		state,
		blocksDB,
		comp,
		syncCore,
		idProvider,
		spamConfig,
		func(cfg *synceng.Config) {
			// use a small poll and scan interval for the sync engine
			cfg.ScanInterval = 500 * time.Millisecond
			cfg.PollInterval = time.Second
		},
	)
	require.NoError(t, err)

	messageHub, err := message_hub.NewMessageHub(
		log,
		metricsCollector,
		net,
		me,
		comp,
		hot,
		voteAggregator,
		timeoutAggregator,
		state,
		payloadsDB,
	)
	require.NoError(t, err)

	hotstuffDistributor.AddConsumer(messageHub)

	node.compliance = comp
	node.sync = sync
	node.state = fullState
	node.hot = hot
	node.committee = committee
	node.voteAggregator = hotstuffModules.VoteAggregator
	node.timeoutAggregator = hotstuffModules.TimeoutAggregator
	node.messageHub = messageHub
	node.headers = headersDB
	node.net = net
	node.log = log

	return node
}

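// cleanupNodes closes each node's database and removes its temporary database directory.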
func cleanupNodes(nodes []*Node) {
	for _, n := range nodes {
		_ = n.db.Close()
		_ = os.RemoveAll(n.dbDir)
	}
}