github.com/koko1123/flow-go-1@v0.29.6/consensus/integration/nodes_test.go

package integration_test

import (
	"fmt"
	"os"
	"sort"
	"testing"
	"time"

	"github.com/dgraph-io/badger/v3"
	"github.com/gammazero/workerpool"
	"github.com/rs/zerolog"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	bootstrapDKG "github.com/koko1123/flow-go-1/cmd/bootstrap/dkg"
	"github.com/koko1123/flow-go-1/cmd/bootstrap/run"
	"github.com/koko1123/flow-go-1/consensus"
	"github.com/koko1123/flow-go-1/consensus/hotstuff"
	"github.com/koko1123/flow-go-1/consensus/hotstuff/committees"
	"github.com/koko1123/flow-go-1/consensus/hotstuff/notifications"
	"github.com/koko1123/flow-go-1/consensus/hotstuff/notifications/pubsub"
	"github.com/koko1123/flow-go-1/consensus/hotstuff/persister"
	hsig "github.com/koko1123/flow-go-1/consensus/hotstuff/signature"
	"github.com/koko1123/flow-go-1/consensus/hotstuff/verification"
	"github.com/koko1123/flow-go-1/consensus/hotstuff/voteaggregator"
	"github.com/koko1123/flow-go-1/consensus/hotstuff/votecollector"
	"github.com/onflow/flow-go/crypto"
	synceng "github.com/koko1123/flow-go-1/engine/common/synchronization"
	"github.com/koko1123/flow-go-1/engine/consensus/compliance"
	"github.com/koko1123/flow-go-1/model/bootstrap"
	"github.com/koko1123/flow-go-1/model/flow"
	"github.com/koko1123/flow-go-1/model/flow/filter"
	"github.com/koko1123/flow-go-1/model/flow/order"
	"github.com/koko1123/flow-go-1/module"
	"github.com/koko1123/flow-go-1/module/buffer"
	builder "github.com/koko1123/flow-go-1/module/builder/consensus"
	synccore "github.com/koko1123/flow-go-1/module/chainsync"
	finalizer "github.com/koko1123/flow-go-1/module/finalizer/consensus"
	"github.com/koko1123/flow-go-1/module/id"
	"github.com/koko1123/flow-go-1/module/local"
	consensusMempools "github.com/koko1123/flow-go-1/module/mempool/consensus"
	"github.com/koko1123/flow-go-1/module/mempool/stdmap"
	"github.com/koko1123/flow-go-1/module/metrics"
	mockmodule "github.com/koko1123/flow-go-1/module/mock"
	"github.com/koko1123/flow-go-1/module/trace"
	"github.com/koko1123/flow-go-1/network/mocknetwork"
	"github.com/koko1123/flow-go-1/state/protocol"
	bprotocol "github.com/koko1123/flow-go-1/state/protocol/badger"
	"github.com/koko1123/flow-go-1/state/protocol/blocktimer"
	"github.com/koko1123/flow-go-1/state/protocol/events"
	"github.com/koko1123/flow-go-1/state/protocol/inmem"
	"github.com/koko1123/flow-go-1/state/protocol/util"
	storage "github.com/koko1123/flow-go-1/storage/badger"
	storagemock "github.com/koko1123/flow-go-1/storage/mock"
	"github.com/koko1123/flow-go-1/utils/unittest"
)

const hotstuffTimeout = 100 * time.Millisecond

// RandomBeaconNodeInfo stores a consensus node's participation in the DKG process:
// the node's private random beacon key share plus its public DKG participant
// record (index and public key share). Each node has its own unique instance.
type RandomBeaconNodeInfo struct {
	RandomBeaconPrivKey crypto.PrivateKey
	DKGParticipant      flow.DKGParticipant
}

// ConsensusParticipant stores a node's information that stays fixed across epoch
// changes (staking key, role, networking key) together with its random beacon
// info, which changes every epoch. The per-epoch DKG info is kept in a map
// keyed by epoch counter.
type ConsensusParticipant struct {
	nodeInfo          bootstrap.NodeInfo
	beaconInfoByEpoch map[uint64]RandomBeaconNodeInfo
}

// ConsensusParticipants is a cache storing information about consensus
// participants across multiple epochs. It is used to launch nodes in our
// integration test setup.
type ConsensusParticipants struct {
	lookup map[flow.Identifier]ConsensusParticipant // nodeID -> ConsensusParticipant
}

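// NewConsensusParticipants builds the participant cache from bootstrap
// participant data, registering each participant's random beacon info under
// the first epoch (counter 1).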
func NewConsensusParticipants(data *run.ParticipantData) *ConsensusParticipants {
	lookup := make(map[flow.Identifier]ConsensusParticipant)
	for _, participant := range data.Participants {
		lookup[participant.NodeID] = ConsensusParticipant{
			nodeInfo: participant.NodeInfo,
			beaconInfoByEpoch: map[uint64]RandomBeaconNodeInfo{
				1: {
					RandomBeaconPrivKey: participant.RandomBeaconPrivKey,
					DKGParticipant:      data.Lookup[participant.NodeID],
				},
			},
		}
	}
	return &ConsensusParticipants{
		lookup: lookup,
	}
}

// Lookup returns the participant with the given nodeID, or nil if it is not known.
func (p *ConsensusParticipants) Lookup(nodeID flow.Identifier) *ConsensusParticipant {
	participant, ok := p.lookup[nodeID]
	if ok {
		return &participant
	}
	return nil
}

// Update stores the consensus participants' information for the given epoch.
// Nodes that were part of a previous epoch are updated; new entries are
// created for nodes that were not.
func (p *ConsensusParticipants) Update(epochCounter uint64, data *run.ParticipantData) {
	for _, participant := range data.Participants {
		dkgParticipant := data.Lookup[participant.NodeID]
		entry, ok := p.lookup[participant.NodeID]
		if !ok {
			entry = ConsensusParticipant{
				nodeInfo:          participant.NodeInfo,
				beaconInfoByEpoch: map[uint64]RandomBeaconNodeInfo{},
			}
		}

		entry.beaconInfoByEpoch[epochCounter] = RandomBeaconNodeInfo{
			RandomBeaconPrivKey: participant.RandomBeaconPrivKey,
			DKGParticipant:      dkgParticipant,
		}
		p.lookup[participant.NodeID] = entry
	}
}

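// Node bundles everything a single in-memory consensus node needs for the
// integration tests: its database, protocol state, engines, and test network.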
type Node struct {
	db         *badger.DB
	dbDir      string
	index      int
	log        zerolog.Logger
	id         *flow.Identity
	compliance *compliance.Engine
	sync       *synceng.Engine
	hot        module.HotStuff
	aggregator hotstuff.VoteAggregator
	state      *bprotocol.MutableState
	headers    *storage.Headers
	net        *Network
}

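// Shutdown shuts down the node's synchronization and compliance engines and
// blocks until both have stopped.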
func (n *Node) Shutdown() {
	<-n.sync.Done()
	<-n.compliance.Done()
}

// epochInfo is a helper structure storing an epoch's counter and final view.
type epochInfo struct {
	finalView uint64
	counter   uint64
}

// buildEpochLookupList is a helper function which builds an auxiliary list of
// epochs sorted by final view; epochs whose counter or final view cannot be
// queried are skipped.
func buildEpochLookupList(epochs ...protocol.Epoch) []epochInfo {
	infos := make([]epochInfo, 0)
	for _, epoch := range epochs {
		finalView, err := epoch.FinalView()
		if err != nil {
			continue
		}
		counter, err := epoch.Counter()
		if err != nil {
			continue
		}
		infos = append(infos, epochInfo{
			finalView: finalView,
			counter:   counter,
		})
	}
	sort.Slice(infos, func(i, j int) bool {
		return infos[i].finalView < infos[j].finalView
	})
	return infos
}

// createNodes creates a consensus node for every consensus identity in the
// root snapshot, connects all nodes to a shared in-memory network hub, and
// registers each node with the given stopper.
func createNodes(
	t *testing.T,
	participants *ConsensusParticipants,
	rootSnapshot protocol.Snapshot,
	stopper *Stopper,
) ([]*Node, *Hub) {
	consensus, err := rootSnapshot.Identities(filter.HasRole(flow.RoleConsensus))
	require.NoError(t, err)

	epochViewLookup := buildEpochLookupList(rootSnapshot.Epochs().Current(),
		rootSnapshot.Epochs().Next())

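	// mock the epoch lookup: a view resolves to the counter of the first epoch
	// whose final view is not below it; views beyond the last known epoch yield
	// an error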
	epochLookup := &mockmodule.EpochLookup{}
	epochLookup.On("EpochForViewWithFallback", mock.Anything).Return(
		func(view uint64) uint64 {
			for _, info := range epochViewLookup {
				if view <= info.finalView {
					return info.counter
				}
			}
			return 0
		}, func(view uint64) error {
			if view > epochViewLookup[len(epochViewLookup)-1].finalView {
				return fmt.Errorf("unexpected epoch transition")
			}
			return nil
		})

	hub := NewNetworkHub()
	nodes := make([]*Node, 0, len(consensus))
	for i, identity := range consensus {
		consensusParticipant := participants.Lookup(identity.NodeID)
		require.NotNil(t, consensusParticipant)
		node := createNode(t, consensusParticipant, i, identity, rootSnapshot, hub, stopper, epochLookup)
		nodes = append(nodes, node)
	}

	return nodes, hub
}

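// createRootQC generates root block votes from all participants and combines
// them into a quorum certificate for the root block.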
func createRootQC(t *testing.T, root *flow.Block, participantData *run.ParticipantData) *flow.QuorumCertificate {
	consensusCluster := participantData.Identities()
	votes, err := run.GenerateRootBlockVotes(root, participantData)
	require.NoError(t, err)
	qc, err := run.GenerateRootQC(root, votes, participantData, consensusCluster)
	require.NoError(t, err)
	return qc
}

// createRootBlockData creates the genesis block, with the first epoch's setup
// and commit service events and real node identities. It requires all
// participants to have completed the DKG process.
func createRootBlockData(participantData *run.ParticipantData) (*flow.Block, *flow.ExecutionResult, *flow.Seal) {
	root := unittest.GenesisFixture()
	consensusParticipants := participantData.Identities()

	// add other roles to create a complete identity list
	participants := unittest.CompleteIdentitySet(consensusParticipants...)
	participants.Sort(order.Canonical)

	dkgParticipantsKeys := make([]crypto.PublicKey, 0, len(consensusParticipants))
	for _, participant := range participants.Filter(filter.HasRole(flow.RoleConsensus)) {
		dkgParticipantsKeys = append(dkgParticipantsKeys, participantData.Lookup[participant.NodeID].KeyShare)
	}

	counter := uint64(1)
	setup := unittest.EpochSetupFixture(
		unittest.WithParticipants(participants),
		unittest.SetupWithCounter(counter),
		unittest.WithFirstView(root.Header.View),
		unittest.WithFinalView(root.Header.View+1000),
	)
	commit := unittest.EpochCommitFixture(
		unittest.CommitWithCounter(counter),
		unittest.WithClusterQCsFromAssignments(setup.Assignments),
		func(commit *flow.EpochCommit) {
			commit.DKGGroupKey = participantData.GroupKey
			commit.DKGParticipantKeys = dkgParticipantsKeys
		},
	)

	result := unittest.BootstrapExecutionResultFixture(root, unittest.GenesisStateCommitment)
	result.ServiceEvents = []flow.ServiceEvent{setup.ServiceEvent(), commit.ServiceEvent()}

	seal := unittest.Seal.Fixture(unittest.Seal.WithResult(result))

	return root, result, seal
}

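// createPrivateNodeIdentities creates n consensus node identities in canonical
// order, each with freshly generated networking and staking keys.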
func createPrivateNodeIdentities(n int) []bootstrap.NodeInfo {
	consensus := unittest.IdentityListFixture(n, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical)
	infos := make([]bootstrap.NodeInfo, 0, n)
	for _, node := range consensus {
		networkPrivKey := unittest.NetworkingPrivKeyFixture()
		stakingPrivKey := unittest.StakingPrivKeyFixture()
		nodeInfo := bootstrap.NewPrivateNodeInfo(
			node.NodeID,
			node.Role,
			node.Address,
			node.Weight,
			networkPrivKey,
			stakingPrivKey,
		)
		infos = append(infos, nodeInfo)
	}
	return infos
}

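// createConsensusIdentities creates n private consensus node identities and
// equips them with random beacon keys by running the DKG.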
func createConsensusIdentities(t *testing.T, n int) *run.ParticipantData {
	// create n consensus node participants
	consensus := createPrivateNodeIdentities(n)
	return completeConsensusIdentities(t, consensus)
}

// completeConsensusIdentities runs the DKG process and fills nodeInfos with the
// missing random beacon keys.
func completeConsensusIdentities(t *testing.T, nodeInfos []bootstrap.NodeInfo) *run.ParticipantData {
	dkgData, err := bootstrapDKG.RunFastKG(len(nodeInfos), unittest.RandomBytes(48))
	require.NoError(t, err)

	participantData := &run.ParticipantData{
		Participants: make([]run.Participant, 0, len(nodeInfos)),
		Lookup:       make(map[flow.Identifier]flow.DKGParticipant),
		GroupKey:     dkgData.PubGroupKey,
	}
	for index, node := range nodeInfos {
		participant := run.Participant{
			NodeInfo:            node,
			RandomBeaconPrivKey: dkgData.PrivKeyShares[index],
		}
		participantData.Participants = append(participantData.Participants, participant)
		participantData.Lookup[node.NodeID] = flow.DKGParticipant{
			Index:    uint(index),
			KeyShare: dkgData.PubKeyShares[index],
		}
	}

	return participantData
}

// createRootSnapshot creates the root block, generates the root QC, and builds
// a root snapshot for bootstrapping a node.
func createRootSnapshot(t *testing.T, participantData *run.ParticipantData) *inmem.Snapshot {
	root, result, seal := createRootBlockData(participantData)
	rootQC := createRootQC(t, root, participantData)

	rootSnapshot, err := inmem.SnapshotFromBootstrapState(root, result, seal, rootQC)
	require.NoError(t, err)
	return rootSnapshot
}

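// createNode bootstraps a single consensus node: it sets up storage and the
// protocol state, wires together the HotStuff, vote aggregation, compliance,
// and synchronization components, and registers the node with the network hub
// and the stopper.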
func createNode(
	t *testing.T,
	participant *ConsensusParticipant,
	index int,
	identity *flow.Identity,
	rootSnapshot protocol.Snapshot,
	hub *Hub,
	stopper *Stopper,
	epochLookup module.EpochLookup,
) *Node {

	db, dbDir := unittest.TempBadgerDB(t)
	metricsCollector := metrics.NewNoopCollector()
	tracer := trace.NewNoopTracer()

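	// initialize all badger-backed storage abstractions on top of the shared database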
	headersDB := storage.NewHeaders(metricsCollector, db)
	guaranteesDB := storage.NewGuarantees(metricsCollector, db, storage.DefaultCacheSize)
	sealsDB := storage.NewSeals(metricsCollector, db)
	indexDB := storage.NewIndex(metricsCollector, db)
	resultsDB := storage.NewExecutionResults(metricsCollector, db)
	receiptsDB := storage.NewExecutionReceipts(metricsCollector, db, resultsDB, storage.DefaultCacheSize)
	payloadsDB := storage.NewPayloads(db, indexDB, guaranteesDB, sealsDB, receiptsDB, resultsDB)
	blocksDB := storage.NewBlocks(db, headersDB, payloadsDB)
	setupsDB := storage.NewEpochSetups(metricsCollector, db)
	commitsDB := storage.NewEpochCommits(metricsCollector, db)
	statusesDB := storage.NewEpochStatuses(metricsCollector, db)
	consumer := events.NewDistributor()

	state, err := bprotocol.Bootstrap(metricsCollector, db, headersDB, sealsDB, resultsDB, blocksDB, setupsDB, commitsDB, statusesDB, rootSnapshot)
	require.NoError(t, err)

	blockTimer, err := blocktimer.NewBlockTimer(1*time.Millisecond, 90*time.Second)
	require.NoError(t, err)

	fullState, err := bprotocol.NewFullConsensusState(state, indexDB, payloadsDB, tracer, consumer,
		blockTimer, util.MockReceiptValidator(), util.MockSealValidator(sealsDB))
	require.NoError(t, err)

	localID := identity.ID()

	node := &Node{
		db:    db,
		dbDir: dbDir,
		index: index,
		id:    identity,
	}

	// log with node index and ID
	log := unittest.Logger().With().
		Int("index", index).
		Hex("node_id", localID[:]).
		Logger()

	stopConsumer := stopper.AddNode(node)

	counterConsumer := &CounterConsumer{
		finalized: func(total uint) {
			stopper.onFinalizedTotal(node.id.ID(), total)
		},
	}

	// consume HotStuff notifications by logging them
	logConsumer := notifications.NewLogConsumer(log)
	notifier := pubsub.NewDistributor()
	notifier.AddConsumer(stopConsumer)
	notifier.AddConsumer(counterConsumer)
	notifier.AddConsumer(logConsumer)

	cleaner := &storagemock.Cleaner{}
	cleaner.On("RunGC")

	require.Equal(t, participant.nodeInfo.NodeID, localID)
	privateKeys, err := participant.nodeInfo.PrivateKeys()
	require.NoError(t, err)

	// create the local module with this node's identity and staking key
	me, err := local.New(identity, privateKeys.StakingKey)
	require.NoError(t, err)

	// add a network for this node to the hub
	net := hub.AddNetwork(localID, node)

	guaranteeLimit, sealLimit := uint(1000), uint(1000)
	guarantees, err := stdmap.NewGuarantees(guaranteeLimit)
	require.NoError(t, err)

	receipts := consensusMempools.NewExecutionTree()

	seals := stdmap.NewIncorporatedResultSeals(sealLimit)

	// initialize the block builder
	build, err := builder.NewBuilder(metricsCollector, db, fullState, headersDB, sealsDB, indexDB, blocksDB, resultsDB, receiptsDB,
		guarantees, consensusMempools.NewIncorporatedResultSeals(seals, receiptsDB), receipts, tracer)
	require.NoError(t, err)

	// initialize the pending blocks cache
	cache := buffer.NewPendingBlocks()

	rootHeader, err := rootSnapshot.Head()
	require.NoError(t, err)

	rootQC, err := rootSnapshot.QuorumCertificate()
	require.NoError(t, err)

	committee, err := committees.NewConsensusCommittee(state, localID)
	require.NoError(t, err)

	// initialize the block finalizer
	final := finalizer.NewFinalizer(db, headersDB, fullState, trace.NewNoopTracer())

	prov := &mocknetwork.Engine{}
	prov.On("SubmitLocal", mock.Anything).Return(nil)

	syncCore, err := synccore.New(log, synccore.DefaultConfig(), metricsCollector)
	require.NoError(t, err)

	qcDistributor := pubsub.NewQCCreatedDistributor()

	forks, err := consensus.NewForks(rootHeader, headersDB, final, notifier, rootHeader, rootQC)
	require.NoError(t, err)

	validator := consensus.NewValidator(metricsCollector, committee, forks)

	keys := &storagemock.SafeBeaconKeys{}
	// return this node's beacon private key for the requested epoch, if one exists
	keys.On("RetrieveMyBeaconPrivateKey", mock.Anything).Return(
		func(epochCounter uint64) crypto.PrivateKey {
			dkgInfo, ok := participant.beaconInfoByEpoch[epochCounter]
			if !ok {
				return nil
			}
			return dkgInfo.RandomBeaconPrivKey
		},
		func(epochCounter uint64) bool {
			_, ok := participant.beaconInfoByEpoch[epochCounter]
			return ok
		},
		nil)

	// use an epoch-aware store for testing scenarios where the epoch changes
	beaconKeyStore := hsig.NewEpochAwareRandomBeaconKeyStore(epochLookup, keys)

	signer := verification.NewCombinedSigner(me, beaconKeyStore)

	persist := persister.New(db, rootHeader.ChainID)

	started, err := persist.GetStarted()
	require.NoError(t, err)

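	// set up vote aggregation: vote collectors create combined vote processors,
	// which report constructed QCs through the distributor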
	voteProcessorFactory := votecollector.NewCombinedVoteProcessorFactory(committee, qcDistributor.OnQcConstructedFromVotes)

	createCollectorFactoryMethod := votecollector.NewStateMachineFactory(log, notifier, voteProcessorFactory.Create)
	voteCollectors := voteaggregator.NewVoteCollectors(log, started, workerpool.New(2), createCollectorFactoryMethod)

	aggregator, err := voteaggregator.NewVoteAggregator(log, notifier, started, voteCollectors)
	require.NoError(t, err)

	hotstuffModules := &consensus.HotstuffModules{
		Forks:                forks,
		Validator:            validator,
		Notifier:             notifier,
		Committee:            committee,
		Signer:               signer,
		Persist:              persist,
		QCCreatedDistributor: qcDistributor,
		Aggregator:           aggregator,
	}

	// initialize the compliance engine
	compCore, err := compliance.NewCore(
		log,
		metricsCollector,
		tracer,
		metricsCollector,
		metricsCollector,
		cleaner,
		headersDB,
		payloadsDB,
		fullState,
		cache,
		syncCore,
		aggregator,
	)
	require.NoError(t, err)

	comp, err := compliance.NewEngine(log, net, me, prov, compCore)
	require.NoError(t, err)

	finalizedHeader, err := synceng.NewFinalizedHeaderCache(log, state, pubsub.NewFinalizationDistributor())
	require.NoError(t, err)

	identities, err := state.Final().Identities(filter.And(
		filter.HasRole(flow.RoleConsensus),
		filter.Not(filter.HasNodeID(me.NodeID())),
	))
	require.NoError(t, err)
	idProvider := id.NewFixedIdentifierProvider(identities.NodeIDs())

	// initialize the synchronization engine
	sync, err := synceng.New(
		log,
		metricsCollector,
		net,
		me,
		blocksDB,
		comp,
		syncCore,
		finalizedHeader,
		idProvider,
	)
	require.NoError(t, err)

	// initialize the HotStuff participant
	hot, err := consensus.NewParticipant(
		log,
		metricsCollector,
		build,
		comp,
		rootHeader,
		[]*flow.Header{},
		hotstuffModules,
		consensus.WithInitialTimeout(hotstuffTimeout),
		consensus.WithMinTimeout(hotstuffTimeout),
	)
	require.NoError(t, err)

	comp = comp.WithConsensus(hot)

	node.compliance = comp
	node.sync = sync
	node.state = fullState
	node.hot = hot
	node.aggregator = hotstuffModules.Aggregator
	node.headers = headersDB
	node.net = net
	node.log = log

	return node
}

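// cleanupNodes closes each node's database and removes its on-disk directory.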
func cleanupNodes(nodes []*Node) {
	for _, n := range nodes {
		_ = n.db.Close()
		_ = os.RemoveAll(n.dbDir)
	}
}