github.com/onflow/flow-go@v0.33.17/engine/common/follower/integration_test.go

package follower

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/dgraph-io/badger/v2"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"go.uber.org/atomic"

	"github.com/onflow/flow-go/consensus"
	"github.com/onflow/flow-go/consensus/hotstuff"
	"github.com/onflow/flow-go/consensus/hotstuff/mocks"
	"github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/model/messages"
	"github.com/onflow/flow-go/module/compliance"
	moduleconsensus "github.com/onflow/flow-go/module/finalizer/consensus"
	"github.com/onflow/flow-go/module/irrecoverable"
	"github.com/onflow/flow-go/module/metrics"
	module "github.com/onflow/flow-go/module/mock"
	"github.com/onflow/flow-go/module/trace"
	moduleutil "github.com/onflow/flow-go/module/util"
	"github.com/onflow/flow-go/network/mocknetwork"
	pbadger "github.com/onflow/flow-go/state/protocol/badger"
	"github.com/onflow/flow-go/state/protocol/events"
	"github.com/onflow/flow-go/state/protocol/util"
	"github.com/onflow/flow-go/storage/badger/operation"
	storageutil "github.com/onflow/flow-go/storage/util"
	"github.com/onflow/flow-go/utils/unittest"
)

// TestFollowerHappyPath tests the ComplianceEngine integrated with real modules; mocked modules are used only for
// functionality that is static or implemented by our test case. It verifies that syncing batches of blocks from
// other participants results in extending the protocol state. After processing all available blocks, we check that
// the chain has the correct finalized height and block.
// We use the following setup:
//   Number of workers - workers
//   Number of batches submitted by each worker - batchesPerWorker
//   Number of blocks in each batch submitted by a worker - blocksPerBatch
// Each worker submits batchesPerWorker*blocksPerBatch blocks.
// In total we will submit workers*batchesPerWorker*blocksPerBatch blocks.
func TestFollowerHappyPath(t *testing.T) {
	allIdentities := unittest.CompleteIdentitySet()
	rootSnapshot := unittest.RootSnapshotFixture(allIdentities)
	unittest.RunWithBadgerDB(t, func(db *badger.DB) {
		metrics := metrics.NewNoopCollector()
		tracer := trace.NewNoopTracer()
		log := unittest.Logger()
		consumer := events.NewNoop()
		all := storageutil.StorageLayer(t, db)

		// bootstrap root snapshot
		state, err := pbadger.Bootstrap(
			metrics,
			db,
			all.Headers,
			all.Seals,
			all.Results,
			all.Blocks,
			all.QuorumCertificates,
			all.Setups,
			all.EpochCommits,
			all.Statuses,
			all.VersionBeacons,
			rootSnapshot,
		)
		require.NoError(t, err)
		mockTimer := util.MockBlockTimer()

		// create follower state
		followerState, err := pbadger.NewFollowerState(
			log,
			tracer,
			consumer,
			state,
			all.Index,
			all.Payloads,
			mockTimer,
		)
		require.NoError(t, err)
		finalizer := moduleconsensus.NewFinalizer(db, all.Headers, followerState, tracer)
		rootHeader, err := rootSnapshot.Head()
		require.NoError(t, err)
		rootQC, err := rootSnapshot.QuorumCertificate()
		require.NoError(t, err)

		// Hack EECC.
		// Since the root snapshot is created with 1000 views for the first epoch, we forcefully enter EECC
		// to avoid errors related to epoch transitions.
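		// Note: the chain constructed further below contains workers*batchesPerWorker*blocksPerBatch = 5000 blocks,
		// whose views mirror their heights, i.e. it extends far beyond the first epoch's 1000 views, which is why
		// the emergency fallback is needed here.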
		err = db.Update(func(txn *badger.Txn) error {
			return operation.SetEpochEmergencyFallbackTriggered(rootHeader.ID())(txn)
		})
		require.NoError(t, err)

		consensusConsumer := pubsub.NewFollowerDistributor()
		// use real consensus modules
		forks, err := consensus.NewForks(rootHeader, all.Headers, finalizer, consensusConsumer, rootHeader, rootQC)
		require.NoError(t, err)

		// assume all proposals are valid
		validator := mocks.NewValidator(t)
		validator.On("ValidateProposal", mock.Anything).Return(nil)

		// initialize the follower loop
		followerLoop, err := hotstuff.NewFollowerLoop(unittest.Logger(), metrics, forks)
		require.NoError(t, err)

		syncCore := module.NewBlockRequester(t)
		followerCore, err := NewComplianceCore(
			unittest.Logger(),
			metrics,
			metrics,
			consensusConsumer,
			followerState,
			followerLoop,
			validator,
			syncCore,
			tracer,
		)
		require.NoError(t, err)

		me := module.NewLocal(t)
		nodeID := unittest.IdentifierFixture()
		me.On("NodeID").Return(nodeID).Maybe()

		net := mocknetwork.NewNetwork(t)
		con := mocknetwork.NewConduit(t)
		net.On("Register", mock.Anything, mock.Anything).Return(con, nil)

		// use real engine
		engine, err := NewComplianceLayer(
			unittest.Logger(),
			net,
			me,
			metrics,
			all.Headers,
			rootHeader,
			followerCore,
			compliance.DefaultConfig(),
		)
		require.NoError(t, err)
		// don't forget to subscribe for finalization notifications
		consensusConsumer.AddOnBlockFinalizedConsumer(engine.OnFinalizedBlock)

		// start hotstuff logic and follower engine
		ctx, cancel, errs := irrecoverable.WithSignallerAndCancel(context.Background())
		followerLoop.Start(ctx)
		engine.Start(ctx)
		unittest.RequireCloseBefore(t, moduleutil.AllReady(engine, followerLoop), time.Second, "engine failed to start")

		// prepare a chain of blocks; we use a continuous chain, assuming it was generated on the happy path
		workers := 5
		batchesPerWorker := 10
		blocksPerBatch := 100
		blocksPerWorker := blocksPerBatch * batchesPerWorker
		flowBlocks := unittest.ChainFixtureFrom(workers*blocksPerWorker, rootHeader)
		require.Greaterf(t, len(flowBlocks), defaultPendingBlocksCacheCapacity, "this test assumes that we operate with more blocks than the cache's upper limit")

		// ensure sequential block views - that way we can easily know which block will be finalized after the test
		for i, block := range flowBlocks {
			block.Header.View = block.Header.Height
			if i > 0 {
				block.Header.ParentView = flowBlocks[i-1].Header.View
				block.Header.ParentID = flowBlocks[i-1].Header.ID()
			}
		}
		pendingBlocks := flowBlocksToBlockProposals(flowBlocks...)

		// Regarding the block that we expect to be finalized based on the 2-chain finalization rule, we consider the last few blocks in `pendingBlocks`:
		//   ... <-- X <-- Y <-- Z
		//            ╰─────────╯
		//        2-chain on top of X
		// Hence, we expect X to be finalized, which has the index `len(pendingBlocks)-3`.
		// Note: the HotStuff Follower does not see block Z (as there is no QC for Z proving its validity). Instead, it sees the certified block
		//   [◄(X) Y] ◄(Y)
		// where ◄(B) denotes a QC for block B.
		targetBlockHeight := pendingBlocks[len(pendingBlocks)-3].Block.Header.Height

		// emulate syncing logic, where we push the same blocks over and over
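		// Each worker keeps re-submitting its own slice of the chain in batches of blocksPerBatch until the test
		// flips submittingBlocks to false, so the engine is expected to handle duplicate and already-processed
		// proposals gracefully.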
		originID := unittest.IdentifierFixture()
		submittingBlocks := atomic.NewBool(true)
		var wg sync.WaitGroup
		wg.Add(workers)
		for i := 0; i < workers; i++ {
			go func(blocks []*messages.BlockProposal) {
				defer wg.Done()
				for submittingBlocks.Load() {
					for batch := 0; batch < batchesPerWorker; batch++ {
						engine.OnSyncedBlocks(flow.Slashable[[]*messages.BlockProposal]{
							OriginID: originID,
							Message:  blocks[batch*blocksPerBatch : (batch+1)*blocksPerBatch],
						})
					}
				}
			}(pendingBlocks[i*blocksPerWorker : (i+1)*blocksPerWorker])
		}

		// wait for the target block to become finalized; this might take a while
		require.Eventually(t, func() bool {
			final, err := followerState.Final().Head()
			require.NoError(t, err)
			return final.Height == targetBlockHeight
		}, time.Minute, time.Second, "expect to process all blocks before timeout")

		// shutdown and cleanup test
		submittingBlocks.Store(false)
		unittest.RequireReturnsBefore(t, wg.Wait, time.Second, "expect workers to stop producing")
		cancel()
		unittest.RequireCloseBefore(t, moduleutil.AllDone(engine, followerLoop), time.Second, "engine failed to stop")
		select {
		case err := <-errs:
			require.NoError(t, err)
		default:
		}
	})
}