github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/engine/common/follower/integration_test.go

package follower

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/dgraph-io/badger/v2"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"go.uber.org/atomic"

	"github.com/onflow/flow-go/consensus"
	"github.com/onflow/flow-go/consensus/hotstuff"
	"github.com/onflow/flow-go/consensus/hotstuff/mocks"
	"github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/model/messages"
	"github.com/onflow/flow-go/module/compliance"
	moduleconsensus "github.com/onflow/flow-go/module/finalizer/consensus"
	"github.com/onflow/flow-go/module/irrecoverable"
	"github.com/onflow/flow-go/module/metrics"
	module "github.com/onflow/flow-go/module/mock"
	"github.com/onflow/flow-go/module/trace"
	moduleutil "github.com/onflow/flow-go/module/util"
	"github.com/onflow/flow-go/network/mocknetwork"
	pbadger "github.com/onflow/flow-go/state/protocol/badger"
	"github.com/onflow/flow-go/state/protocol/events"
	"github.com/onflow/flow-go/state/protocol/util"
	"github.com/onflow/flow-go/storage/badger/operation"
	storageutil "github.com/onflow/flow-go/storage/util"
	"github.com/onflow/flow-go/utils/unittest"
)

// TestFollowerHappyPath tests the ComplianceEngine integrated with real modules; mocked modules are used only for
// functionality that is static or implemented by our test case. It verifies that syncing batches of blocks from
// other participants results in extending the protocol state. After processing all available blocks, we check that
// the chain has the correct height and finalized block.
// We use the following setup:
//   - Number of workers: workers
//   - Number of batches submitted by each worker: batchesPerWorker
//   - Number of blocks in each batch submitted by a worker: blocksPerBatch
//
// Each worker submits batchesPerWorker*blocksPerBatch blocks.
// In total, we will submit workers*batchesPerWorker*blocksPerBatch blocks.
func TestFollowerHappyPath(t *testing.T) {
	allIdentities := unittest.CompleteIdentitySet()
	rootSnapshot := unittest.RootSnapshotFixture(allIdentities)
	unittest.RunWithBadgerDB(t, func(db *badger.DB) {
		metrics := metrics.NewNoopCollector()
		tracer := trace.NewNoopTracer()
		log := unittest.Logger()
		consumer := events.NewNoop()
		all := storageutil.StorageLayer(t, db)

		// bootstrap root snapshot
		state, err := pbadger.Bootstrap(
			metrics,
			db,
			all.Headers,
			all.Seals,
			all.Results,
			all.Blocks,
			all.QuorumCertificates,
			all.Setups,
			all.EpochCommits,
			all.EpochProtocolState,
			all.ProtocolKVStore,
			all.VersionBeacons,
			rootSnapshot,
		)
		require.NoError(t, err)
		mockTimer := util.MockBlockTimer()

		// create follower state
		followerState, err := pbadger.NewFollowerState(
			log,
			tracer,
			consumer,
			state,
			all.Index,
			all.Payloads,
			mockTimer,
		)
		require.NoError(t, err)
		finalizer := moduleconsensus.NewFinalizer(db, all.Headers, followerState, tracer)
		rootHeader, err := rootSnapshot.Head()
		require.NoError(t, err)
		rootQC, err := rootSnapshot.QuorumCertificate()
		require.NoError(t, err)
		rootProtocolState, err := rootSnapshot.ProtocolState()
		require.NoError(t, err)
		rootProtocolStateID := rootProtocolState.ID()
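		// Note: every synthetic block generated further below embeds rootProtocolStateID in its (otherwise empty)
		// payload via unittest.WithProtocolStateID, since none of the test's blocks change the protocol state.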
		// Hack EFM.
		// Since the root snapshot is created with 1000 views for the first epoch, we forcefully enter EFM
		// to avoid errors related to epoch transitions.
		err = db.Update(func(txn *badger.Txn) error {
			return operation.SetEpochEmergencyFallbackTriggered(rootHeader.ID())(txn)
		})
		require.NoError(t, err)

		consensusConsumer := pubsub.NewFollowerDistributor()
		// use real consensus modules
		forks, err := consensus.NewForks(rootHeader, all.Headers, finalizer, consensusConsumer, rootHeader, rootQC)
		require.NoError(t, err)

		// assume all proposals are valid
		validator := mocks.NewValidator(t)
		validator.On("ValidateProposal", mock.Anything).Return(nil)

		// initialize the follower loop
		followerLoop, err := hotstuff.NewFollowerLoop(unittest.Logger(), metrics, forks)
		require.NoError(t, err)

		syncCore := module.NewBlockRequester(t)
		followerCore, err := NewComplianceCore(
			unittest.Logger(),
			metrics,
			metrics,
			consensusConsumer,
			followerState,
			followerLoop,
			validator,
			syncCore,
			tracer,
		)
		require.NoError(t, err)

		me := module.NewLocal(t)
		nodeID := unittest.IdentifierFixture()
		me.On("NodeID").Return(nodeID).Maybe()

		net := mocknetwork.NewNetwork(t)
		con := mocknetwork.NewConduit(t)
		net.On("Register", mock.Anything, mock.Anything).Return(con, nil)

		// use real engine
		engine, err := NewComplianceLayer(
			unittest.Logger(),
			net,
			me,
			metrics,
			all.Headers,
			rootHeader,
			followerCore,
			compliance.DefaultConfig(),
		)
		require.NoError(t, err)
		// don't forget to subscribe for finalization notifications
		consensusConsumer.AddOnBlockFinalizedConsumer(engine.OnFinalizedBlock)

		// start hotstuff logic and follower engine
		ctx, cancel, errs := irrecoverable.WithSignallerAndCancel(context.Background())
		followerLoop.Start(ctx)
		engine.Start(ctx)
		unittest.RequireCloseBefore(t, moduleutil.AllReady(engine, followerLoop), time.Second, "engine failed to start")

		// prepare a chain of blocks; we use a continuous chain, assuming it was generated on the happy path
		workers := 5
		batchesPerWorker := 10
		blocksPerBatch := 100
		blocksPerWorker := blocksPerBatch * batchesPerWorker
		flowBlocks := unittest.ChainFixtureFrom(workers*blocksPerWorker, rootHeader)
		require.Greaterf(t, len(flowBlocks), defaultPendingBlocksCacheCapacity, "this test assumes that we operate with more blocks than cache's upper limit")

		// ensure sequential block views - that way we can easily know which block will be finalized after the test
		for i, block := range flowBlocks {
			block.Header.View = block.Header.Height
			block.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)))
			if i > 0 {
				block.Header.ParentView = flowBlocks[i-1].Header.View
				block.Header.ParentID = flowBlocks[i-1].Header.ID()
			}
		}
		pendingBlocks := flowBlocksToBlockProposals(flowBlocks...)

		// To determine the block that we expect to be finalized under the 2-chain finalization rule, consider the
		// last few blocks in `pendingBlocks`:
		//     ... <-- X <-- Y <-- Z
		//                  ╰─────╯
		//            2-chain on top of X
		// Hence, we expect X to be finalized, which has the index `len(pendingBlocks)-3`.
		// Note: the HotStuff Follower does not see block Z (as there is no QC for Z proving its validity).
		// Instead, it sees the certified block
		//     [◄(X) Y] ◄(Y)
		// where ◄(B) denotes a QC for block B.
		targetBlockHeight := pendingBlocks[len(pendingBlocks)-3].Block.Header.Height
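
		// Each worker below repeatedly re-submits its slice of the chain in batches of blocksPerBatch blocks until
		// the test signals it to stop. Since more blocks are submitted than the pending cache can hold (see the
		// require.Greaterf assertion above), this also exercises cache eviction and repeated delivery of
		// already-processed blocks, rather than a single clean pass over the chain.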
Instead, it sees the certified block 184 // [◄(X) Y] ◄(Y) 185 // where ◄(B) denotes a QC for block B 186 targetBlockHeight := pendingBlocks[len(pendingBlocks)-3].Block.Header.Height 187 188 // emulate syncing logic, where we push same blocks over and over. 189 originID := unittest.IdentifierFixture() 190 submittingBlocks := atomic.NewBool(true) 191 var wg sync.WaitGroup 192 wg.Add(workers) 193 for i := 0; i < workers; i++ { 194 go func(blocks []*messages.BlockProposal) { 195 defer wg.Done() 196 for submittingBlocks.Load() { 197 for batch := 0; batch < batchesPerWorker; batch++ { 198 engine.OnSyncedBlocks(flow.Slashable[[]*messages.BlockProposal]{ 199 OriginID: originID, 200 Message: blocks[batch*blocksPerBatch : (batch+1)*blocksPerBatch], 201 }) 202 } 203 } 204 }(pendingBlocks[i*blocksPerWorker : (i+1)*blocksPerWorker]) 205 } 206 207 // wait for target block to become finalized, this might take a while. 208 require.Eventually(t, func() bool { 209 final, err := followerState.Final().Head() 210 require.NoError(t, err) 211 return final.Height == targetBlockHeight 212 }, time.Minute, time.Second, "expect to process all blocks before timeout") 213 214 // shutdown and cleanup test 215 submittingBlocks.Store(false) 216 unittest.RequireReturnsBefore(t, wg.Wait, time.Second, "expect workers to stop producing") 217 cancel() 218 unittest.RequireCloseBefore(t, moduleutil.AllDone(engine, followerLoop), time.Second, "engine failed to stop") 219 select { 220 case err := <-errs: 221 require.NoError(t, err) 222 default: 223 } 224 }) 225 }