github.com/gnolang/gno@v0.0.0-20240520182011-228e9d0192ce/tm2/pkg/bft/consensus/replay_test.go

package consensus

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/gnolang/gno/tm2/pkg/amino"
	"github.com/gnolang/gno/tm2/pkg/bft/abci/example/kvstore"
	abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types"
	"github.com/gnolang/gno/tm2/pkg/bft/appconn"
	cfg "github.com/gnolang/gno/tm2/pkg/bft/config"
	cstypes "github.com/gnolang/gno/tm2/pkg/bft/consensus/types"
	"github.com/gnolang/gno/tm2/pkg/bft/mempool/mock"
	"github.com/gnolang/gno/tm2/pkg/bft/privval"
	"github.com/gnolang/gno/tm2/pkg/bft/proxy"
	sm "github.com/gnolang/gno/tm2/pkg/bft/state"
	"github.com/gnolang/gno/tm2/pkg/bft/types"
	walm "github.com/gnolang/gno/tm2/pkg/bft/wal"
	dbm "github.com/gnolang/gno/tm2/pkg/db"
	"github.com/gnolang/gno/tm2/pkg/db/memdb"
	"github.com/gnolang/gno/tm2/pkg/events"
	"github.com/gnolang/gno/tm2/pkg/log"
	"github.com/gnolang/gno/tm2/pkg/random"
	"github.com/gnolang/gno/tm2/pkg/testutils"
)

func TestMain(m *testing.M) {
	config, _ = ResetConfig("consensus_reactor_test")
	consensusReplayConfig, _ = ResetConfig("consensus_replay_test")
	configStateTest, _ := ResetConfig("consensus_state_test")
	configMempoolTest, _ := ResetConfig("consensus_mempool_test")
	configByzantineTest, _ := ResetConfig("consensus_byzantine_test")
	code := m.Run()
	os.RemoveAll(config.RootDir)
	os.RemoveAll(consensusReplayConfig.RootDir)
	os.RemoveAll(configStateTest.RootDir)
	os.RemoveAll(configMempoolTest.RootDir)
	os.RemoveAll(configByzantineTest.RootDir)
	os.Exit(code)
}

// These tests ensure we can always recover from failure at any part of the consensus process.
// There are two general failure scenarios: failure during consensus, and failure while applying the block.
// Only the latter interacts with the app and store,
// but the former has to deal with restrictions on re-use of priv_validator keys.
// The `WAL Tests` are for failures during the consensus;
// the `Handshake Tests` are for failures in applying the block.
// With the help of the WAL, we can recover from it all!

// ------------------------------------------------------------------------------------------
// WAL Tests

// TODO: It would be better to verify explicitly which states we can recover from without the wal
// and which ones we need the wal for - then we'd also be able to only flush the
// wal writer when we need to, instead of with every message.

func startNewConsensusStateAndWaitForBlock(
	t *testing.T,
	consensusReplayConfig *cfg.Config,
	consensusReplayGenesisFile string,
	lastBlockHeight int64,
	blockDB dbm.DB,
	stateDB dbm.DB,
) {
	t.Helper()

	logger := log.NewTestingLogger(t)
	state, _ := sm.LoadStateFromDBOrGenesisFile(stateDB, consensusReplayGenesisFile)
	privValidator := loadPrivValidator(consensusReplayConfig)
	cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, kvstore.NewKVStoreApplication(), blockDB)
	cs.SetLogger(logger)

	bytes, _ := os.ReadFile(cs.config.WalFile())
	t.Logf("====== WAL: \n\r%X\n", bytes)

	// This is just a signal that we haven't halted; it's not something contained
	// in the WAL itself. Assuming the consensus state is running, replay of any
	// WAL, including the empty one, should eventually be followed by a new
	// block, or else something is wrong.
	newBlockSub := subscribe(cs.evsw, types.EventNewBlock{})

	go func() {
		err := cs.Start()
		require.NoError(t, err)
	}()
	defer cs.Stop()

LOOP:
	for {
		select {
		case event, ok := <-newBlockSub:
			if !ok {
				t.Fatal("newBlockSub was cancelled")
			}
			event_ := event.(types.EventNewBlock)
			if lastBlockHeight <= event_.Block.Header.Height {
				break LOOP
			}
		case <-time.After(60 * time.Second): // XXX why so long?
			t.Fatal("Timed out waiting for new block (see trace above)")
		}
	}
}

func sendTxs(ctx context.Context, cs *ConsensusState) {
	for i := 0; i < 256; i++ {
		select {
		case <-ctx.Done():
			return
		default:
			tx := []byte{byte(i)}
			assertMempool(cs.txNotifier).CheckTx(tx, nil)
			i++
		}
	}
}

// TestWALCrash uses crashing WAL to test we can recover from any WAL failure.
func TestWALCrash(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		name            string
		initFn          func(dbm.DB, *ConsensusState, context.Context)
		lastBlockHeight int64
	}{
		{
			"empty block",
			func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) {},
			1,
		},
		{
			"many non-empty blocks",
			func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) {
				go sendTxs(ctx, cs)
			},
			3,
		},
	}

	for i, tc := range testCases {
		tc := tc
		consensusReplayConfig, genesisFile := ResetConfig(fmt.Sprintf("%s_%d", t.Name(), i))
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			crashWALandCheckLiveness(
				t,
				consensusReplayConfig,
				genesisFile,
				tc.initFn,
				tc.lastBlockHeight,
			)
		})
	}
}

func crashWALandCheckLiveness(
	t *testing.T,
	consensusReplayConfig *cfg.Config,
	genesisFile string,
	initFn func(dbm.DB, *ConsensusState, context.Context),
	lastBlockHeight int64,
) {
	t.Helper()

	crashCh := make(chan error)
	crashingWal := &crashingWAL{crashCh: crashCh, lastBlockHeight: lastBlockHeight}

	i := 1
LOOP:
	for {
		t.Logf("====== LOOP %d\n", i)

		// create consensus state from a clean slate
		logger := log.NewTestingLogger(t)
		blockDB := memdb.NewMemDB()
		stateDB := blockDB
		state, _ := sm.MakeGenesisStateFromFile(genesisFile)
		privValidator := loadPrivValidator(consensusReplayConfig)
		cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, kvstore.NewKVStoreApplication(), blockDB)
		cs.SetLogger(logger)

		// start sending transactions
		ctx, cancel := context.WithCancel(context.Background())
		initFn(stateDB, cs, ctx)

		// clean up WAL file from the previous iteration
		walFile := cs.config.WalFile()
		os.Remove(walFile)

		// set crashing WAL
		csWal, err := cs.OpenWAL(walFile)
		require.NoError(t, err)
		crashingWal.next = csWal
		// reset the message counter
		crashingWal.msgIndex = 1
		cs.wal = crashingWal

		// start consensus state
		err = cs.Start()
		require.NoError(t, err)

		i++

		select {
		case err := <-crashCh:
			t.Logf("WAL crashed: %v", err)

			// make sure we can make blocks after a crash
			startNewConsensusStateAndWaitForBlock(t, consensusReplayConfig, genesisFile, cs.Height, blockDB, stateDB)

			// stop consensus state and transaction sender (initFn)
			cs.Stop()
			cancel()

			// if we reached the required height, exit
			if _, ok := err.(ReachedLastBlockHeightError); ok {
				break LOOP
			}
		case <-time.After(10 * time.Second):
			t.Fatal("WAL did not panic for 10 seconds (check the log)")
		}
	}
}

// crashingWAL is a WAL which crashes, or rather simulates a crash, during Save
// (before and after). It remembers a message for which we last panicked
// (lastPanickedForMsgIndex), so we don't panic for it in subsequent iterations.
type crashingWAL struct {
	next            walm.WAL
	crashCh         chan error
	lastBlockHeight int64 // inclusive

	msgIndex                int // current message index
	lastPanickedForMsgIndex int // last message for which we panicked
}

var _ walm.WAL = &crashingWAL{}

// WALWriteError indicates a WAL crash.
type WALWriteError struct {
	msg string
}

func (e WALWriteError) Error() string {
	return e.msg
}

// ReachedLastBlockHeightError indicates we've reached the required consensus
// height and may exit.
type ReachedLastBlockHeightError struct {
	height int64
}

func (e ReachedLastBlockHeightError) Error() string {
	return fmt.Sprintf("reached height to stop %d", e.height)
}

func (w *crashingWAL) SetLogger(logger *slog.Logger) {
	w.next.SetLogger(logger)
}

// Write simulates WAL's crashing by sending an error to the crashCh and then
// exiting the cs.receiveRoutine.
func (w *crashingWAL) Write(m walm.WALMessage) error {
	if w.msgIndex > w.lastPanickedForMsgIndex {
		w.lastPanickedForMsgIndex = w.msgIndex
		_, file, line, _ := runtime.Caller(1)
		w.crashCh <- WALWriteError{fmt.Sprintf("failed to write %T to WAL (fileline: %s:%d)", m, file, line)}
		runtime.Goexit()
		return nil
	}

	w.msgIndex++
	return w.next.Write(m)
}

func (w *crashingWAL) WriteMetaSync(m walm.MetaMessage) error {
	// we crash once we've reached w.lastBlockHeight+1,
	// to test all the WAL lines produced during w.lastBlockHeight.
	if m.Height != 0 && m.Height == w.lastBlockHeight+1 {
		w.crashCh <- ReachedLastBlockHeightError{m.Height}
		runtime.Goexit()
		return nil
	}
	return w.next.WriteMetaSync(m)
}

func (w *crashingWAL) WriteSync(m walm.WALMessage) error {
	return w.Write(m)
}

func (w *crashingWAL) FlushAndSync() error { return w.next.FlushAndSync() }

func (w *crashingWAL) SearchForHeight(height int64, options *walm.WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
	return w.next.SearchForHeight(height, options)
}

func (w *crashingWAL) Start() error { return w.next.Start() }
func (w *crashingWAL) Stop() error  { return w.next.Stop() }
func (w *crashingWAL) Wait()        { w.next.Wait() }

// ------------------------------------------------------------------------------------------
type testSim struct {
	GenesisState sm.State
	Config       *cfg.Config
	Chain        []*types.Block
	Commits      []*types.Commit
	CleanupFunc  cleanupFunc
}

const (
	numBlocks = 6
)

var mempool = mock.Mempool{}

// ---------------------------------------
// Test handshake/replay

// 0 - all synced up
// 1 - saved block but app and state are behind
// 2 - saved block and committed but state is behind
var modes = []uint{0, 1, 2}

// Caller should call `defer sim.CleanupFunc()`
func makeTestSim(t *testing.T, name string) (sim testSim) {
	t.Helper()

	nPeers := 7
	nVals := 4
	css, genDoc, config, cleanup := randConsensusNetWithPeers(nVals, nPeers, "replay_test_"+name, newMockTickerFunc(true), newPersistentKVStoreWithPath)
	sim.Config = config
	sim.GenesisState, _ = sm.MakeGenesisState(genDoc)
	sim.CleanupFunc = cleanup

	partSize := types.BlockPartSizeBytes

	newRoundCh := subscribe(css[0].evsw, cstypes.EventNewRound{})
	proposalCh := subscribe(css[0].evsw, cstypes.EventCompleteProposal{})

	vss := make([]*validatorStub, nPeers)
	for i := 0; i < nPeers; i++ {
		vss[i] = NewValidatorStub(css[i].privValidator, i)
	}
	height, round := css[0].Height, css[0].Round
	// start the machine
	startFrom(css[0], height, round)
	incrementHeight(vss...)
	ensureNewRound(newRoundCh, height, 0)
	ensureNewProposal(proposalCh, height, round)
	rs := css[0].GetRoundState()
	signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)
	ensureNewRound(newRoundCh, height+1, 0)

	// height 2
	height++
	incrementHeight(vss...)
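	// block 2 adds a new validator (css[nVals]) to the set via a valset-change tx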
	newValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
	newValidatorTx1 := kvstore.MakeValSetChangeTx(newValidatorPubKey1, testMinPower)
	err := assertMempool(css[0].txNotifier).CheckTx(newValidatorTx1, nil)
	assert.Nil(t, err)
	propBlock, _ := css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
	propBlockParts := propBlock.MakePartSet(partSize)
	blockID := types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
	proposal := types.NewProposal(vss[1].Height, round, -1, blockID)
	if err := vss[1].SignProposal(config.ChainID(), proposal); err != nil {
		t.Fatal("failed to sign bad proposal", err)
	}

	// set the proposal block
	if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}
	ensureNewProposal(proposalCh, height, round)
	rs = css[0].GetRoundState()
	signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)
	ensureNewRound(newRoundCh, height+1, 0)

	// height 3
	height++
	incrementHeight(vss...)
	updateValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
	updateValidatorTx1 := kvstore.MakeValSetChangeTx(updateValidatorPubKey1, 25)
	err = assertMempool(css[0].txNotifier).CheckTx(updateValidatorTx1, nil)
	assert.Nil(t, err)
	propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
	propBlockParts = propBlock.MakePartSet(partSize)
	blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
	proposal = types.NewProposal(vss[2].Height, round, -1, blockID)
	if err := vss[2].SignProposal(config.ChainID(), proposal); err != nil {
		t.Fatal("failed to sign bad proposal", err)
	}

	// set the proposal block
	if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}
	ensureNewProposal(proposalCh, height, round)
	rs = css[0].GetRoundState()
	signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)
	ensureNewRound(newRoundCh, height+1, 0)

	// height 4
	height++
	incrementHeight(vss...)
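	// block 4 adds two more validators (css[nVals+1] and css[nVals+2]) via valset-change txs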
	newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey()
	newValidatorTx2 := kvstore.MakeValSetChangeTx(newValidatorPubKey2, testMinPower)
	err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx2, nil)
	assert.Nil(t, err)
	newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey()
	newValidatorTx3 := kvstore.MakeValSetChangeTx(newValidatorPubKey3, testMinPower)
	err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx3, nil)
	assert.Nil(t, err)
	propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
	propBlockParts = propBlock.MakePartSet(partSize)
	blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
	newVss := make([]*validatorStub, nVals+1)
	copy(newVss, vss[:nVals+1])
	sort.Sort(ValidatorStubsByAddress(newVss))
	selfIndex := 0
	for i, vs := range newVss {
		if vs.GetPubKey().Equals(css[0].privValidator.GetPubKey()) {
			selfIndex = i
			break
		}
	}

	proposal = types.NewProposal(vss[3].Height, round, -1, blockID)
	if err := vss[3].SignProposal(config.ChainID(), proposal); err != nil {
		t.Fatal("failed to sign bad proposal", err)
	}

	// set the proposal block
	if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}
	ensureNewProposal(proposalCh, height, round)

	removeValidatorTx2 := kvstore.MakeValSetChangeTx(newValidatorPubKey2, 0)
	err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx2, nil)
	assert.Nil(t, err)

	rs = css[0].GetRoundState()
	for i := 0; i < nVals+1; i++ {
		if i == selfIndex {
			continue
		}
		signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])
	}

	ensureNewRound(newRoundCh, height+1, 0)

	// height 5
	height++
	incrementHeight(vss...)
	ensureNewProposal(proposalCh, height, round)
	rs = css[0].GetRoundState()
	for i := 0; i < nVals+1; i++ {
		if i == selfIndex {
			continue
		}
		signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])
	}
	ensureNewRound(newRoundCh, height+1, 0)

	// height 6
	height++
	incrementHeight(vss...)
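	// block 6 removes css[nVals+2] again by submitting a zero-power valset-change tx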
	removeValidatorTx3 := kvstore.MakeValSetChangeTx(newValidatorPubKey3, 0)
	err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx3, nil)
	assert.Nil(t, err)
	propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
	propBlockParts = propBlock.MakePartSet(partSize)
	blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
	newVss = make([]*validatorStub, nVals+3)
	copy(newVss, vss[:nVals+3])
	sort.Sort(ValidatorStubsByAddress(newVss))
	for i, vs := range newVss {
		if vs.GetPubKey().Equals(css[0].privValidator.GetPubKey()) {
			selfIndex = i
			break
		}
	}
	proposal = types.NewProposal(vss[1].Height, round, -1, blockID)
	if err := vss[1].SignProposal(config.ChainID(), proposal); err != nil {
		t.Fatal("failed to sign bad proposal", err)
	}

	// set the proposal block
	if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}
	ensureNewProposal(proposalCh, height, round)
	rs = css[0].GetRoundState()
	for i := 0; i < nVals+3; i++ {
		if i == selfIndex {
			continue
		}
		signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])
	}
	ensureNewRound(newRoundCh, height+1, 0)

	sim.Chain = make([]*types.Block, 0)
	sim.Commits = make([]*types.Commit, 0)
	for i := 1; i <= numBlocks; i++ {
		sim.Chain = append(sim.Chain, css[0].blockStore.LoadBlock(int64(i)))
		sim.Commits = append(sim.Commits, css[0].blockStore.LoadBlockCommit(int64(i)))
	}

	return sim
}

// Sync from scratch
func TestHandshakeReplayAll(t *testing.T) {
	t.Parallel()

	for _, m := range modes {
		testHandshakeReplay(t, config, 0, m, nil)
	}
	sim := makeTestSim(t, "all")
	defer sim.CleanupFunc()
	for _, m := range modes {
		testHandshakeReplay(t, config, 0, m, &sim)
	}
}

// Sync many, not from scratch
func TestHandshakeReplaySome(t *testing.T) {
	t.Parallel()

	for _, m := range modes {
		testHandshakeReplay(t, config, 1, m, nil)
	}
	sim := makeTestSim(t, "some")
	defer sim.CleanupFunc()
	for _, m := range modes {
		testHandshakeReplay(t, config, 1, m, &sim)
	}
}

// Sync from lagging by one
func TestHandshakeReplayOne(t *testing.T) {
	t.Parallel()

	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks-1, m, nil)
	}
	sim := makeTestSim(t, "one")
	defer sim.CleanupFunc()
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks-1, m, &sim)
	}
}

// Sync from caught up
func TestFlappyHandshakeReplayNone(t *testing.T) {
	t.Parallel()

	testutils.FilterStability(t, testutils.Flappy)

	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks, m, nil)
	}
	sim := makeTestSim(t, "none")
	defer sim.CleanupFunc()
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks, m, &sim)
	}
}

// Test mockProxyApp should not panic when the app returns ABCIResponses with some empty ResponseDeliverTx
func TestMockProxyApp(t *testing.T) {
	t.Parallel()

	logger := log.NewTestingLogger(t)
	validTxs, invalidTxs := 0, 0
	txIndex := 0

	assert.NotPanics(t, func() {
		abciResWithEmptyDeliverTx := new(sm.ABCIResponses)
		abciResWithEmptyDeliverTx.DeliverTxs = make([]abci.ResponseDeliverTx, 0)
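		// a single empty ResponseDeliverTx: the mock proxy app must replay it without panicking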
		abciResWithEmptyDeliverTx.DeliverTxs = append(abciResWithEmptyDeliverTx.DeliverTxs, abci.ResponseDeliverTx{})

		// called when saveABCIResponses:
		bytes := amino.MustMarshal(abciResWithEmptyDeliverTx)
		loadedAbciRes := new(sm.ABCIResponses)

		// this also happens in sm.LoadABCIResponses
		err := amino.Unmarshal(bytes, loadedAbciRes)
		require.NoError(t, err)

		mock := newMockProxyApp([]byte("mock_hash"), loadedAbciRes)

		abciRes := new(sm.ABCIResponses)
		abciRes.DeliverTxs = make([]abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTxs))
		// Execute transactions and get hash.
		proxyCb := func(req abci.Request, res abci.Response) {
			if res, ok := res.(abci.ResponseDeliverTx); ok {
				// TODO: make use of res.Log
				// TODO: make use of this info
				// Blocks may include invalid txs.
				if res.Error == nil {
					validTxs++
				} else {
					logger.Debug("Invalid tx", "code", res.Error, "log", res.Log)
					invalidTxs++
				}
				abciRes.DeliverTxs[txIndex] = res
				txIndex++
			}
		}
		mock.SetResponseCallback(proxyCb)

		someTx := []byte("tx")
		mock.DeliverTxAsync(abci.RequestDeliverTx{Tx: someTx})
	})
	assert.True(t, validTxs == 1)
	assert.True(t, invalidTxs == 0)
}

func tempWALWithData(data []byte) string {
	walFile, err := os.CreateTemp("", "wal")
	if err != nil {
		panic(fmt.Sprintf("failed to create temp WAL file: %v", err))
	}
	_, err = walFile.Write(data)
	if err != nil {
		panic(fmt.Sprintf("failed to write to temp WAL file: %v", err))
	}
	if err := walFile.Close(); err != nil {
		panic(fmt.Sprintf("failed to close temp WAL file: %v", err))
	}
	return walFile.Name()
}

// Make some blocks. Start a fresh app and apply nBlocks blocks.
// Then restart the app and sync it up with the remaining blocks.
func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint, sim *testSim) {
	t.Helper()

	var (
		chain        []*types.Block
		commits      []*types.Commit
		store        *mockBlockStore
		stateDB      dbm.DB
		genesisState sm.State

		genesisFile string
	)

	if sim != nil {
		testConfig, gf := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
		defer os.RemoveAll(testConfig.RootDir)
		stateDB = memdb.NewMemDB()
		defer stateDB.Close()
		genesisState = sim.GenesisState
		config = sim.Config
		chain = sim.Chain
		commits = sim.Commits
		store = newMockBlockStore(config, genesisState.ConsensusParams)
		genesisFile = gf
	} else { // test single node
		testConfig, gf := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
		defer os.RemoveAll(testConfig.RootDir)
		walBody, err := WALWithNBlocks(t, numBlocks)
		require.NoError(t, err)
		walFile := tempWALWithData(walBody)
		config.Consensus.SetWalFile(walFile)

		wal, err := walm.NewWAL(walFile, maxMsgSize)
		require.NoError(t, err)
		wal.SetLogger(log.NewTestingLogger(t))
		err = wal.Start()
		require.NoError(t, err)
		defer wal.Stop()

		chain, commits, err = makeBlockchainFromWAL(wal)
		require.NoError(t, err)
		stateDB, genesisState, store = makeStateAndStore(config, gf, kvstore.AppVersion)
		defer stateDB.Close()
		genesisFile = gf
	}
	store.chain = chain
	store.commits = commits

	state := genesisState.Copy()
	// run the chain through state.ApplyBlock to build up the tendermint state
	state = buildTMStateFromChain(config, stateDB, state, chain, nBlocks, mode)
	latestAppHash := state.AppHash

	// make a new client creator
	kvstoreApp := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_a", nBlocks, mode)))
	defer kvstoreApp.Close()

	clientCreator2 := proxy.NewLocalClientCreator(kvstoreApp)
	if nBlocks > 0 {
		// run nBlocks against a new client to build up the app state.
		// use a throwaway tendermint state
		proxyApp := appconn.NewAppConns(clientCreator2)
		stateDB1 := memdb.NewMemDB()
		sm.SaveState(stateDB1, genesisState)
		buildAppStateFromChain(proxyApp, stateDB1, genesisState, chain, nBlocks, mode)
	}

	// now start the app using the handshake - it should sync
	evsw := events.NewEventSwitch()
	genDoc, _ := sm.MakeGenesisDocFromFile(genesisFile)
	handshaker := NewHandshaker(stateDB, state, store, genDoc)
	handshaker.SetEventSwitch(evsw)
	proxyApp := appconn.NewAppConns(clientCreator2)
	if err := proxyApp.Start(); err != nil {
		t.Fatalf("Error starting proxy app connections: %v", err)
	}
	defer proxyApp.Stop()
	if err := handshaker.Handshake(proxyApp); err != nil {
		t.Fatalf("Error on abci handshake: %v", err)
	}

	// get the latest app hash from the app
	res, err := proxyApp.Query().InfoSync(abci.RequestInfo{})
	if err != nil {
		t.Fatal(err)
	}

	// the app hash should be synced up
	if !bytes.Equal(latestAppHash, res.LastBlockAppHash) {
		t.Fatalf("Expected app hashes to match after handshake/replay. got %X, expected %X", res.LastBlockAppHash, latestAppHash)
	}

	expectedBlocksToSync := numBlocks - nBlocks
	if nBlocks == numBlocks && mode > 0 {
		expectedBlocksToSync++
	} else if nBlocks > 0 && mode == 1 {
		expectedBlocksToSync++
	}

	if handshaker.NBlocks() != expectedBlocksToSync {
		t.Fatalf("Expected handshake to sync %d blocks, got %d", expectedBlocksToSync, handshaker.NBlocks())
	}
}

func applyBlock(stateDB dbm.DB, st sm.State, blk *types.Block, proxyApp appconn.AppConns) sm.State {
	testPartSize := types.BlockPartSizeBytes
	blockExec := sm.NewBlockExecutor(stateDB, log.NewNoopLogger(), proxyApp.Consensus(), mempool)

	blkID := types.BlockID{Hash: blk.Hash(), PartsHeader: blk.MakePartSet(testPartSize).Header()}
	newState, err := blockExec.ApplyBlock(st, blkID, blk)
	if err != nil {
		panic(err)
	}
	return newState
}

func buildAppStateFromChain(proxyApp appconn.AppConns, stateDB dbm.DB,
	state sm.State, chain []*types.Block, nBlocks int, mode uint,
) {
	// start a new app without handshake, play nBlocks blocks
	if err := proxyApp.Start(); err != nil {
		panic(err)
	}
	defer proxyApp.Stop()

	state.AppVersion = kvstore.AppVersion // simulate handshake, receive app version
	validators := state.Validators.ABCIValidatorUpdates()
	if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
		Validators: validators,
	}); err != nil {
		panic(err)
	}
	sm.SaveState(stateDB, state) // save height 1's validatorsInfo

	switch mode {
	case 0:
		for i := 0; i < nBlocks; i++ {
			block := chain[i]
			state = applyBlock(stateDB, state, block, proxyApp)
		}
	case 1, 2:
		for i := 0; i < nBlocks-1; i++ {
			block := chain[i]
			state = applyBlock(stateDB, state, block, proxyApp)
		}

		if mode == 2 {
			// update the kvstore height and apphash
			// as if we ran commit but not
			state = applyBlock(stateDB, state, chain[nBlocks-1], proxyApp)
		}
	}
}

func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, chain []*types.Block, nBlocks int, mode uint) sm.State {
	// run the whole chain against this client to build up the tendermint state
	app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode)))
	defer app.Close()
	clientCreator := proxy.NewLocalClientCreator(app)
	proxyApp := appconn.NewAppConns(clientCreator)
	if err := proxyApp.Start(); err != nil {
		panic(err)
	}
	defer proxyApp.Stop()

	state.AppVersion = kvstore.AppVersion // simulate handshake, receive app version
	validators := state.Validators.ABCIValidatorUpdates()
	if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
		Validators: validators,
	}); err != nil {
		panic(err)
	}
	sm.SaveState(stateDB, state) // save height 1's validatorsInfo

	switch mode {
	case 0:
		// sync right up
		for _, block := range chain {
			state = applyBlock(stateDB, state, block, proxyApp)
		}

	case 1, 2:
		// sync up to the penultimate as if we stored the block.
		// whether we commit or not depends on the appHash
		for _, block := range chain[:len(chain)-1] {
			state = applyBlock(stateDB, state, block, proxyApp)
		}

		// apply the final block to a state copy so we can
		// get the right next appHash but keep the state back
		applyBlock(stateDB, state, chain[len(chain)-1], proxyApp)
	}

	return state
}

func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
	t.Parallel()

	// 1. Initialize tendermint and commit 3 blocks with the following app hashes:
	// - 0x01
	// - 0x02
	// - 0x03
	config, genesisFile := ResetConfig("handshake_test_")
	defer os.RemoveAll(config.RootDir)
	privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
	const appVersion = "v0.0.0-test"
	stateDB, state, store := makeStateAndStore(config, genesisFile, appVersion)
	genDoc, _ := sm.MakeGenesisDocFromFile(genesisFile)
	state.LastValidators = state.Validators.Copy()
	// mode = 0 for committing all the blocks
	blocks := makeBlocks(3, &state, privVal)
	store.chain = blocks

	// 2. Tendermint must panic if app returns wrong hash for the first block
	// - RANDOM HASH
	// - 0x02
	// - 0x03
	{
		app := &badApp{numBlocks: 3, allHashesAreWrong: true}
		clientCreator := proxy.NewLocalClientCreator(app)
		proxyApp := appconn.NewAppConns(clientCreator)
		err := proxyApp.Start()
		require.NoError(t, err)
		defer proxyApp.Stop()

		assert.Panics(t, func() {
			h := NewHandshaker(stateDB, state, store, genDoc)
			h.Handshake(proxyApp)
		})
	}

	// 3. Tendermint must panic if app returns wrong hash for the last block
	// - 0x01
	// - 0x02
	// - RANDOM HASH
	{
		app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true}
		clientCreator := proxy.NewLocalClientCreator(app)
		proxyApp := appconn.NewAppConns(clientCreator)
		err := proxyApp.Start()
		require.NoError(t, err)
		defer proxyApp.Stop()

		assert.Panics(t, func() {
			h := NewHandshaker(stateDB, state, store, genDoc)
			h.Handshake(proxyApp)
		})
	}
}

func makeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Block {
	blocks := make([]*types.Block, 0)

	var (
		prevBlock     *types.Block
		prevBlockMeta *types.BlockMeta
	)

	appHeight := byte(0x01)
	for i := 0; i < n; i++ {
		height := int64(i + 1)

		block, parts := makeBlock(*state, prevBlock, prevBlockMeta, privVal, height)
		blocks = append(blocks, block)

		prevBlock = block
		prevBlockMeta = types.NewBlockMeta(block, parts)

		// update state
		state.AppHash = []byte{appHeight}
		appHeight++
		state.LastBlockHeight = height
	}

	return blocks
}

func makeBlock(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta,
	privVal types.PrivValidator, height int64,
) (*types.Block, *types.PartSet) {
	lastCommit := types.NewCommit(types.BlockID{}, nil)
	if height > 1 {
		vote, _ := types.MakeVote(lastBlock.Header.Height, lastBlockMeta.BlockID, state.Validators, privVal, lastBlock.Header.ChainID)
		voteCommitSig := vote.CommitSig()
		lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{voteCommitSig})
	}

	return state.MakeBlock(height, []types.Tx{}, lastCommit, state.Validators.GetProposer().Address)
}

type badApp struct {
	abci.BaseApplication
	numBlocks         byte
	height            byte
	allHashesAreWrong bool
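	// onlyLastHashIsWrong makes only the final Commit return a random hash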
	onlyLastHashIsWrong bool
}

func (app *badApp) Commit() (res abci.ResponseCommit) {
	app.height++
	if app.onlyLastHashIsWrong {
		if app.height == app.numBlocks {
			res.Data = random.RandBytes(8)
			return
		}
		res.Data = []byte{app.height}
		return
	} else if app.allHashesAreWrong {
		res.Data = random.RandBytes(8)
		return
	}

	panic("either allHashesAreWrong or onlyLastHashIsWrong must be set")
}

// --------------------------
// utils for making blocks

func makeBlockchainFromWAL(wal walm.WAL) ([]*types.Block, []*types.Commit, error) {
	var height int64 = 1

	// Search for height marker
	gr, found, err := wal.SearchForHeight(height, &walm.WALSearchOptions{})
	if err != nil {
		return nil, nil, err
	}
	if !found {
		return nil, nil, fmt.Errorf("WAL does not contain height %d", height)
	}
	defer gr.Close() //nolint: errcheck

	// log.Notice("Build a blockchain by reading from the WAL")

	var (
		blocks          []*types.Block
		commits         []*types.Commit
		thisBlockParts  *types.PartSet
		thisBlockCommit *types.Commit
	)

	dec := walm.NewWALReader(gr, maxMsgSize)
	for {
		msg, meta, err := dec.ReadMessage()
		if errors.Is(err, io.EOF) {
			break
		} else if err != nil {
			return nil, nil, err
		}

		if meta != nil {
			// if it's not the first one, we have a full block
			if thisBlockParts != nil {
				block := new(types.Block)
				_, err = amino.UnmarshalSizedReader(thisBlockParts.GetReader(), block, 0)
				if err != nil {
					panic(err)
				}
				if block.Height != height {
					panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height))
				}
				commitHeight := thisBlockCommit.Precommits[0].Height
				if commitHeight != height {
					panic(fmt.Sprintf("commit doesn't match. got height %d, expected %d", commitHeight, height))
				}
				blocks = append(blocks, block)
				commits = append(commits, thisBlockCommit)
				height++
			}
		}

		if msg != nil {
			piece := readPieceFromWAL(msg)
			if piece == nil {
				continue
			}

			switch p := piece.(type) {
			case *types.PartSetHeader:
				thisBlockParts = types.NewPartSetFromHeader(*p)
			case *types.Part:
				_, err := thisBlockParts.AddPart(p)
				if err != nil {
					return nil, nil, err
				}
			case *types.Vote:
				if p.Type == types.PrecommitType {
					commitSigs := []*types.CommitSig{p.CommitSig()}
					thisBlockCommit = types.NewCommit(p.BlockID, commitSigs)
				}
			}
		}
	}
	// grab the last block too
	block := new(types.Block)
	_, err = amino.UnmarshalSizedReader(thisBlockParts.GetReader(), block, 0)
	if err != nil {
		panic(err)
	}
	if block.Height != height {
		panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height))
	}
	commitHeight := thisBlockCommit.Precommits[0].Height
	if commitHeight != height {
		panic(fmt.Sprintf("commit doesn't match. got height %d, expected %d", commitHeight, height))
	}
	blocks = append(blocks, block)
	commits = append(commits, thisBlockCommit)
	return blocks, commits, nil
}

func readPieceFromWAL(msg *walm.TimedWALMessage) interface{} {
	// for logging
	switch m := msg.Msg.(type) {
	case msgInfo:
		switch msg := m.Msg.(type) {
		case *ProposalMessage:
			return &msg.Proposal.BlockID.PartsHeader
		case *BlockPartMessage:
			return msg.Part
		case *VoteMessage:
			return msg.Vote
		}
	}

	return nil
}

// fresh state and mock store
func makeStateAndStore(config *cfg.Config, genesisFile string, appVersion string) (dbm.DB, sm.State, *mockBlockStore) {
	stateDB := memdb.NewMemDB()
	state, _ := sm.MakeGenesisStateFromFile(genesisFile)
	state.AppVersion = appVersion
	store := newMockBlockStore(config, state.ConsensusParams)
	sm.SaveState(stateDB, state)
	return stateDB, state, store
}

// ----------------------------------
// mock block store

type mockBlockStore struct {
	config  *cfg.Config
	params  abci.ConsensusParams
	chain   []*types.Block
	commits []*types.Commit
}

// TODO: NewBlockStore(memdb.NewMemDB) ...
func newMockBlockStore(config *cfg.Config, params abci.ConsensusParams) *mockBlockStore {
	return &mockBlockStore{config, params, nil, nil}
}

func (bs *mockBlockStore) Height() int64                       { return int64(len(bs.chain)) }
func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain[height-1] }
func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
	block := bs.chain[height-1]
	return &types.BlockMeta{
		BlockID: types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()},
		Header:  block.Header,
	}
}
func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil }
func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
}

func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit {
	return bs.commits[height-1]
}

func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit {
	return bs.commits[height-1]
}

// ---------------------------------------
// Test handshake/init chain

func TestHandshakeUpdatesValidators(t *testing.T) {
	t.Parallel()

	val, _ := types.RandValidator(true, 10)
	vals := types.NewValidatorSet([]*types.Validator{val})
	app := &initChainApp{vals: vals.ABCIValidatorUpdates()}
	clientCreator := proxy.NewLocalClientCreator(app)

	config, genesisFile := ResetConfig("handshake_test_")
	defer os.RemoveAll(config.RootDir)
	stateDB, state, store := makeStateAndStore(config, genesisFile, "v0.0.0-test")

	oldValAddr := state.Validators.Validators[0].Address

	// now start the app using the handshake - it should sync
	genDoc, _ := sm.MakeGenesisDocFromFile(genesisFile)
	handshaker := NewHandshaker(stateDB, state, store, genDoc)
	proxyApp := appconn.NewAppConns(clientCreator)
	if err := proxyApp.Start(); err != nil {
		t.Fatalf("Error starting proxy app connections: %v", err)
	}
	defer proxyApp.Stop()
	if err := handshaker.Handshake(proxyApp); err != nil {
		t.Fatalf("Error on abci handshake: %v", err)
	}

	// reload the state, check the validator set was updated
	state = sm.LoadState(stateDB)

	newValAddr := state.Validators.Validators[0].Address
	expectValAddr := val.Address
	assert.NotEqual(t, oldValAddr, newValAddr)
	assert.Equal(t, newValAddr, expectValAddr)
}

// returns the vals on InitChain
type initChainApp struct {
	abci.BaseApplication
	vals []abci.ValidatorUpdate
}

func (ica *initChainApp) InitChain(req abci.RequestInitChain) abci.ResponseInitChain {
	return abci.ResponseInitChain{
		Validators: ica.vals,
	}
}