github.com/vipernet-xyz/tendermint-core@v0.32.0/consensus/replay_test.go (about) 1 package consensus 2 3 import ( 4 "bytes" 5 "context" 6 "fmt" 7 "io" 8 "io/ioutil" 9 "os" 10 "path/filepath" 11 "runtime" 12 "testing" 13 "time" 14 15 "github.com/stretchr/testify/assert" 16 "github.com/stretchr/testify/require" 17 18 "sort" 19 20 dbm "github.com/tendermint/tm-db" 21 22 "github.com/tendermint/tendermint/abci/example/kvstore" 23 abci "github.com/tendermint/tendermint/abci/types" 24 cfg "github.com/tendermint/tendermint/config" 25 "github.com/tendermint/tendermint/crypto" 26 "github.com/tendermint/tendermint/libs/log" 27 tmrand "github.com/tendermint/tendermint/libs/rand" 28 mempl "github.com/tendermint/tendermint/mempool" 29 "github.com/tendermint/tendermint/mock" 30 "github.com/tendermint/tendermint/privval" 31 "github.com/tendermint/tendermint/proxy" 32 sm "github.com/tendermint/tendermint/state" 33 "github.com/tendermint/tendermint/types" 34 "github.com/tendermint/tendermint/version" 35 ) 36 37 func TestMain(m *testing.M) { 38 config = ResetConfig("consensus_reactor_test") 39 consensusReplayConfig = ResetConfig("consensus_replay_test") 40 configStateTest := ResetConfig("consensus_state_test") 41 configMempoolTest := ResetConfig("consensus_mempool_test") 42 configByzantineTest := ResetConfig("consensus_byzantine_test") 43 code := m.Run() 44 os.RemoveAll(config.RootDir) 45 os.RemoveAll(consensusReplayConfig.RootDir) 46 os.RemoveAll(configStateTest.RootDir) 47 os.RemoveAll(configMempoolTest.RootDir) 48 os.RemoveAll(configByzantineTest.RootDir) 49 os.Exit(code) 50 } 51 52 // These tests ensure we can always recover from failure at any part of the consensus process. 53 // There are two general failure scenarios: failure during consensus, and failure while applying the block. 54 // Only the latter interacts with the app and store, 55 // but the former has to deal with restrictions on re-use of priv_validator keys. 
56 // The `WAL Tests` are for failures during the consensus; 57 // the `Handshake Tests` are for failures in applying the block. 58 // With the help of the WAL, we can recover from it all! 59 60 //------------------------------------------------------------------------------------------ 61 // WAL Tests 62 63 // TODO: It would be better to verify explicitly which states we can recover from without the wal 64 // and which ones we need the wal for - then we'd also be able to only flush the 65 // wal writer when we need to, instead of with every message. 66 67 func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config, 68 lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) { 69 logger := log.TestingLogger() 70 state, _ := sm.LoadStateFromDBOrGenesisFile(stateDB, consensusReplayConfig.GenesisFile()) 71 privValidator := loadPrivValidator(consensusReplayConfig) 72 cs := newStateWithConfigAndBlockStore( 73 consensusReplayConfig, 74 state, 75 privValidator, 76 kvstore.NewApplication(), 77 blockDB, 78 ) 79 cs.SetLogger(logger) 80 81 bytes, _ := ioutil.ReadFile(cs.config.WalFile()) 82 t.Logf("====== WAL: \n\r%X\n", bytes) 83 84 err := cs.Start() 85 require.NoError(t, err) 86 defer cs.Stop() 87 88 // This is just a signal that we haven't halted; its not something contained 89 // in the WAL itself. Assuming the consensus state is running, replay of any 90 // WAL, including the empty one, should eventually be followed by a new 91 // block, or else something is wrong. 
92 newBlockSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock) 93 require.NoError(t, err) 94 select { 95 case <-newBlockSub.Out(): 96 case <-newBlockSub.Cancelled(): 97 t.Fatal("newBlockSub was cancelled") 98 case <-time.After(120 * time.Second): 99 t.Fatal("Timed out waiting for new block (see trace above)") 100 } 101 } 102 103 func sendTxs(ctx context.Context, cs *State) { 104 for i := 0; i < 256; i++ { 105 select { 106 case <-ctx.Done(): 107 return 108 default: 109 tx := []byte{byte(i)} 110 assertMempool(cs.txNotifier).CheckTx(tx, nil, mempl.TxInfo{}) 111 i++ 112 } 113 } 114 } 115 116 // TestWALCrash uses crashing WAL to test we can recover from any WAL failure. 117 func TestWALCrash(t *testing.T) { 118 testCases := []struct { 119 name string 120 initFn func(dbm.DB, *State, context.Context) 121 heightToStop int64 122 }{ 123 {"empty block", 124 func(stateDB dbm.DB, cs *State, ctx context.Context) {}, 125 1}, 126 {"many non-empty blocks", 127 func(stateDB dbm.DB, cs *State, ctx context.Context) { 128 go sendTxs(ctx, cs) 129 }, 130 3}, 131 } 132 133 for i, tc := range testCases { 134 tc := tc 135 consensusReplayConfig := ResetConfig(fmt.Sprintf("%s_%d", t.Name(), i)) 136 t.Run(tc.name, func(t *testing.T) { 137 crashWALandCheckLiveness(t, consensusReplayConfig, tc.initFn, tc.heightToStop) 138 }) 139 } 140 } 141 142 func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config, 143 initFn func(dbm.DB, *State, context.Context), heightToStop int64) { 144 walPanicked := make(chan error) 145 crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop} 146 147 i := 1 148 LOOP: 149 for { 150 t.Logf("====== LOOP %d\n", i) 151 152 // create consensus state from a clean slate 153 logger := log.NewNopLogger() 154 blockDB := dbm.NewMemDB() 155 stateDB := blockDB 156 state, _ := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile()) 157 privValidator := 
loadPrivValidator(consensusReplayConfig) 158 cs := newStateWithConfigAndBlockStore( 159 consensusReplayConfig, 160 state, 161 privValidator, 162 kvstore.NewApplication(), 163 blockDB, 164 ) 165 cs.SetLogger(logger) 166 167 // start sending transactions 168 ctx, cancel := context.WithCancel(context.Background()) 169 initFn(stateDB, cs, ctx) 170 171 // clean up WAL file from the previous iteration 172 walFile := cs.config.WalFile() 173 os.Remove(walFile) 174 175 // set crashing WAL 176 csWal, err := cs.OpenWAL(walFile) 177 require.NoError(t, err) 178 crashingWal.next = csWal 179 // reset the message counter 180 crashingWal.msgIndex = 1 181 cs.wal = crashingWal 182 183 // start consensus state 184 err = cs.Start() 185 require.NoError(t, err) 186 187 i++ 188 189 select { 190 case err := <-walPanicked: 191 t.Logf("WAL panicked: %v", err) 192 193 // make sure we can make blocks after a crash 194 startNewStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateDB) 195 196 // stop consensus state and transactions sender (initFn) 197 cs.Stop() 198 cancel() 199 200 // if we reached the required height, exit 201 if _, ok := err.(ReachedHeightToStopError); ok { 202 break LOOP 203 } 204 case <-time.After(10 * time.Second): 205 t.Fatal("WAL did not panic for 10 seconds (check the log)") 206 } 207 } 208 } 209 210 // crashingWAL is a WAL which crashes or rather simulates a crash during Save 211 // (before and after). It remembers a message for which we last panicked 212 // (lastPanickedForMsgIndex), so we don't panic for it in subsequent iterations. 213 type crashingWAL struct { 214 next WAL 215 panicCh chan error 216 heightToStop int64 217 218 msgIndex int // current message index 219 lastPanickedForMsgIndex int // last message for which we panicked 220 } 221 222 var _ WAL = &crashingWAL{} 223 224 // WALWriteError indicates a WAL crash. 
type WALWriteError struct {
	msg string
}

// Error implements the error interface.
func (e WALWriteError) Error() string {
	return e.msg
}

// ReachedHeightToStopError indicates we've reached the required consensus
// height and may exit.
type ReachedHeightToStopError struct {
	height int64
}

// Error implements the error interface.
func (e ReachedHeightToStopError) Error() string {
	return fmt.Sprintf("reached height to stop %d", e.height)
}

// Write simulate WAL's crashing by sending an error to the panicCh and then
// exiting the cs.receiveRoutine.
//
// EndHeightMessage at heightToStop sends ReachedHeightToStopError (test done);
// the first write of any other message at a new msgIndex sends WALWriteError
// (simulated crash). A message index we already panicked for is passed through
// to the wrapped WAL, so each restart makes progress past the previous crash
// point. runtime.Goexit terminates the calling goroutine after signalling.
func (w *crashingWAL) Write(m WALMessage) error {
	if endMsg, ok := m.(EndHeightMessage); ok {
		if endMsg.Height == w.heightToStop {
			w.panicCh <- ReachedHeightToStopError{endMsg.Height}
			runtime.Goexit()
			return nil
		}

		return w.next.Write(m)
	}

	if w.msgIndex > w.lastPanickedForMsgIndex {
		w.lastPanickedForMsgIndex = w.msgIndex
		_, file, line, _ := runtime.Caller(1)
		w.panicCh <- WALWriteError{fmt.Sprintf("failed to write %T to WAL (fileline: %s:%d)", m, file, line)}
		runtime.Goexit()
		return nil
	}

	w.msgIndex++
	return w.next.Write(m)
}

// WriteSync behaves exactly like Write; the simulated crash does not
// distinguish sync from async writes.
func (w *crashingWAL) WriteSync(m WALMessage) error {
	return w.Write(m)
}

// FlushAndSync delegates to the wrapped WAL.
func (w *crashingWAL) FlushAndSync() error { return w.next.FlushAndSync() }

// SearchForEndHeight delegates to the wrapped WAL.
func (w *crashingWAL) SearchForEndHeight(
	height int64,
	options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
	return w.next.SearchForEndHeight(height, options)
}

// Lifecycle methods delegate to the wrapped WAL.
func (w *crashingWAL) Start() error { return w.next.Start() }
func (w *crashingWAL) Stop() error  { return w.next.Stop() }
func (w *crashingWAL) Wait()        { w.next.Wait() }

//------------------------------------------------------------------------------------------

// testSim stores the chain produced by TestSimulateValidatorsChange so the
// handshake/replay tests can replay it with validator-set changes.
type testSim struct {
	GenesisState sm.State
	Config       *cfg.Config
	Chain        []*types.Block
	Commits      []*types.Commit
	CleanupFunc  cleanupFunc
}
const (
	// numBlocks is the chain length used by the handshake/replay tests.
	numBlocks = 6
)

var (
	mempool = mock.Mempool{}
	evpool  = sm.MockEvidencePool{}

	// sim is populated by TestSimulateValidatorsChange and consumed by
	// testHandshakeReplay when testValidatorsChange is true.
	sim testSim
)

//---------------------------------------
// Test handshake/replay

// 0 - all synced up
// 1 - saved block but app and state are behind
// 2 - save block and committed but state is behind
// 3 - save block and committed with truncated block store and state behind
var modes = []uint{0, 1, 2, 3}

// This is actually not a test, it's for storing validator change tx data for testHandshakeReplay
func TestSimulateValidatorsChange(t *testing.T) {
	nPeers := 7
	nVals := 4
	css, genDoc, config, cleanup := randConsensusNetWithPeers(
		nVals,
		nPeers,
		"replay_test",
		newMockTickerFunc(true),
		newPersistentKVStoreWithPath)
	sim.Config = config
	sim.GenesisState, _ = sm.MakeGenesisState(genDoc)
	sim.CleanupFunc = cleanup

	partSize := types.BlockPartSizeBytes

	newRoundCh := subscribe(css[0].eventBus, types.EventQueryNewRound)
	proposalCh := subscribe(css[0].eventBus, types.EventQueryCompleteProposal)

	vss := make([]*validatorStub, nPeers)
	for i := 0; i < nPeers; i++ {
		vss[i] = newValidatorStub(css[i].privValidator, i)
	}
	height, round := css[0].Height, css[0].Round
	// start the machine
	startTestRound(css[0], height, round)
	incrementHeight(vss...)
	ensureNewRound(newRoundCh, height, 0)
	ensureNewProposal(proposalCh, height, round)
	rs := css[0].GetRoundState()
	signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)
	ensureNewRound(newRoundCh, height+1, 0)

	//height 2: add a new validator (newValidatorTx1) via the kvstore app.
	height++
	incrementHeight(vss...)
	newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
	require.NoError(t, err)
	valPubKey1ABCI := types.TM2PB.PubKey(newValidatorPubKey1)
	newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)
	err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx1, nil, mempl.TxInfo{})
	assert.Nil(t, err)
	propBlock, _ := css[0].createProposalBlock() //changeProposer(t, cs1, vs2)
	propBlockParts := propBlock.MakePartSet(partSize)
	blockID := types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
	proposal := types.NewProposal(vss[1].Height, round, -1, blockID)
	if err := vss[1].SignProposal(config.ChainID(), proposal); err != nil {
		t.Fatal("failed to sign bad proposal", err)
	}

	// set the proposal block
	if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}
	ensureNewProposal(proposalCh, height, round)
	rs = css[0].GetRoundState()
	signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)
	ensureNewRound(newRoundCh, height+1, 0)

	//height 3: update the power of the validator added at height 2.
	height++
	incrementHeight(vss...)
	updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
	require.NoError(t, err)
	updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1)
	updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
	err = assertMempool(css[0].txNotifier).CheckTx(updateValidatorTx1, nil, mempl.TxInfo{})
	assert.Nil(t, err)
	propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2)
	propBlockParts = propBlock.MakePartSet(partSize)
	blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
	proposal = types.NewProposal(vss[2].Height, round, -1, blockID)
	if err := vss[2].SignProposal(config.ChainID(), proposal); err != nil {
		t.Fatal("failed to sign bad proposal", err)
	}

	// set the proposal block
	if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}
	ensureNewProposal(proposalCh, height, round)
	rs = css[0].GetRoundState()
	signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)
	ensureNewRound(newRoundCh, height+1, 0)

	//height 4: add two more validators, then schedule one of them for removal.
	height++
	incrementHeight(vss...)
	newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey()
	require.NoError(t, err)
	newVal2ABCI := types.TM2PB.PubKey(newValidatorPubKey2)
	newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)
	err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx2, nil, mempl.TxInfo{})
	assert.Nil(t, err)
	newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey()
	require.NoError(t, err)
	newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3)
	newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)
	err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx3, nil, mempl.TxInfo{})
	assert.Nil(t, err)
	propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2)
	propBlockParts = propBlock.MakePartSet(partSize)
	blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
	// Sort the enlarged validator set by address and find our own position in
	// it, so we can skip signing our own vote below.
	newVss := make([]*validatorStub, nVals+1)
	copy(newVss, vss[:nVals+1])
	sort.Sort(ValidatorStubsByAddress(newVss))
	selfIndex := 0
	for i, vs := range newVss {
		vsPubKey, err := vs.GetPubKey()
		require.NoError(t, err)

		css0PubKey, err := css[0].privValidator.GetPubKey()
		require.NoError(t, err)

		if vsPubKey.Equals(css0PubKey) {
			selfIndex = i
			break
		}
	}

	proposal = types.NewProposal(vss[3].Height, round, -1, blockID)
	if err := vss[3].SignProposal(config.ChainID(), proposal); err != nil {
		t.Fatal("failed to sign bad proposal", err)
	}

	// set the proposal block
	if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}
	ensureNewProposal(proposalCh, height, round)

	removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
	err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx2, nil, mempl.TxInfo{})
	assert.Nil(t, err)

	rs = css[0].GetRoundState()
	for i := 0; i < nVals+1; i++ {
		if i == selfIndex {
			continue
		}
		signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])
	}

	ensureNewRound(newRoundCh, height+1, 0)

	//height 5: commit the block that removes validator 2.
	height++
	incrementHeight(vss...)
	ensureNewProposal(proposalCh, height, round)
	rs = css[0].GetRoundState()
	for i := 0; i < nVals+1; i++ {
		if i == selfIndex {
			continue
		}
		signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])
	}
	ensureNewRound(newRoundCh, height+1, 0)

	//height 6: remove validator 3 as well.
	height++
	incrementHeight(vss...)
	removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)
	err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx3, nil, mempl.TxInfo{})
	assert.Nil(t, err)
	propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2)
	propBlockParts = propBlock.MakePartSet(partSize)
	blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
	newVss = make([]*validatorStub, nVals+3)
	copy(newVss, vss[:nVals+3])
	sort.Sort(ValidatorStubsByAddress(newVss))
	for i, vs := range newVss {
		vsKeyKey, err := vs.GetPubKey()
		require.NoError(t, err)

		css0PubKey, err := css[0].privValidator.GetPubKey()
		require.NoError(t, err)

		if vsKeyKey.Equals(css0PubKey) {
			selfIndex = i
			break
		}
	}
	proposal = types.NewProposal(vss[1].Height, round, -1, blockID)
	if err := vss[1].SignProposal(config.ChainID(), proposal); err != nil {
		t.Fatal("failed to sign bad proposal", err)
	}

	// set the proposal block
	if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}
	ensureNewProposal(proposalCh, height, round)
	rs = css[0].GetRoundState()
	for i := 0; i < nVals+3; i++ {
		if i == selfIndex {
			continue
		}
		signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])
	}
	ensureNewRound(newRoundCh, height+1, 0)

	// Store the produced chain and commits for the handshake/replay tests.
	sim.Chain = make([]*types.Block, 0)
	sim.Commits = make([]*types.Commit, 0)
	for i := 1; i <= numBlocks; i++ {
		sim.Chain = append(sim.Chain, css[0].blockStore.LoadBlock(int64(i)))
		sim.Commits = append(sim.Commits, css[0].blockStore.LoadBlockCommit(int64(i)))
	}
}

// Sync from scratch
func TestHandshakeReplayAll(t *testing.T) {
	for _, m := range modes {
		testHandshakeReplay(t, config, 0, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, 0, m, true)
	}
}

// Sync many, not from scratch
func TestHandshakeReplaySome(t *testing.T) {
	for _, m := range modes {
		testHandshakeReplay(t, config, 2, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, 2, m, true)
	}
}

// Sync from lagging by one
func TestHandshakeReplayOne(t *testing.T) {
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks-1, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks-1, m, true)
	}
}

// Sync from caught up
func TestHandshakeReplayNone(t *testing.T) {
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks, m, true)
	}
}

// Test mockProxyApp should not panic when app return ABCIResponses with some empty ResponseDeliverTx
func TestMockProxyApp(t *testing.T) {
	sim.CleanupFunc() //clean the test env created in TestSimulateValidatorsChange
	logger := log.TestingLogger()
	var validTxs, invalidTxs = 0, 0
	txIndex := 0

	assert.NotPanics(t, func() {
		abciResWithEmptyDeliverTx := new(sm.ABCIResponses)
		abciResWithEmptyDeliverTx.DeliverTx = make([]*abci.ResponseDeliverTx, 0)
		abciResWithEmptyDeliverTx.DeliverTx = append(abciResWithEmptyDeliverTx.DeliverTx, &abci.ResponseDeliverTx{})

		// called when saveABCIResponses:
		bytes := cdc.MustMarshalBinaryBare(abciResWithEmptyDeliverTx)
		loadedAbciRes := new(sm.ABCIResponses)

		// this also happens sm.LoadABCIResponses
		err := cdc.UnmarshalBinaryBare(bytes, loadedAbciRes)
		require.NoError(t, err)

		mock := newMockProxyApp([]byte("mock_hash"), loadedAbciRes)

		abciRes := new(sm.ABCIResponses)
		abciRes.DeliverTx = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTx))
		// Execute transactions and get hash.
		proxyCb := func(req *abci.Request, res *abci.Response) {
			if r, ok := res.Value.(*abci.Response_DeliverTx); ok {
				// TODO: make use of res.Log
				// TODO: make use of this info
				// Blocks may include invalid txs.
				txRes := r.DeliverTx
				if txRes.Code == abci.CodeTypeOK {
					validTxs++
				} else {
					logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log)
					invalidTxs++
				}
				abciRes.DeliverTx[txIndex] = txRes
				txIndex++
			}
		}
		mock.SetResponseCallback(proxyCb)

		someTx := []byte("tx")
		mock.DeliverTxAsync(abci.RequestDeliverTx{Tx: someTx})
	})
	assert.True(t, validTxs == 1)
	assert.True(t, invalidTxs == 0)
}

// tempWALWithData writes data to a fresh temporary WAL file and returns its
// path; it panics on any I/O failure since callers cannot proceed without it.
func tempWALWithData(data []byte) string {
	walFile, err := ioutil.TempFile("", "wal")
	if err != nil {
		panic(fmt.Sprintf("failed to create temp WAL file: %v", err))
	}
	_, err = walFile.Write(data)
	if err != nil {
		panic(fmt.Sprintf("failed to write to temp WAL file: %v", err))
	}
	if err := walFile.Close(); err != nil {
		panic(fmt.Sprintf("failed to close temp WAL file: %v", err))
	}
	return walFile.Name()
}

// Make some blocks. Start a fresh app and apply nBlocks blocks.
628 // Then restart the app and sync it up with the remaining blocks 629 func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint, testValidatorsChange bool) { 630 var chain []*types.Block 631 var commits []*types.Commit 632 var store *mockBlockStore 633 var stateDB dbm.DB 634 var genisisState sm.State 635 if testValidatorsChange { 636 testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode)) 637 defer os.RemoveAll(testConfig.RootDir) 638 stateDB = dbm.NewMemDB() 639 genisisState = sim.GenesisState 640 config = sim.Config 641 chain = append([]*types.Block{}, sim.Chain...) // copy chain 642 commits = sim.Commits 643 store = newMockBlockStore(config, genisisState.ConsensusParams) 644 } else { //test single node 645 testConfig := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode)) 646 defer os.RemoveAll(testConfig.RootDir) 647 walBody, err := WALWithNBlocks(t, numBlocks) 648 require.NoError(t, err) 649 walFile := tempWALWithData(walBody) 650 config.Consensus.SetWalFile(walFile) 651 652 privVal := privval.LoadFilePVLean(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) 653 654 wal, err := NewWAL(walFile) 655 require.NoError(t, err) 656 wal.SetLogger(log.TestingLogger()) 657 err = wal.Start() 658 require.NoError(t, err) 659 defer wal.Stop() 660 661 chain, commits, err = makeBlockchainFromWAL(wal) 662 require.NoError(t, err) 663 pubKey, err := privVal.GetPubKey() 664 require.NoError(t, err) 665 stateDB, genisisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion) 666 } 667 store.chain = chain 668 store.commits = commits 669 670 state := genisisState.Copy() 671 // run the chain through state.ApplyBlock to build up the tendermint state 672 state = buildTMStateFromChain(config, stateDB, state, chain, nBlocks, mode) 673 latestAppHash := state.AppHash 674 675 // make a new client creator 676 kvstoreApp := kvstore.NewPersistentKVStoreApplication( 677 filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_a", 
nBlocks, mode))) 678 679 clientCreator2 := proxy.NewLocalClientCreator(kvstoreApp) 680 if nBlocks > 0 { 681 // run nBlocks against a new client to build up the app state. 682 // use a throwaway tendermint state 683 proxyApp := proxy.NewAppConns(clientCreator2) 684 stateDB1 := dbm.NewMemDB() 685 sm.SaveState(stateDB1, genisisState) 686 buildAppStateFromChain(proxyApp, stateDB1, genisisState, chain, nBlocks, mode) 687 } 688 689 // Prune block store if requested 690 expectError := false 691 if mode == 3 { 692 pruned, err := store.PruneBlocks(2) 693 require.NoError(t, err) 694 require.EqualValues(t, 1, pruned) 695 expectError = int64(nBlocks) < 2 696 } 697 698 // now start the app using the handshake - it should sync 699 genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) 700 handshaker := NewHandshaker(stateDB, state, store, genDoc) 701 proxyApp := proxy.NewAppConns(clientCreator2) 702 if err := proxyApp.Start(); err != nil { 703 t.Fatalf("Error starting proxy app connections: %v", err) 704 } 705 defer proxyApp.Stop() 706 err := handshaker.Handshake(proxyApp, nil) 707 if expectError { 708 require.Error(t, err) 709 return 710 } else if err != nil { 711 t.Fatalf("Error on abci handshake: %v", err) 712 } 713 714 // get the latest app hash from the app 715 res, err := proxyApp.Query().InfoSync(abci.RequestInfo{Version: ""}) 716 if err != nil { 717 t.Fatal(err) 718 } 719 720 // the app hash should be synced up 721 if !bytes.Equal(latestAppHash, res.LastBlockAppHash) { 722 t.Fatalf( 723 "Expected app hashes to match after handshake/replay. 
got %X, expected %X", 724 res.LastBlockAppHash, 725 latestAppHash) 726 } 727 728 expectedBlocksToSync := numBlocks - nBlocks 729 if nBlocks == numBlocks && mode > 0 { 730 expectedBlocksToSync++ 731 } else if nBlocks > 0 && mode == 1 { 732 expectedBlocksToSync++ 733 } 734 735 if handshaker.NBlocks() != expectedBlocksToSync { 736 t.Fatalf("Expected handshake to sync %d blocks, got %d", expectedBlocksToSync, handshaker.NBlocks()) 737 } 738 } 739 740 func applyBlock(stateDB dbm.DB, st sm.State, blk *types.Block, proxyApp proxy.AppConns) sm.State { 741 testPartSize := types.BlockPartSizeBytes 742 blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, nil) 743 744 blkID := types.BlockID{Hash: blk.Hash(), PartsHeader: blk.MakePartSet(testPartSize).Header()} 745 newState, _, err := blockExec.ApplyBlock(st, blkID, blk) 746 if err != nil { 747 panic(err) 748 } 749 return newState 750 } 751 752 func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB, 753 state sm.State, chain []*types.Block, nBlocks int, mode uint) { 754 // start a new app without handshake, play nBlocks blocks 755 if err := proxyApp.Start(); err != nil { 756 panic(err) 757 } 758 defer proxyApp.Stop() 759 760 state.Version.Consensus.App = kvstore.ProtocolVersion //simulate handshake, receive app version 761 validators := types.TM2PB.ValidatorUpdates(state.Validators) 762 if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{ 763 Validators: validators, 764 }); err != nil { 765 panic(err) 766 } 767 sm.SaveState(stateDB, state) //save height 1's validatorsInfo 768 769 switch mode { 770 case 0: 771 for i := 0; i < nBlocks; i++ { 772 block := chain[i] 773 state = applyBlock(stateDB, state, block, proxyApp) 774 } 775 case 1, 2, 3: 776 for i := 0; i < nBlocks-1; i++ { 777 block := chain[i] 778 state = applyBlock(stateDB, state, block, proxyApp) 779 } 780 781 if mode == 2 || mode == 3 { 782 // update the kvstore height and apphash 783 // as 
if we ran commit but not 784 state = applyBlock(stateDB, state, chain[nBlocks-1], proxyApp) 785 } 786 default: 787 panic(fmt.Sprintf("unknown mode %v", mode)) 788 } 789 790 } 791 792 func buildTMStateFromChain( 793 config *cfg.Config, 794 stateDB dbm.DB, 795 state sm.State, 796 chain []*types.Block, 797 nBlocks int, 798 mode uint) sm.State { 799 // run the whole chain against this client to build up the tendermint state 800 clientCreator := proxy.NewLocalClientCreator( 801 kvstore.NewPersistentKVStoreApplication( 802 filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode)))) 803 proxyApp := proxy.NewAppConns(clientCreator) 804 if err := proxyApp.Start(); err != nil { 805 panic(err) 806 } 807 defer proxyApp.Stop() 808 809 state.Version.Consensus.App = kvstore.ProtocolVersion //simulate handshake, receive app version 810 validators := types.TM2PB.ValidatorUpdates(state.Validators) 811 if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{ 812 Validators: validators, 813 }); err != nil { 814 panic(err) 815 } 816 sm.SaveState(stateDB, state) //save height 1's validatorsInfo 817 818 switch mode { 819 case 0: 820 // sync right up 821 for _, block := range chain { 822 state = applyBlock(stateDB, state, block, proxyApp) 823 } 824 825 case 1, 2, 3: 826 // sync up to the penultimate as if we stored the block. 827 // whether we commit or not depends on the appHash 828 for _, block := range chain[:len(chain)-1] { 829 state = applyBlock(stateDB, state, block, proxyApp) 830 } 831 832 // apply the final block to a state copy so we can 833 // get the right next appHash but keep the state back 834 applyBlock(stateDB, state, chain[len(chain)-1], proxyApp) 835 default: 836 panic(fmt.Sprintf("unknown mode %v", mode)) 837 } 838 839 return state 840 } 841 842 func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { 843 // 1. 
Initialize tendermint and commit 3 blocks with the following app hashes: 844 // - 0x01 845 // - 0x02 846 // - 0x03 847 config := ResetConfig("handshake_test_") 848 defer os.RemoveAll(config.RootDir) 849 privVal := privval.LoadFilePVLean(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) 850 const appVersion = 0x0 851 pubKey, err := privVal.GetPubKey() 852 require.NoError(t, err) 853 stateDB, state, store := stateAndStore(config, pubKey, appVersion) 854 genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) 855 state.LastValidators = state.Validators.Copy() 856 // mode = 0 for committing all the blocks 857 blocks := makeBlocks(3, &state, privVal) 858 store.chain = blocks 859 860 // 2. Tendermint must panic if app returns wrong hash for the first block 861 // - RANDOM HASH 862 // - 0x02 863 // - 0x03 864 { 865 app := &badApp{numBlocks: 3, allHashesAreWrong: true} 866 clientCreator := proxy.NewLocalClientCreator(app) 867 proxyApp := proxy.NewAppConns(clientCreator) 868 err := proxyApp.Start() 869 require.NoError(t, err) 870 defer proxyApp.Stop() 871 872 assert.Panics(t, func() { 873 h := NewHandshaker(stateDB, state, store, genDoc) 874 h.Handshake(proxyApp, nil) 875 }) 876 } 877 878 // 3. 
Tendermint must panic if app returns wrong hash for the last block 879 // - 0x01 880 // - 0x02 881 // - RANDOM HASH 882 { 883 app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true} 884 clientCreator := proxy.NewLocalClientCreator(app) 885 proxyApp := proxy.NewAppConns(clientCreator) 886 err := proxyApp.Start() 887 require.NoError(t, err) 888 defer proxyApp.Stop() 889 890 assert.Panics(t, func() { 891 h := NewHandshaker(stateDB, state, store, genDoc) 892 h.Handshake(proxyApp, nil) 893 }) 894 } 895 } 896 897 func makeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Block { 898 blocks := make([]*types.Block, 0) 899 900 var ( 901 prevBlock *types.Block 902 prevBlockMeta *types.BlockMeta 903 ) 904 905 appHeight := byte(0x01) 906 for i := 0; i < n; i++ { 907 height := int64(i + 1) 908 909 block, parts := makeBlock(*state, prevBlock, prevBlockMeta, privVal, height) 910 blocks = append(blocks, block) 911 912 prevBlock = block 913 prevBlockMeta = types.NewBlockMeta(block, parts) 914 915 // update state 916 state.AppHash = []byte{appHeight} 917 appHeight++ 918 state.LastBlockHeight = height 919 } 920 921 return blocks 922 } 923 924 func makeBlock(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta, 925 privVal types.PrivValidator, height int64) (*types.Block, *types.PartSet) { 926 927 lastCommit := types.NewCommit(types.BlockID{}, nil) 928 if height > 1 { 929 vote, _ := types.MakeVote( 930 lastBlock.Header.Height, 931 lastBlockMeta.BlockID, 932 state.Validators, 933 privVal, 934 lastBlock.Header.ChainID, 935 time.Now()) 936 voteCommitSig := vote.CommitSig() 937 lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{voteCommitSig}) 938 } 939 940 return state.MakeBlock(height, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().Address) 941 } 942 943 type badApp struct { 944 abci.BaseApplication 945 numBlocks byte 946 height byte 947 allHashesAreWrong bool 948 onlyLastHashIsWrong bool 949 } 950 951 func (app 
*badApp) Commit() abci.ResponseCommit { 952 app.height++ 953 if app.onlyLastHashIsWrong { 954 if app.height == app.numBlocks { 955 return abci.ResponseCommit{Data: tmrand.Bytes(8)} 956 } 957 return abci.ResponseCommit{Data: []byte{app.height}} 958 } else if app.allHashesAreWrong { 959 return abci.ResponseCommit{Data: tmrand.Bytes(8)} 960 } 961 962 panic("either allHashesAreWrong or onlyLastHashIsWrong must be set") 963 } 964 965 //-------------------------- 966 // utils for making blocks 967 968 func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { 969 var height int64 970 971 // Search for height marker 972 gr, found, err := wal.SearchForEndHeight(height, &WALSearchOptions{}) 973 if err != nil { 974 return nil, nil, err 975 } 976 if !found { 977 return nil, nil, fmt.Errorf("wal does not contain height %d", height) 978 } 979 defer gr.Close() // nolint: errcheck 980 981 // log.Notice("Build a blockchain by reading from the WAL") 982 983 var ( 984 blocks []*types.Block 985 commits []*types.Commit 986 thisBlockParts *types.PartSet 987 thisBlockCommit *types.Commit 988 ) 989 990 dec := NewWALDecoder(gr) 991 for { 992 msg, err := dec.Decode() 993 if err == io.EOF { 994 break 995 } else if err != nil { 996 return nil, nil, err 997 } 998 999 piece := readPieceFromWAL(msg) 1000 if piece == nil { 1001 continue 1002 } 1003 1004 switch p := piece.(type) { 1005 case EndHeightMessage: 1006 // if its not the first one, we have a full block 1007 if thisBlockParts != nil { 1008 var block = new(types.Block) 1009 _, err = cdc.UnmarshalBinaryLengthPrefixedReader(thisBlockParts.GetReader(), block, 0) 1010 if err != nil { 1011 panic(err) 1012 } 1013 if block.Height != height+1 { 1014 panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1)) 1015 } 1016 commitHeight := thisBlockCommit.Precommits[0].Height 1017 if commitHeight != height+1 { 1018 panic(fmt.Sprintf("commit doesnt match. 
got height %d, expected %d", commitHeight, height+1)) 1019 } 1020 blocks = append(blocks, block) 1021 commits = append(commits, thisBlockCommit) 1022 height++ 1023 } 1024 case *types.PartSetHeader: 1025 thisBlockParts = types.NewPartSetFromHeader(*p) 1026 case *types.Part: 1027 _, err := thisBlockParts.AddPart(p) 1028 if err != nil { 1029 return nil, nil, err 1030 } 1031 case *types.Vote: 1032 if p.Type == types.PrecommitType { 1033 commitSigs := []*types.CommitSig{p.CommitSig()} 1034 thisBlockCommit = types.NewCommit(p.BlockID, commitSigs) 1035 } 1036 } 1037 } 1038 // grab the last block too 1039 var block = new(types.Block) 1040 _, err = cdc.UnmarshalBinaryLengthPrefixedReader(thisBlockParts.GetReader(), block, 0) 1041 if err != nil { 1042 panic(err) 1043 } 1044 if block.Height != height+1 { 1045 panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1)) 1046 } 1047 commitHeight := thisBlockCommit.Precommits[0].Height 1048 if commitHeight != height+1 { 1049 panic(fmt.Sprintf("commit doesnt match. 
got height %d, expected %d", commitHeight, height+1)) 1050 } 1051 blocks = append(blocks, block) 1052 commits = append(commits, thisBlockCommit) 1053 return blocks, commits, nil 1054 } 1055 1056 func readPieceFromWAL(msg *TimedWALMessage) interface{} { 1057 // for logging 1058 switch m := msg.Msg.(type) { 1059 case msgInfo: 1060 switch msg := m.Msg.(type) { 1061 case *ProposalMessage: 1062 return &msg.Proposal.BlockID.PartsHeader 1063 case *BlockPartMessage: 1064 return msg.Part 1065 case *VoteMessage: 1066 return msg.Vote 1067 } 1068 case EndHeightMessage: 1069 return m 1070 } 1071 1072 return nil 1073 } 1074 1075 // fresh state and mock store 1076 func stateAndStore( 1077 config *cfg.Config, 1078 pubKey crypto.PubKey, 1079 appVersion version.Protocol) (dbm.DB, sm.State, *mockBlockStore) { 1080 stateDB := dbm.NewMemDB() 1081 state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile()) 1082 state.Version.Consensus.App = appVersion 1083 store := newMockBlockStore(config, state.ConsensusParams) 1084 sm.SaveState(stateDB, state) 1085 return stateDB, state, store 1086 } 1087 1088 //---------------------------------- 1089 // mock block store 1090 1091 type mockBlockStore struct { 1092 config *cfg.Config 1093 params types.ConsensusParams 1094 chain []*types.Block 1095 commits []*types.Commit 1096 base int64 1097 } 1098 1099 // TODO: NewBlockStore(db.NewMemDB) ... 
1100 func newMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore { 1101 return &mockBlockStore{config, params, nil, nil, 0} 1102 } 1103 1104 func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) } 1105 func (bs *mockBlockStore) Base() int64 { return bs.base } 1106 func (bs *mockBlockStore) Size() int64 { return bs.Height() - bs.Base() + 1 } 1107 func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain[height-1] } 1108 func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { 1109 return bs.chain[int64(len(bs.chain))-1] 1110 } 1111 func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { 1112 block := bs.chain[height-1] 1113 return &types.BlockMeta{ 1114 BlockID: types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()}, 1115 Header: block.Header, 1116 } 1117 } 1118 func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } 1119 func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { 1120 } 1121 func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit { 1122 return bs.commits[height-1] 1123 } 1124 func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit { 1125 return bs.commits[height-1] 1126 } 1127 1128 func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) { 1129 pruned := uint64(0) 1130 for i := int64(0); i < height-1; i++ { 1131 bs.chain[i] = nil 1132 bs.commits[i] = nil 1133 pruned++ 1134 } 1135 bs.base = height 1136 return pruned, nil 1137 } 1138 1139 //--------------------------------------- 1140 // Test handshake/init chain 1141 1142 func TestHandshakeUpdatesValidators(t *testing.T) { 1143 val, _ := types.RandValidator(true, 10) 1144 vals := types.NewValidatorSet([]*types.Validator{val}) 1145 app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)} 1146 clientCreator := 
proxy.NewLocalClientCreator(app) 1147 1148 config := ResetConfig("handshake_test_") 1149 defer os.RemoveAll(config.RootDir) 1150 privVal := privval.LoadFilePVLean(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) 1151 pubKey, err := privVal.GetPubKey() 1152 require.NoError(t, err) 1153 stateDB, state, store := stateAndStore(config, pubKey, 0x0) 1154 1155 oldValAddr := state.Validators.Validators[0].Address 1156 1157 // now start the app using the handshake - it should sync 1158 genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) 1159 handshaker := NewHandshaker(stateDB, state, store, genDoc) 1160 proxyApp := proxy.NewAppConns(clientCreator) 1161 if err := proxyApp.Start(); err != nil { 1162 t.Fatalf("Error starting proxy app connections: %v", err) 1163 } 1164 defer proxyApp.Stop() 1165 if err := handshaker.Handshake(proxyApp, nil); err != nil { 1166 t.Fatalf("Error on abci handshake: %v", err) 1167 } 1168 1169 // reload the state, check the validator set was updated 1170 state = sm.LoadState(stateDB) 1171 1172 newValAddr := state.Validators.Validators[0].Address 1173 expectValAddr := val.Address 1174 assert.NotEqual(t, oldValAddr, newValAddr) 1175 assert.Equal(t, newValAddr, expectValAddr) 1176 } 1177 1178 // returns the vals on InitChain 1179 type initChainApp struct { 1180 abci.BaseApplication 1181 vals []abci.ValidatorUpdate 1182 } 1183 1184 func (ica *initChainApp) InitChain(req abci.RequestInitChain) abci.ResponseInitChain { 1185 return abci.ResponseInitChain{ 1186 Validators: ica.vals, 1187 } 1188 }