github.com/okex/exchain@v1.8.0/libs/tendermint/consensus/replay_test.go

package consensus

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	dbm "github.com/okex/exchain/libs/tm-db"

	"github.com/okex/exchain/libs/tendermint/abci/example/kvstore"
	abci "github.com/okex/exchain/libs/tendermint/abci/types"
	cfg "github.com/okex/exchain/libs/tendermint/config"
	"github.com/okex/exchain/libs/tendermint/crypto"
	"github.com/okex/exchain/libs/tendermint/libs/log"
	tmrand "github.com/okex/exchain/libs/tendermint/libs/rand"
	mempl "github.com/okex/exchain/libs/tendermint/mempool"
	"github.com/okex/exchain/libs/tendermint/mock"
	"github.com/okex/exchain/libs/tendermint/privval"
	"github.com/okex/exchain/libs/tendermint/proxy"
	sm "github.com/okex/exchain/libs/tendermint/state"
	"github.com/okex/exchain/libs/tendermint/types"
	"github.com/okex/exchain/libs/tendermint/version"
)

func TestMain(m *testing.M) {
	config = ResetConfig("consensus_reactor_test")
	consensusReplayConfig = ResetConfig("consensus_replay_test")
	configStateTest := ResetConfig("consensus_state_test")
	configMempoolTest := ResetConfig("consensus_mempool_test")
	configByzantineTest := ResetConfig("consensus_byzantine_test")
	code := m.Run()
	os.RemoveAll(config.RootDir)
	os.RemoveAll(consensusReplayConfig.RootDir)
	os.RemoveAll(configStateTest.RootDir)
	os.RemoveAll(configMempoolTest.RootDir)
	os.RemoveAll(configByzantineTest.RootDir)
	os.Exit(code)
}

// These tests ensure we can always recover from failure at any part of the consensus process.
// There are two general failure scenarios: failure during consensus, and failure while applying the block.
// Only the latter interacts with the app and store,
// but the former has to deal with restrictions on re-use of priv_validator keys.
// The `WAL Tests` are for failures during the consensus;
// the `Handshake Tests` are for failures in applying the block.
// With the help of the WAL, we can recover from it all!

//------------------------------------------------------------------------------------------
// WAL Tests

// TODO: It would be better to verify explicitly which states we can recover from without the wal
// and which ones we need the wal for - then we'd also be able to only flush the
// wal writer when we need to, instead of with every message.
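// The replay flow these tests exercise can be summarized by the following
// sketch: open the WAL, search for the last height marker, then decode the
// messages recorded after it. This function is illustrative only - it is not
// called by any test - and it reuses only APIs already used elsewhere in this
// file (NewWAL, SearchForEndHeight, NewWALDecoder).
func sketchReplayFromWAL(walFile string, height int64) error {
	wal, err := NewWAL(walFile)
	if err != nil {
		return err
	}
	if err := wal.Start(); err != nil {
		return err
	}
	defer wal.Stop()

	// find the marker written when the given height completed
	gr, found, err := wal.SearchForEndHeight(height, &WALSearchOptions{})
	if err != nil {
		return err
	}
	if !found {
		return fmt.Errorf("wal does not contain height %d", height)
	}
	defer gr.Close()

	// decode every message recorded after that height
	dec := NewWALDecoder(gr)
	for {
		msg, err := dec.Decode()
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}
		_ = msg // a real replay would feed the message back into the state machine
	}
	return nil
}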
func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
	lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) {
	logger := log.TestingLogger()
	state, _ := sm.LoadStateFromDBOrGenesisFile(stateDB, consensusReplayConfig.GenesisFile())
	privValidator := loadPrivValidator(consensusReplayConfig)
	cs := newStateWithConfigAndBlockStore(
		consensusReplayConfig,
		state,
		privValidator,
		kvstore.NewApplication(),
		blockDB,
	)
	cs.SetLogger(logger)

	walData, _ := ioutil.ReadFile(cs.config.WalFile())
	t.Logf("====== WAL: \n\r%X\n", walData)

	err := cs.Start()
	require.NoError(t, err)
	defer cs.Stop()

	// This is just a signal that we haven't halted; it's not something contained
	// in the WAL itself. Assuming the consensus state is running, replay of any
	// WAL, including the empty one, should eventually be followed by a new
	// block, or else something is wrong.
	newBlockSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
	require.NoError(t, err)
	select {
	case <-newBlockSub.Out():
	case <-newBlockSub.Cancelled():
		t.Fatal("newBlockSub was cancelled")
	case <-time.After(120 * time.Second):
		t.Fatal("Timed out waiting for new block (see trace above)")
	}
}

func sendTxs(ctx context.Context, cs *State) {
	for i := 0; i < 256; i++ {
		select {
		case <-ctx.Done():
			return
		default:
			tx := []byte{byte(i)}
			assertMempool(cs.txNotifier).CheckTx(tx, nil, mempl.TxInfo{})
			i++ // note: combined with the loop increment, this sends every other byte value
		}
	}
}

// TestWALCrash uses a crashing WAL to test that we can recover from any WAL failure.
func TestWALCrash(t *testing.T) {
	testCases := []struct {
		name         string
		initFn       func(dbm.DB, *State, context.Context)
		heightToStop int64
	}{
		{"empty block",
			func(stateDB dbm.DB, cs *State, ctx context.Context) {},
			1},
		{"many non-empty blocks",
			func(stateDB dbm.DB, cs *State, ctx context.Context) {
				go sendTxs(ctx, cs)
			},
			3},
	}

	for i, tc := range testCases {
		tc := tc
		consensusReplayConfig := ResetConfig(fmt.Sprintf("%s_%d", t.Name(), i))
		t.Run(tc.name, func(t *testing.T) {
			crashWALandCheckLiveness(t, consensusReplayConfig, tc.initFn, tc.heightToStop)
		})
	}
}

func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config,
	initFn func(dbm.DB, *State, context.Context), heightToStop int64) {
	walPanicked := make(chan error)
	crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop}

	i := 1
LOOP:
	for {
		t.Logf("====== LOOP %d\n", i)

		// create consensus state from a clean slate
		logger := log.NewNopLogger()
		blockDB := dbm.NewMemDB()
		stateDB := blockDB
		state, _ := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
		privValidator := loadPrivValidator(consensusReplayConfig)
		cs := newStateWithConfigAndBlockStore(
			consensusReplayConfig,
			state,
			privValidator,
			kvstore.NewApplication(),
			blockDB,
		)
		cs.SetLogger(logger)

		// start sending transactions
		ctx, cancel := context.WithCancel(context.Background())
		initFn(stateDB, cs, ctx)

		// clean up WAL file from the previous iteration
		walFile := cs.config.WalFile()
		os.Remove(walFile)

		// set crashing WAL
		csWal, err := cs.OpenWAL(walFile)
		require.NoError(t, err)
		crashingWal.next = csWal
		// reset the message counter
		crashingWal.msgIndex = 1
		cs.wal = crashingWal

		// start consensus state
		err = cs.Start()
		require.NoError(t, err)

		i++

		select {
		case err := <-walPanicked:
			t.Logf("WAL panicked: %v", err)

			// make sure we can make blocks after a crash
			startNewStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateDB)

			// stop consensus state and transactions sender (initFn)
			cs.Stop()
			cancel()

			// if we reached the required height, exit
			if _, ok := err.(ReachedHeightToStopError); ok {
				break LOOP
			}
		case <-time.After(10 * time.Second):
			t.Fatal("WAL did not panic for 10 seconds (check the log)")
		}
	}
}
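// countingWAL is a hypothetical, minimal example of the delegation pattern
// that crashingWAL (below) is built on: embed the wrapped WAL so every method
// is forwarded by default, then override just the behavior being instrumented.
// It is shown for illustration only and is not used by any test.
type countingWAL struct {
	WAL           // embedded interface: all methods delegate to the wrapped WAL
	numWrites int // extra behavior layered on top
}

func (w *countingWAL) Write(m WALMessage) error {
	w.numWrites++
	return w.WAL.Write(m)
}

var _ WAL = &countingWAL{}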
// crashingWAL is a WAL that simulates a crash during Save (both before and
// after the write). It remembers the message for which it last panicked
// (lastPanickedForMsgIndex), so it doesn't panic for it again in subsequent
// iterations.
type crashingWAL struct {
	next         WAL
	panicCh      chan error
	heightToStop int64

	msgIndex                int // current message index
	lastPanickedForMsgIndex int // last message for which we panicked
}

var _ WAL = &crashingWAL{}

// WALWriteError indicates a WAL crash.
type WALWriteError struct {
	msg string
}

func (e WALWriteError) Error() string {
	return e.msg
}

// ReachedHeightToStopError indicates we've reached the required consensus
// height and may exit.
type ReachedHeightToStopError struct {
	height int64
}

func (e ReachedHeightToStopError) Error() string {
	return fmt.Sprintf("reached height to stop %d", e.height)
}

// Write simulates a WAL crash by sending an error to panicCh and then exiting
// the calling goroutine (cs.receiveRoutine) via runtime.Goexit.
func (w *crashingWAL) Write(m WALMessage) error {
	if endMsg, ok := m.(EndHeightMessage); ok {
		if endMsg.Height == w.heightToStop {
			w.panicCh <- ReachedHeightToStopError{endMsg.Height}
			runtime.Goexit()
			return nil
		}

		return w.next.Write(m)
	}

	if w.msgIndex > w.lastPanickedForMsgIndex {
		w.lastPanickedForMsgIndex = w.msgIndex
		_, file, line, _ := runtime.Caller(1)
		w.panicCh <- WALWriteError{fmt.Sprintf("failed to write %T to WAL (fileline: %s:%d)", m, file, line)}
		runtime.Goexit()
		return nil
	}

	w.msgIndex++
	return w.next.Write(m)
}

func (w *crashingWAL) WriteSync(m WALMessage) error {
	return w.Write(m)
}

// Reset satisfies the WAL interface; it simply stops the wrapped WAL, which is
// all these tests need.
func (w *crashingWAL) Reset() error { return w.next.Stop() }

func (w *crashingWAL) FlushAndSync() error { return w.next.FlushAndSync() }

func (w *crashingWAL) SearchForEndHeight(
	height int64,
	options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
	return w.next.SearchForEndHeight(height, options)
}

func (w *crashingWAL) Start() error { return w.next.Start() }
func (w *crashingWAL) Stop() error  { return w.next.Stop() }
func (w *crashingWAL) Wait()        { w.next.Wait() }

//------------------------------------------------------------------------------------------
type testSim struct {
	GenesisState sm.State
	Config       *cfg.Config
	Chain        []*types.Block
	Commits      []*types.Commit
	CleanupFunc  cleanupFunc
}

const (
	numBlocks = 6
)

var (
	mempool = mock.Mempool{}
	evpool  = sm.MockEvidencePool{}

	sim testSim
)

//---------------------------------------
// Test handshake/replay

// 0 - all synced up
// 1 - saved block but app and state are behind
// 2 - saved block and committed, but state is behind
// 3 - saved block and committed, with truncated block store and state behind
var modes = []uint{0, 1, 2, 3}
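// modeDescription is an illustrative helper (not called by any test) that
// spells out what each replay mode above simulates about the node's stores at
// the moment of restart.
func modeDescription(mode uint) string {
	switch mode {
	case 0:
		return "block store, app, and tendermint state are all caught up"
	case 1:
		return "block is saved, but app and tendermint state are one block behind"
	case 2:
		return "block is saved and the app committed it, but tendermint state is behind"
	case 3:
		return "like mode 2, but the block store has also been truncated (pruned)"
	default:
		return "unknown mode"
	}
}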
// This is not really a test: it generates and stores validator-change tx data
// for testHandshakeReplay.
func TestSimulateValidatorsChange(t *testing.T) {
	nPeers := 7
	nVals := 4
	css, genDoc, config, cleanup := randConsensusNetWithPeers(
		nVals,
		nPeers,
		"replay_test",
		newMockTickerFunc(true),
		newPersistentKVStoreWithPath)
	sim.Config = config
	sim.GenesisState, _ = sm.MakeGenesisState(genDoc)
	sim.CleanupFunc = cleanup

	partSize := types.BlockPartSizeBytes

	newRoundCh := subscribe(css[0].eventBus, types.EventQueryNewRound)
	proposalCh := subscribe(css[0].eventBus, types.EventQueryCompleteProposal)

	vss := make([]*validatorStub, nPeers)
	for i := 0; i < nPeers; i++ {
		vss[i] = newValidatorStub(css[i].privValidator, i)
	}
	height, round := css[0].Height, css[0].Round
	// start the machine
	startTestRound(css[0], height, round)
	incrementHeight(vss...)
	ensureNewRound(newRoundCh, height, 0)
	ensureNewProposal(proposalCh, height, round)
	rs := css[0].GetRoundState()
	signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)
	ensureNewRound(newRoundCh, height+1, 0)

	// height 2
	height++
	incrementHeight(vss...)
	newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
	require.NoError(t, err)
	valPubKey1ABCI := types.TM2PB.PubKey(newValidatorPubKey1)
	newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)
	err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx1, nil, mempl.TxInfo{})
	assert.Nil(t, err)
	propBlock, _ := css[0].createProposalBlock() //changeProposer(t, cs1, vs2)
	propBlockParts := propBlock.MakePartSet(partSize)
	blockID := types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
	proposal := types.NewProposal(vss[1].Height, round, -1, blockID)
	if err := vss[1].SignProposal(config.ChainID(), proposal); err != nil {
		t.Fatal("failed to sign bad proposal", err)
	}

	// set the proposal block
	if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}
	ensureNewProposal(proposalCh, height, round)
	rs = css[0].GetRoundState()
	signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)
	ensureNewRound(newRoundCh, height+1, 0)

	// height 3
	height++
	incrementHeight(vss...)
	updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
	require.NoError(t, err)
	updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1)
	updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
	err = assertMempool(css[0].txNotifier).CheckTx(updateValidatorTx1, nil, mempl.TxInfo{})
	assert.Nil(t, err)
	propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2)
	propBlockParts = propBlock.MakePartSet(partSize)
	blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
	proposal = types.NewProposal(vss[2].Height, round, -1, blockID)
	if err := vss[2].SignProposal(config.ChainID(), proposal); err != nil {
		t.Fatal("failed to sign bad proposal", err)
	}

	// set the proposal block
	if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}
	ensureNewProposal(proposalCh, height, round)
	rs = css[0].GetRoundState()
	signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)
	ensureNewRound(newRoundCh, height+1, 0)

	// height 4
	height++
	incrementHeight(vss...)
	newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey()
	require.NoError(t, err)
	newVal2ABCI := types.TM2PB.PubKey(newValidatorPubKey2)
	newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)
	err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx2, nil, mempl.TxInfo{})
	assert.Nil(t, err)
	newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey()
	require.NoError(t, err)
	newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3)
	newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)
	err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx3, nil, mempl.TxInfo{})
	assert.Nil(t, err)
	propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2)
	propBlockParts = propBlock.MakePartSet(partSize)
	blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
	newVss := make([]*validatorStub, nVals+1)
	copy(newVss, vss[:nVals+1])
	sort.Sort(ValidatorStubsByAddress(newVss))
	selfIndex := 0
	for i, vs := range newVss {
		vsPubKey, err := vs.GetPubKey()
		require.NoError(t, err)

		css0PubKey, err := css[0].privValidator.GetPubKey()
		require.NoError(t, err)

		if vsPubKey.Equals(css0PubKey) {
			selfIndex = i
			break
		}
	}

	proposal = types.NewProposal(vss[3].Height, round, -1, blockID)
	if err := vss[3].SignProposal(config.ChainID(), proposal); err != nil {
		t.Fatal("failed to sign bad proposal", err)
	}

	// set the proposal block
	if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}
	ensureNewProposal(proposalCh, height, round)

	removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
	err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx2, nil, mempl.TxInfo{})
	assert.Nil(t, err)

	rs = css[0].GetRoundState()
	for i := 0; i < nVals+1; i++ {
		if i == selfIndex {
			continue
		}
		signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])
	}

	ensureNewRound(newRoundCh, height+1, 0)

	// height 5
	height++
	incrementHeight(vss...)
	ensureNewProposal(proposalCh, height, round)
	rs = css[0].GetRoundState()
	for i := 0; i < nVals+1; i++ {
		if i == selfIndex {
			continue
		}
		signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])
	}
	ensureNewRound(newRoundCh, height+1, 0)

	// height 6
	height++
	incrementHeight(vss...)
	removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)
	err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx3, nil, mempl.TxInfo{})
	assert.Nil(t, err)
	propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2)
	propBlockParts = propBlock.MakePartSet(partSize)
	blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
	newVss = make([]*validatorStub, nVals+3)
	copy(newVss, vss[:nVals+3])
	sort.Sort(ValidatorStubsByAddress(newVss))
	for i, vs := range newVss {
		vsPubKey, err := vs.GetPubKey()
		require.NoError(t, err)

		css0PubKey, err := css[0].privValidator.GetPubKey()
		require.NoError(t, err)

		if vsPubKey.Equals(css0PubKey) {
			selfIndex = i
			break
		}
	}
	proposal = types.NewProposal(vss[1].Height, round, -1, blockID)
	if err := vss[1].SignProposal(config.ChainID(), proposal); err != nil {
		t.Fatal("failed to sign bad proposal", err)
	}

	// set the proposal block
	if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}
	ensureNewProposal(proposalCh, height, round)
	rs = css[0].GetRoundState()
	for i := 0; i < nVals+3; i++ {
		if i == selfIndex {
			continue
		}
		signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])
	}
	ensureNewRound(newRoundCh, height+1, 0)

	sim.Chain = make([]*types.Block, 0)
	sim.Commits = make([]*types.Commit, 0)
	for i := 1; i <= numBlocks; i++ {
		sim.Chain = append(sim.Chain, css[0].blockStore.LoadBlock(int64(i)))
		sim.Commits = append(sim.Commits, css[0].blockStore.LoadBlockCommit(int64(i)))
	}
}
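// selfIndexIn is a hypothetical helper, shown only to make explicit the search
// that TestSimulateValidatorsChange performs twice above: locate css[0]'s own
// position in a sorted slice of validator stubs, so the test can skip signing
// a vote on behalf of the node whose consensus state is actually running.
func selfIndexIn(vss []*validatorStub, self types.PrivValidator) (int, error) {
	selfPubKey, err := self.GetPubKey()
	if err != nil {
		return 0, err
	}
	for i, vs := range vss {
		pubKey, err := vs.GetPubKey()
		if err != nil {
			return 0, err
		}
		if pubKey.Equals(selfPubKey) {
			return i, nil
		}
	}
	return 0, fmt.Errorf("self not found among %d validator stubs", len(vss))
}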
// Sync from scratch
func TestHandshakeReplayAll(t *testing.T) {
	for _, m := range modes {
		testHandshakeReplay(t, config, 0, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, 0, m, true)
	}
}

// Sync many, not from scratch
func TestHandshakeReplaySome(t *testing.T) {
	for _, m := range modes {
		testHandshakeReplay(t, config, 2, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, 2, m, true)
	}
}

// Sync from lagging by one
func TestHandshakeReplayOne(t *testing.T) {
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks-1, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks-1, m, true)
	}
}

// Sync from caught up
func TestHandshakeReplayNone(t *testing.T) {
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks, m, true)
	}
}

// TestMockProxyApp checks that mockProxyApp does not panic when the app
// returns ABCIResponses containing an empty ResponseDeliverTx.
func TestMockProxyApp(t *testing.T) {
	t.Skip("test is disabled")
	sim.CleanupFunc() // clean the test env created in TestSimulateValidatorsChange
	logger := log.TestingLogger()
	var validTxs, invalidTxs = 0, 0
	txIndex := 0

	assert.NotPanics(t, func() {
		abciResWithEmptyDeliverTx := new(sm.ABCIResponses)
		abciResWithEmptyDeliverTx.DeliverTxs = make([]*abci.ResponseDeliverTx, 0)
		abciResWithEmptyDeliverTx.DeliverTxs = append(abciResWithEmptyDeliverTx.DeliverTxs, &abci.ResponseDeliverTx{})

		// called when saveABCIResponses:
		buf := cdc.MustMarshalBinaryBare(abciResWithEmptyDeliverTx)
		loadedAbciRes := new(sm.ABCIResponses)

		// this also happens in sm.LoadABCIResponses
		err := cdc.UnmarshalBinaryBare(buf, loadedAbciRes)
		require.NoError(t, err)

		mock := newMockProxyApp([]byte("mock_hash"), loadedAbciRes)

		abciRes := new(sm.ABCIResponses)
		abciRes.DeliverTxs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTxs))
		// Execute transactions and get hash.
		proxyCb := func(req *abci.Request, res *abci.Response) {
			if r, ok := res.Value.(*abci.Response_DeliverTx); ok {
				// TODO: make use of res.Log
				// TODO: make use of this info
				// Blocks may include invalid txs.
				txRes := r.DeliverTx
				if txRes.Code == abci.CodeTypeOK {
					validTxs++
				} else {
					logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log)
					invalidTxs++
				}
				abciRes.DeliverTxs[txIndex] = txRes
				txIndex++
			}
		}
		mock.SetResponseCallback(proxyCb)

		someTx := []byte("tx")
		mock.DeliverTxAsync(abci.RequestDeliverTx{Tx: someTx})
	})
	assert.True(t, validTxs == 1)
	assert.True(t, invalidTxs == 0)
}

func tempWALWithData(data []byte) string {
	walFile, err := ioutil.TempFile("", "wal")
	if err != nil {
		panic(fmt.Sprintf("failed to create temp WAL file: %v", err))
	}
	_, err = walFile.Write(data)
	if err != nil {
		panic(fmt.Sprintf("failed to write to temp WAL file: %v", err))
	}
	if err := walFile.Close(); err != nil {
		panic(fmt.Sprintf("failed to close temp WAL file: %v", err))
	}
	return walFile.Name()
}

// Make some blocks. Start a fresh app and apply nBlocks blocks.
// Then restart the app and sync it up with the remaining blocks.
func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint, testValidatorsChange bool) {
	var chain []*types.Block
	var commits []*types.Commit
	var store *mockBlockStore
	var stateDB dbm.DB
	var genesisState sm.State
	if testValidatorsChange {
		testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
		defer os.RemoveAll(testConfig.RootDir)
		stateDB = dbm.NewMemDB()
		genesisState = sim.GenesisState
		config = sim.Config
		chain = append([]*types.Block{}, sim.Chain...) // copy chain
		commits = sim.Commits
		store = newMockBlockStore(config, genesisState.ConsensusParams)
	} else { // test single node
		testConfig := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
		defer os.RemoveAll(testConfig.RootDir)
		walBody, err := WALWithNBlocks(t, numBlocks)
		require.NoError(t, err)
		walFile := tempWALWithData(walBody)
		config.Consensus.SetWalFile(walFile)

		privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())

		wal, err := NewWAL(walFile)
		require.NoError(t, err)
		wal.SetLogger(log.TestingLogger())
		err = wal.Start()
		require.NoError(t, err)
		defer wal.Stop()

		chain, commits, err = makeBlockchainFromWAL(wal)
		require.NoError(t, err)
		pubKey, err := privVal.GetPubKey()
		require.NoError(t, err)
		stateDB, genesisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion)
	}
	store.chain = chain
	store.commits = commits

	state := genesisState.Copy()
	// run the chain through state.ApplyBlock to build up the tendermint state
	state = buildTMStateFromChain(config, stateDB, state, chain, nBlocks, mode)
	latestAppHash := state.AppHash

	// make a new client creator
	kvstoreApp := kvstore.NewPersistentKVStoreApplication(
		filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_a", nBlocks, mode)))
	clientCreator2 := proxy.NewLocalClientCreator(kvstoreApp)
	if nBlocks > 0 {
		// run nBlocks against a new client to build up the app state.
		// use a throwaway tendermint state
		proxyApp := proxy.NewAppConns(clientCreator2)
		stateDB1 := dbm.NewMemDB()
		sm.SaveState(stateDB1, genesisState)
		buildAppStateFromChain(proxyApp, stateDB1, genesisState, chain, nBlocks, mode)
	}

	// Prune block store if requested
	expectError := false
	if mode == 3 {
		pruned, err := store.PruneBlocks(2)
		require.NoError(t, err)
		require.EqualValues(t, 1, pruned)
		expectError = int64(nBlocks) < 2
	}

	// now start the app using the handshake - it should sync
	genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
	handshaker := NewHandshaker(stateDB, state, store, genDoc)
	proxyApp := proxy.NewAppConns(clientCreator2)
	if err := proxyApp.Start(); err != nil {
		t.Fatalf("Error starting proxy app connections: %v", err)
	}
	defer proxyApp.Stop()
	err := handshaker.Handshake(proxyApp)
	if expectError {
		require.Error(t, err)
		return
	} else if err != nil {
		t.Fatalf("Error on abci handshake: %v", err)
	}

	// get the latest app hash from the app
	res, err := proxyApp.Query().InfoSync(abci.RequestInfo{Version: ""})
	if err != nil {
		t.Fatal(err)
	}

	// the app hash should be synced up
	if !bytes.Equal(latestAppHash, res.LastBlockAppHash) {
		t.Fatalf(
			"Expected app hashes to match after handshake/replay. got %X, expected %X",
			res.LastBlockAppHash,
			latestAppHash)
	}

	expectedBlocksToSync := numBlocks - nBlocks
	if nBlocks == numBlocks && mode > 0 {
		expectedBlocksToSync++
	} else if nBlocks > 0 && mode == 1 {
		expectedBlocksToSync++
	}

	if handshaker.NBlocks() != expectedBlocksToSync {
		t.Fatalf("Expected handshake to sync %d blocks, got %d", expectedBlocksToSync, handshaker.NBlocks())
	}
}
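// A concrete example of the expectedBlocksToSync arithmetic above: with
// numBlocks = 6 and nBlocks = 2 in mode 1, the block store holds block 2 but
// the app never finished applying it, so the handshake replays block 2 in
// addition to blocks 3..6: 6 - 2 + 1 = 5 blocks.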
func applyBlock(stateDB dbm.DB, st sm.State, blk *types.Block, proxyApp proxy.AppConns) sm.State {
	testPartSize := types.BlockPartSizeBytes
	blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)

	blkID := types.BlockID{Hash: blk.Hash(), PartsHeader: blk.MakePartSet(testPartSize).Header()}
	newState, _, err := blockExec.ApplyBlock(st, blkID, blk)
	if err != nil {
		panic(err)
	}
	return newState
}

func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB,
	state sm.State, chain []*types.Block, nBlocks int, mode uint) {
	// start a new app without handshake, play nBlocks blocks
	if err := proxyApp.Start(); err != nil {
		panic(err)
	}
	defer proxyApp.Stop()

	state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version
	validators := types.TM2PB.ValidatorUpdates(state.Validators)
	if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
		Validators: validators,
	}); err != nil {
		panic(err)
	}
	sm.SaveState(stateDB, state) // save height 1's validatorsInfo

	switch mode {
	case 0:
		for i := 0; i < nBlocks; i++ {
			block := chain[i]
			state = applyBlock(stateDB, state, block, proxyApp)
		}
	case 1, 2, 3:
		for i := 0; i < nBlocks-1; i++ {
			block := chain[i]
			state = applyBlock(stateDB, state, block, proxyApp)
		}

		if mode == 2 || mode == 3 {
			// update the kvstore height and apphash,
			// as if we ran Commit but did not save the tendermint state
			state = applyBlock(stateDB, state, chain[nBlocks-1], proxyApp)
		}
	default:
		panic(fmt.Sprintf("unknown mode %v", mode))
	}
}
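// Note the division of labor between the two builders: buildAppStateFromChain
// (above) replays blocks into a fresh app to reconstruct the application-side
// state, while buildTMStateFromChain (below) runs the same blocks through
// state.ApplyBlock to produce the tendermint-side state that the handshake
// will start from.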
func buildTMStateFromChain(
	config *cfg.Config,
	stateDB dbm.DB,
	state sm.State,
	chain []*types.Block,
	nBlocks int,
	mode uint) sm.State {
	// run the whole chain against this client to build up the tendermint state
	clientCreator := proxy.NewLocalClientCreator(
		kvstore.NewPersistentKVStoreApplication(
			filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode))))
	proxyApp := proxy.NewAppConns(clientCreator)
	if err := proxyApp.Start(); err != nil {
		panic(err)
	}
	defer proxyApp.Stop()

	state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version
	validators := types.TM2PB.ValidatorUpdates(state.Validators)
	if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
		Validators: validators,
	}); err != nil {
		panic(err)
	}
	sm.SaveState(stateDB, state) // save height 1's validatorsInfo

	switch mode {
	case 0:
		// sync right up
		for _, block := range chain {
			state = applyBlock(stateDB, state, block, proxyApp)
		}

	case 1, 2, 3:
		// sync up to the penultimate block, as if we stored the last block;
		// whether we commit or not depends on the appHash
		for _, block := range chain[:len(chain)-1] {
			state = applyBlock(stateDB, state, block, proxyApp)
		}

		// apply the final block to a state copy so we can
		// get the right next appHash but keep the state back
		applyBlock(stateDB, state, chain[len(chain)-1], proxyApp)
	default:
		panic(fmt.Sprintf("unknown mode %v", mode))
	}

	return state
}

func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
	// 1. Initialize tendermint and commit 3 blocks with the following app hashes:
	//   - 0x01
	//   - 0x02
	//   - 0x03
	config := ResetConfig("handshake_test_")
	defer os.RemoveAll(config.RootDir)
	privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
	const appVersion = 0x0
	pubKey, err := privVal.GetPubKey()
	require.NoError(t, err)
	stateDB, state, store := stateAndStore(config, pubKey, appVersion)
	genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
	state.LastValidators = state.Validators.Copy()
	// mode = 0 for committing all the blocks
	blocks := makeBlocks(3, &state, privVal)
	store.chain = blocks

	// 2. Tendermint must panic if the app returns the wrong hash for the first block
	//   - RANDOM HASH
	//   - 0x02
	//   - 0x03
	{
		app := &badApp{numBlocks: 3, allHashesAreWrong: true}
		clientCreator := proxy.NewLocalClientCreator(app)
		proxyApp := proxy.NewAppConns(clientCreator)
		err := proxyApp.Start()
		require.NoError(t, err)
		defer proxyApp.Stop()

		assert.Panics(t, func() {
			h := NewHandshaker(stateDB, state, store, genDoc)
			h.Handshake(proxyApp)
		})
	}

	// 3. Tendermint must panic if the app returns the wrong hash for the last block
	//   - 0x01
	//   - 0x02
	//   - RANDOM HASH
	{
		app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true}
		clientCreator := proxy.NewLocalClientCreator(app)
		proxyApp := proxy.NewAppConns(clientCreator)
		err := proxyApp.Start()
		require.NoError(t, err)
		defer proxyApp.Stop()

		assert.Panics(t, func() {
			h := NewHandshaker(stateDB, state, store, genDoc)
			h.Handshake(proxyApp)
		})
	}
}

func makeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Block {
	blocks := make([]*types.Block, 0)

	var (
		prevBlock     *types.Block
		prevBlockMeta *types.BlockMeta
	)

	appHashByte := byte(0x01) // app hash for the next committed block
	for i := 0; i < n; i++ {
		height := int64(i + 1)

		block, parts := makeBlock(*state, prevBlock, prevBlockMeta, privVal, height)
		blocks = append(blocks, block)

		prevBlock = block
		prevBlockMeta = types.NewBlockMeta(block, parts)

		// update state
		state.AppHash = []byte{appHashByte}
		appHashByte++
		state.LastBlockHeight = height
	}

	return blocks
}

func makeBlock(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta,
	privVal types.PrivValidator, height int64) (*types.Block, *types.PartSet) {

	lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, nil)
	if height > 1 {
		vote, _ := types.MakeVote(
			lastBlock.Header.Height,
			lastBlockMeta.BlockID,
			state.Validators,
			privVal,
			lastBlock.Header.ChainID,
			time.Now())
		lastCommit = types.NewCommit(vote.Height, vote.Round,
			lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
	}

	return state.MakeBlock(height, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().Address)
}
type badApp struct {
	abci.BaseApplication
	numBlocks           byte
	height              byte
	allHashesAreWrong   bool
	onlyLastHashIsWrong bool
}

func (app *badApp) Commit(rc abci.RequestCommit) abci.ResponseCommit {
	app.height++
	if app.onlyLastHashIsWrong {
		if app.height == app.numBlocks {
			return abci.ResponseCommit{Data: tmrand.Bytes(8)}
		}
		return abci.ResponseCommit{Data: []byte{app.height}}
	} else if app.allHashesAreWrong {
		return abci.ResponseCommit{Data: tmrand.Bytes(8)}
	}

	panic("either allHashesAreWrong or onlyLastHashIsWrong must be set")
}

//--------------------------
// utils for making blocks

func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
	var height int64

	// Search for height marker
	gr, found, err := wal.SearchForEndHeight(height, &WALSearchOptions{})
	if err != nil {
		return nil, nil, err
	}
	if !found {
		return nil, nil, fmt.Errorf("wal does not contain height %d", height)
	}
	defer gr.Close() // nolint: errcheck

	// log.Notice("Build a blockchain by reading from the WAL")

	var (
		blocks          []*types.Block
		commits         []*types.Commit
		thisBlockParts  *types.PartSet
		thisBlockCommit *types.Commit
	)

	dec := NewWALDecoder(gr)
	for {
		msg, err := dec.Decode()
		if err == io.EOF {
			break
		} else if err != nil {
			return nil, nil, err
		}

		piece := readPieceFromWAL(msg)
		if piece == nil {
			continue
		}

		switch p := piece.(type) {
		case EndHeightMessage:
			// if it's not the first one, we have a full block
			if thisBlockParts != nil {
				var block = new(types.Block)
				_, err = cdc.UnmarshalBinaryLengthPrefixedReader(thisBlockParts.GetReader(), block, 0)
				if err != nil {
					panic(err)
				}
				if block.Height != height+1 {
					panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1))
				}
				commitHeight := thisBlockCommit.Height
				if commitHeight != height+1 {
					panic(fmt.Sprintf("commit doesn't match. got height %d, expected %d", commitHeight, height+1))
				}
				blocks = append(blocks, block)
				commits = append(commits, thisBlockCommit)
				height++
			}
		case *types.PartSetHeader:
			thisBlockParts = types.NewPartSetFromHeader(*p)
		case *types.Part:
			_, err := thisBlockParts.AddPart(p)
			if err != nil {
				return nil, nil, err
			}
		case *types.Vote:
			if p.Type == types.PrecommitType {
				thisBlockCommit = types.NewCommit(p.Height, p.Round,
					p.BlockID, []types.CommitSig{p.CommitSig()})
			}
		}
	}
	// grab the last block too
	var block = new(types.Block)
	_, err = cdc.UnmarshalBinaryLengthPrefixedReader(thisBlockParts.GetReader(), block, 0)
	if err != nil {
		panic(err)
	}
	if block.Height != height+1 {
		panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1))
	}
	commitHeight := thisBlockCommit.Height
	if commitHeight != height+1 {
		panic(fmt.Sprintf("commit doesn't match. got height %d, expected %d", commitHeight, height+1))
	}
	blocks = append(blocks, block)
	commits = append(commits, thisBlockCommit)
	return blocks, commits, nil
}

func readPieceFromWAL(msg *TimedWALMessage) interface{} {
	// for logging
	switch m := msg.Msg.(type) {
	case msgInfo:
		switch msg := m.Msg.(type) {
		case *ProposalMessage:
			return &msg.Proposal.BlockID.PartsHeader
		case *BlockPartMessage:
			return msg.Part
		case *VoteMessage:
			return msg.Vote
		}
	case EndHeightMessage:
		return m
	}

	return nil
}

// fresh state and mock store
func stateAndStore(
	config *cfg.Config,
	pubKey crypto.PubKey,
	appVersion version.Protocol) (dbm.DB, sm.State, *mockBlockStore) {
	stateDB := dbm.NewMemDB()
	state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile())
	state.Version.Consensus.App = appVersion
	store := newMockBlockStore(config, state.ConsensusParams)
	sm.SaveState(stateDB, state)
	return stateDB, state, store
}

//----------------------------------
// mock block store

type mockBlockStore struct {
	config  *cfg.Config
	params  types.ConsensusParams
	chain   []*types.Block
	commits []*types.Commit
	base    int64
}

// TODO: NewBlockStore(db.NewMemDB) ...
func newMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore {
	return &mockBlockStore{config, params, nil, nil, 0}
}

func (bs *mockBlockStore) Height() int64                       { return int64(len(bs.chain)) }
func (bs *mockBlockStore) Base() int64                         { return bs.base }
func (bs *mockBlockStore) Size() int64                         { return bs.Height() - bs.Base() + 1 }
func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain[height-1] }
func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block {
	// mock: ignores the hash and returns the newest block
	return bs.chain[int64(len(bs.chain))-1]
}
func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
	block := bs.chain[height-1]
	return &types.BlockMeta{
		BlockID: types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()},
		Header:  block.Header,
	}
}
func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil }
func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
}
func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit {
	return bs.commits[height-1]
}
func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit {
	return bs.commits[height-1]
}

func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) {
	pruned := uint64(0)
	for i := int64(0); i < height-1; i++ {
		bs.chain[i] = nil
		bs.commits[i] = nil
		pruned++
	}
	bs.base = height
	return pruned, nil
}
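// A quick illustration of the pruning semantics above (a hypothetical snippet,
// not an actual test): PruneBlocks(2) nils out only block 1, so the base
// becomes 2 while the height stays at the chain length.
func sketchPruneSemantics() {
	bs := newMockBlockStore(nil, types.ConsensusParams{})
	bs.chain = make([]*types.Block, 3) // heights 1..3; contents don't matter here
	bs.commits = make([]*types.Commit, 3)
	pruned, _ := bs.PruneBlocks(2)
	fmt.Printf("pruned=%d base=%d height=%d\n", pruned, bs.Base(), bs.Height()) // pruned=1 base=2 height=3
}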
// DeleteBlocksFromTop removes blocks from the top down to (but not including)
// the given height. It returns the number of blocks deleted.
func (bs *mockBlockStore) DeleteBlocksFromTop(height int64) (uint64, error) {
	deleted := uint64(0)
	top := bs.Height()
	for i := top; i > height; i-- {
		bs.chain[i-1] = nil
		bs.commits[i-1] = nil
		deleted++
	}
	return deleted, nil
}

//---------------------------------------
// Test handshake/init chain

func TestHandshakeUpdatesValidators(t *testing.T) {
	val, _ := types.RandValidator(true, 10)
	vals := types.NewValidatorSet([]*types.Validator{val})
	app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)}
	clientCreator := proxy.NewLocalClientCreator(app)

	config := ResetConfig("handshake_test_")
	defer os.RemoveAll(config.RootDir)
	privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
	pubKey, err := privVal.GetPubKey()
	require.NoError(t, err)
	stateDB, state, store := stateAndStore(config, pubKey, 0x0)

	oldValAddr := state.Validators.Validators[0].Address

	// now start the app using the handshake - it should sync
	genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
	handshaker := NewHandshaker(stateDB, state, store, genDoc)
	proxyApp := proxy.NewAppConns(clientCreator)
	if err := proxyApp.Start(); err != nil {
		t.Fatalf("Error starting proxy app connections: %v", err)
	}
	defer proxyApp.Stop()
	if err := handshaker.Handshake(proxyApp); err != nil {
		t.Fatalf("Error on abci handshake: %v", err)
	}

	// reload the state, check the validator set was updated
	state = sm.LoadState(stateDB)

	newValAddr := state.Validators.Validators[0].Address
	expectValAddr := val.Address
	assert.NotEqual(t, oldValAddr, newValAddr)
	assert.Equal(t, newValAddr, expectValAddr)
}

// initChainApp returns the given validators on InitChain.
type initChainApp struct {
	abci.BaseApplication
	vals []abci.ValidatorUpdate
}

func (ica *initChainApp) InitChain(req abci.RequestInitChain) abci.ResponseInitChain {
	return abci.ResponseInitChain{
		Validators: ica.vals,
	}
}