// github.com/Finschia/ostracon@v1.1.5/consensus/replay_test.go
package consensus

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"testing"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	abci "github.com/tendermint/tendermint/abci/types"
	tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	dbm "github.com/tendermint/tm-db"

	"github.com/Finschia/ostracon/abci/example/kvstore"
	ocabci "github.com/Finschia/ostracon/abci/types"
	cfg "github.com/Finschia/ostracon/config"
	"github.com/Finschia/ostracon/crypto"
	cryptoenc "github.com/Finschia/ostracon/crypto/encoding"
	"github.com/Finschia/ostracon/libs/log"
	tmpubsub "github.com/Finschia/ostracon/libs/pubsub"
	tmrand "github.com/Finschia/ostracon/libs/rand"
	mempl "github.com/Finschia/ostracon/mempool"
	"github.com/Finschia/ostracon/privval"
	ocproto "github.com/Finschia/ostracon/proto/ostracon/types"
	"github.com/Finschia/ostracon/proxy"
	sm "github.com/Finschia/ostracon/state"
	"github.com/Finschia/ostracon/types"
	"github.com/Finschia/ostracon/version"
)

func TestMain(m *testing.M) {
	config = ResetConfig("consensus_reactor_test")
	consensusReplayConfig = ResetConfig("consensus_replay_test")
	configStateTest := ResetConfig("consensus_state_test")
	configMempoolTest := ResetConfig("consensus_mempool_test")
	configByzantineTest := ResetConfig("consensus_byzantine_test")
	code := m.Run()
	os.RemoveAll(config.RootDir)
	os.RemoveAll(consensusReplayConfig.RootDir)
	os.RemoveAll(configStateTest.RootDir)
	os.RemoveAll(configMempoolTest.RootDir)
	os.RemoveAll(configByzantineTest.RootDir)
	os.Exit(code)
}

// These tests ensure we can always recover from failure at any part of the consensus process.
// There are two general failure scenarios: failure during consensus, and failure while applying the block.
// Only the latter interacts with the app and store,
// but the former has to deal with restrictions on re-use of priv_validator keys.
// The `WAL Tests` are for failures during the consensus;
// the `Handshake Tests` are for failures in applying the block.
// With the help of the WAL, we can recover from it all!

//------------------------------------------------------------------------------------------
// WAL Tests

// TODO: It would be better to verify explicitly which states we can recover from without the wal
// and which ones we need the wal for - then we'd also be able to only flush the
// wal writer when we need to, instead of with every message.

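// startNewStateAndWaitForBlock restarts a consensus state on top of the given
// blockDB and stateStore (i.e. after a simulated crash) and waits until a new
// block is committed, as a liveness check that WAL replay left the node in a
// working state.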
func startNewStateAndWaitForBlock(t *testing.T, i int, consensusReplayConfig *cfg.Config,
	blockDB dbm.DB, stateStore sm.Store) {
	logger := log.TestingLogger().With("attr", "make block", "i", i)
	state, _ := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile())
	privValidator := loadPrivValidator(consensusReplayConfig)
	cs := newStateWithConfigAndBlockStoreWithLoggers(
		consensusReplayConfig,
		state,
		privValidator,
		kvstore.NewApplication(),
		blockDB,
		NewTestLoggers(
			log.NewNopLogger().With("module", "mempool"),
			log.NewNopLogger().With("module", "evidence"),
			logger.With("module", "executor"),
			logger.With("module", "consensus"),
			log.NewNopLogger().With("module", "event")),
	)

	err := cs.Start()
	require.NoError(t, err)
	defer func() {
		if err := cs.Stop(); err != nil {
			t.Error(err)
		}
		// Wait for the WAL to close after the remaining messages have been written to it
		cs.Wait()
	}()

	// This is just a signal that we haven't halted; it's not something contained
	// in the WAL itself. Assuming the consensus state is running, replay of any
	// WAL, including the empty one, should eventually be followed by a new
	// block, or else something is wrong.
	newBlockSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
	require.NoError(t, err)
	select {
	case msg := <-newBlockSub.Out():
		height := msg.Data().(types.EventDataNewBlock).Block.Height
		t.Logf("Make Block.Height[%d]", height)
	case <-newBlockSub.Cancelled():
		t.Fatal("newBlockSub was cancelled")
	case <-time.After(10 * time.Second): // XXX 120 seconds is too long, so it was shortened to 10 seconds
		t.Fatal("Timed out waiting for new block (see trace above)")
	}
}

func sendTxs(ctx context.Context, cs *State) {
	for i := 0; i < 256; i++ {
		select {
		case <-ctx.Done():
			return
		default:
			tx := []byte{byte(i)}
			if err := assertMempool(cs.txNotifier).CheckTxSync(tx, nil, mempl.TxInfo{}); err != nil {
				panic(err)
			}
			i++
		}
	}
}

// TestWALCrash uses a crashing WAL to test that we can recover from any WAL failure.
func TestWALCrash(t *testing.T) {
	// TODO: The execution of this test case often fails for indeterminate reasons.
	// The failure is a timeout with a "Timed out waiting for new block" or "WAL did not panic for
	// XX seconds" message, but the behavior that causes it is not reproducible. This issue also occurs in Ostracon,
	// but seems to be somewhat more pronounced with some of the changes in Ostracon.
	// See also: https://github.com/tendermint/tendermint/issues/1040
	testCases := []struct {
		name         string
		initFn       func(dbm.DB, *State, context.Context)
		heightToStop int64
	}{
		{
			"empty block",
			func(stateDB dbm.DB, cs *State, ctx context.Context) {},
			1,
		},
		{
			"many non-empty blocks",
			func(stateDB dbm.DB, cs *State, ctx context.Context) {
				go sendTxs(ctx, cs)
			},
			3,
		},
	}

	for i, tc := range testCases {
		tc := tc
		consensusReplayConfig := ResetConfig(fmt.Sprintf("%s_%d", t.Name(), i))
		t.Run(tc.name, func(t *testing.T) {
			crashWALandCheckLiveness(t, consensusReplayConfig, tc.initFn, tc.heightToStop)
		})
	}
}

func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config,
	initFn func(dbm.DB, *State, context.Context), heightToStop int64,
) {
	walPanicked := make(chan error)
	crashingWal := &crashingWAL{t: t, panicCh: walPanicked, heightToStop: heightToStop}

	i := 1
LOOP:
	for {
		t.Logf("====== LOOP %d\n", i)

		// create consensus state from a clean slate
		blockDB := dbm.NewMemDB()
		stateDB := blockDB
		stateStore := sm.NewStore(stateDB, sm.StoreOptions{
			DiscardABCIResponses: false,
		})
		state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
		require.NoError(t, err)
		privValidator := loadPrivValidator(consensusReplayConfig)
		logger := log.TestingLogger().With("attr", "crash wal", "i", i)
		cs := newStateWithConfigAndBlockStoreWithLoggers(
			consensusReplayConfig,
			state,
			privValidator,
			kvstore.NewApplication(),
			blockDB,
			NewTestLoggers(
				log.NewNopLogger().With("module", "mempool"),
				log.NewNopLogger().With("module", "evidence"),
				logger.With("module", "executor"),
				logger.With("module", "consensus"),
				log.NewNopLogger().With("module", "event")),
		)

		// start sending transactions
		ctx, cancel := context.WithCancel(context.Background())
		initFn(stateDB, cs, ctx)

		// clean up WAL file from the previous iteration
		walFile := cs.config.WalFile()
		os.Remove(walFile)

		// set crashing WAL
		csWal, err := cs.OpenWAL(walFile)
		require.NoError(t, err)
		crashingWal.next = csWal

		// reset the message counter
		crashingWal.msgIndex = 1
		cs.wal = crashingWal

		// start consensus state
		err = cs.Start()
		require.NoError(t, err)

		select {
		case err := <-walPanicked:
			t.Logf("WAL panicked: %v", err)

			// stop consensus state and transactions sender (initFn)
			cs.Stop() //nolint:errcheck // Logging this error causes failure
			cancel()
			// For safety, since sometimes nothing stops the WAL and it keeps writing.
			cs.wal.Stop() //nolint:errcheck

			// make sure we can make blocks after a crash
			startNewStateAndWaitForBlock(t, i, consensusReplayConfig, blockDB, stateStore)

			// if we reached the required height, exit
			if _, ok := err.(ReachedHeightToStopError); ok {
				break LOOP
			}
		case <-time.After(10 * time.Second):
			t.Fatal("WAL did not panic for 10 seconds (check the log)")
		}

		i++
	}
}

// crashingWAL is a WAL which crashes, or rather simulates a crash, during Save
// (before and after). It remembers the message for which it last panicked
// (lastPanickedForMsgIndex), so it doesn't panic for it in subsequent iterations.
type crashingWAL struct {
	t            *testing.T
	next         WAL
	panicCh      chan error
	heightToStop int64

	msgIndex                int // current message index
	lastPanickedForMsgIndex int // last message for which we panicked
}

var _ WAL = &crashingWAL{}

// WALWriteError indicates a WAL crash.
type WALWriteError struct {
	msg string
}

func (e WALWriteError) Error() string {
	return e.msg
}

// ReachedHeightToStopError indicates we've reached the required consensus
// height and may exit.
type ReachedHeightToStopError struct {
	height int64
}

func (e ReachedHeightToStopError) Error() string {
	return fmt.Sprintf("reached height to stop %d", e.height)
}

// Write simulates a WAL crash by sending an error to the panicCh and then
// exiting the cs.receiveRoutine.
func (w *crashingWAL) Write(m WALMessage) error {
	if endMsg, ok := m.(EndHeightMessage); ok {
		if endMsg.Height >= w.heightToStop {
			w.t.Logf("Reached[%d] WAL message[%T], Height[%d]", w.msgIndex, m, endMsg.Height)
			w.panicCh <- ReachedHeightToStopError{endMsg.Height}
			runtime.Goexit()
			return nil
		}
		w.t.Logf("Not-Reached[%d] WAL message[%T], Height[%d]", w.msgIndex, m, endMsg.Height)
		w.msgIndex++
		return w.next.Write(m)
	}

	if mi, ok := m.(msgInfo); ok {
		if pm, ok := mi.Msg.(*ProposalMessage); ok {
			w.t.Logf("Skipped[%d] WAL message[%T]:[%T]:[%v]", w.msgIndex, m, mi.Msg, pm.Proposal.Type)
		} else if vm, ok := mi.Msg.(*VoteMessage); ok {
			w.t.Logf("Skipped[%d] WAL message[%T]:[%T]:[%v]", w.msgIndex, m, mi.Msg, vm.Vote.Type)
		} else {
			w.t.Logf("Skipped[%d] WAL message[%T]:[%T]", w.msgIndex, m, mi.Msg)
		}
	} else {
		w.t.Logf("Skipped[%d] WAL message[%T]", w.msgIndex, m)
	}

	if w.msgIndex > w.lastPanickedForMsgIndex {
		w.lastPanickedForMsgIndex = w.msgIndex
		_, file, line, _ := runtime.Caller(1)
		w.panicCh <- WALWriteError{fmt.Sprintf("failed to write %T to WAL (fileline: %s:%d)", m, file, line)}
		runtime.Goexit()
		return nil
	}

	w.msgIndex++
	return w.next.Write(m)
}

func (w *crashingWAL) WriteSync(m WALMessage) error {
	return w.Write(m)
}

func (w *crashingWAL) FlushAndSync() error { return w.next.FlushAndSync() }

func (w *crashingWAL) SearchForEndHeight(
	height int64,
	options *WALSearchOptions,
) (rd io.ReadCloser, found bool, err error) {
	return w.next.SearchForEndHeight(height, options)
}

func (w *crashingWAL) Start() error { return w.next.Start() }
func (w *crashingWAL) Stop() error  { return w.next.Stop() }
func (w *crashingWAL) Wait()        { w.next.Wait() }

//------------------------------------------------------------------------------------------
type testSim struct {
	GenesisState sm.State
	Config       *cfg.Config
	Chain        []*types.Block
	Commits      []*types.Commit
	CleanupFunc  cleanupFunc
}

const (
	numBlocks = 6
)

var (
	mempool = emptyMempool{}
	evpool  = sm.EmptyEvidencePool{}

	sim testSim
)

//---------------------------------------
// Test handshake/replay

// 0 - all synced up
// 1 - saved block but app and state are behind
// 2 - save block and committed but state is behind
// 3 - save block and committed with truncated block store and state behind
// 4 - save block and committed with rollback state and state behind
var modes = []uint{0, 1, 2, 3, 4}

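// getProposerIdx returns the index (within state.Validators) and the validator
// entry of the proposer selected for the given height and round by
// SelectProposer, using the last proof hash.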
func getProposerIdx(state *State, height int64, round int32) (int32, *types.Validator) {
	proposer := state.Validators.SelectProposer(state.state.LastProofHash, height, round)
	return state.Validators.GetByAddress(proposer.PubKey.Address())
}

func consensusNewBlock(t *testing.T, height int64, vss []*validatorStub, css []*State,
	newRoundCh, proposalCh <-chan tmpubsub.Message, addTxFn func()) {
	// perform added tx
	if addTxFn != nil {
		addTxFn()
	}

	// state0 is the main started machine (css[0])
	cs := css[0]
	csPubKey, err := cs.privValidator.GetPubKey()
	require.NoError(t, err)
	proposerIdx, prop := getProposerIdx(cs, height, 0)

	// find the index of the proposer within css
	proposerIdxOfCSS := 0
	for i, cs := range css {
		pubKey, err := cs.privValidator.GetPubKey()
		require.NoError(t, err)
		if prop.PubKey.Equals(pubKey) {
			proposerIdxOfCSS = i
			break
		}
	}

	// map the validators in vss to their indices in the current validator set
	proposerIdxOfVSS := 0
	valSet := cs.Validators
	vssIndexOfValidatorList := make([]int, len(valSet.Validators)-1)
	var idx = 0
	for i, vs := range vss {
		vsPubKey, err := vs.GetPubKey()
		require.NoError(t, err)
		if vsPubKey.Equals(csPubKey) {
			continue
		}
		index, val := valSet.GetByAddress(vsPubKey.Address())
		if index == -1 && val == nil {
			continue
		}
		if index == proposerIdx {
			proposerIdxOfVSS = i
		}
		vs.Index = index // Update validatorStub.Index for signAndVote since the ValSet's order has changed
		vssIndexOfValidatorList[idx] = i
		idx++
	}

	if proposerIdxOfCSS != 0 {
		proposal, propBlock := decideProposal(cs, vss[proposerIdxOfVSS], height, 0)
		propBlockParts := propBlock.MakePartSet(types.BlockPartSizeBytes)
		// set the proposal block
		if err := cs.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
			t.Fatal(err)
		}
	}

	ensureNewProposal(proposalCh, height, 0)
	rs := cs.GetRoundState()
	for _, valIdx := range vssIndexOfValidatorList {
		signAddVotes(cs, tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[valIdx])
	}

	ensureNewRound(newRoundCh, height+1, 0)
}

// This is actually not a test; it stores validator-change tx data for testHandshakeReplay
func TestSimulateValidatorsChange(t *testing.T) {
	const nPeers = 7
	const nVals = 4
	css, genDoc, config, cleanup := randConsensusNetWithPeers(
		nVals,
		nPeers,
		"replay_test",
		newMockTickerFunc(true),
		newPersistentKVStoreWithPath)
	sim.Config = config
	sim.GenesisState, _ = sm.MakeGenesisState(genDoc)
	sim.CleanupFunc = cleanup

	newRoundCh := subscribe(css[0].eventBus, types.EventQueryNewRound)
	proposalCh := subscribe(css[0].eventBus, types.EventQueryCompleteProposal)

	vss := make([]*validatorStub, nPeers)
	for i := 0; i < nPeers; i++ {
		vss[i] = newValidatorStub(css[i].privValidator, int32(i))
	}
	height, round := css[0].Height, css[0].Round

	// start the machine
	startTestRound(css[0], height, round)
	incrementHeight(vss...)
	ensureNewRound(newRoundCh, height, 0)

	// height 1
	consensusNewBlock(t, height, vss, css, newRoundCh, proposalCh, nil)

	// height 2
	height++
	incrementHeight(vss...)
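	// the addTxFn below submits a valset-change tx that adds css[nVals] as a new validator with testMinPower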

	// proposal.Signature = p.Signature

	consensusNewBlock(t, height, vss, css, newRoundCh, proposalCh, func() {
		newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
		assert.Nil(t, err)
		valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1)
		assert.Nil(t, err)
		newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)
		err = assertMempool(css[0].txNotifier).CheckTxSync(newValidatorTx1, nil, mempl.TxInfo{})
		assert.Nil(t, err)
	})

	// height 3
	height++
	incrementHeight(vss...)

	consensusNewBlock(t, height, vss, css, newRoundCh, proposalCh, func() {
		updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
		require.NoError(t, err)
		updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1)
		require.NoError(t, err)
		updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
		err = assertMempool(css[0].txNotifier).CheckTxSync(updateValidatorTx1, nil, mempl.TxInfo{})
		assert.Nil(t, err)
	})

	// height 4
	height++
	incrementHeight(vss...)
	newVss := make([]*validatorStub, nVals+1)
	copy(newVss, vss[:nVals+1])
	sort.Sort(ValidatorStubsByPower(newVss))

	consensusNewBlock(t, height, newVss, css, newRoundCh, proposalCh, func() {
		newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey()
		require.NoError(t, err)
		newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2)
		require.NoError(t, err)
		newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)
		err = assertMempool(css[0].txNotifier).CheckTxSync(newValidatorTx2, nil, mempl.TxInfo{})
		assert.Nil(t, err)
		newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey()
		require.NoError(t, err)
		newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3)
		require.NoError(t, err)
		newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)
		err = assertMempool(css[0].txNotifier).CheckTxSync(newValidatorTx3, nil, mempl.TxInfo{})
		assert.Nil(t, err)
	})

	// height 5
	height++
	incrementHeight(vss...)
	consensusNewBlock(t, height, newVss, css, newRoundCh, proposalCh, nil)

	// Reflect the changes to vss[nVals] at height 3 and re-sort newVss.
	newVssIdx := func(cssIdx int) int {
		for i, vs := range newVss {
			vsPubKey, err := vs.GetPubKey()
			require.NoError(t, err)
			cssPubKey, err := css[cssIdx].privValidator.GetPubKey()
			require.NoError(t, err)
			if vsPubKey.Equals(cssPubKey) {
				return i
			}
		}
		panic(fmt.Sprintf("validator css[%d] not found in newVss", cssIdx))
	}(nVals)
	newVss[newVssIdx].VotingPower = 25
	sort.Sort(ValidatorStubsByPower(newVss))

	// height 6
	height++
	incrementHeight(vss...)
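	// height 6 removes the validator added at height 4 (css[nVals+2]) by submitting a valset-change tx with power 0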

	// re-calculate vss
	newVss = make([]*validatorStub, nVals+3)
	copy(newVss, vss[:nVals+3])
	sort.Sort(ValidatorStubsByPower(newVss))

	consensusNewBlock(t, height, newVss, css, newRoundCh, proposalCh, func() {
		newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey()
		require.NoError(t, err)
		newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3)
		require.NoError(t, err)
		removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)
		err = assertMempool(css[0].txNotifier).CheckTxSync(removeValidatorTx3, nil, mempl.TxInfo{})
		assert.Nil(t, err)
	})

	sim.Chain = make([]*types.Block, 0)
	sim.Commits = make([]*types.Commit, 0)
	for i := 1; i <= numBlocks; i++ {
		sim.Chain = append(sim.Chain, css[0].blockStore.LoadBlock(int64(i)))
		sim.Commits = append(sim.Commits, css[0].blockStore.LoadBlockCommit(int64(i)))
	}
}

// Sync from scratch
func TestHandshakeReplayAll(t *testing.T) {
	for _, m := range modes {
		testHandshakeReplay(t, config, 0, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, 0, m, true)
	}
}

// Sync many, not from scratch
func TestHandshakeReplaySome(t *testing.T) {
	for _, m := range modes {
		testHandshakeReplay(t, config, 2, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, 2, m, true)
	}
}

// Sync from lagging by one
func TestHandshakeReplayOne(t *testing.T) {
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks-1, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks-1, m, true)
	}
}

// Sync from caught up
func TestHandshakeReplayNone(t *testing.T) {
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks, m, true)
	}
}

// Test that mockProxyApp does not panic when the app returns ABCIResponses with some empty ResponseDeliverTx
func TestMockProxyApp(t *testing.T) {
	sim.CleanupFunc() // clean the test env created in TestSimulateValidatorsChange
	logger := log.TestingLogger()
	validTxs, invalidTxs := 0, 0
	txIndex := 0

	assert.NotPanics(t, func() {
		abciResWithEmptyDeliverTx := new(tmstate.ABCIResponses)
		abciResWithEmptyDeliverTx.DeliverTxs = make([]*abci.ResponseDeliverTx, 0)
		abciResWithEmptyDeliverTx.DeliverTxs = append(abciResWithEmptyDeliverTx.DeliverTxs, &abci.ResponseDeliverTx{})

		// called when saveABCIResponses:
		bytes, err := proto.Marshal(abciResWithEmptyDeliverTx)
		require.NoError(t, err)
		loadedAbciRes := new(tmstate.ABCIResponses)

		// this also happens in sm.LoadABCIResponses
		err = proto.Unmarshal(bytes, loadedAbciRes)
		require.NoError(t, err)

		mock := newMockProxyApp([]byte("mock_hash"), loadedAbciRes)

		abciRes := new(tmstate.ABCIResponses)
		abciRes.DeliverTxs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTxs))
		// Execute transactions and get hash.
		proxyCb := func(req *ocabci.Request, res *ocabci.Response) {
			if r, ok := res.Value.(*ocabci.Response_DeliverTx); ok {
				// TODO: make use of res.Log
				// TODO: make use of this info
				// Blocks may include invalid txs.
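				// count valid vs. invalid DeliverTx responses and record each response in abciRes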
				txRes := r.DeliverTx
				if txRes.Code == ocabci.CodeTypeOK {
					validTxs++
				} else {
					logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log)
					invalidTxs++
				}
				abciRes.DeliverTxs[txIndex] = txRes
				txIndex++
			}
		}
		mock.SetGlobalCallback(proxyCb)

		someTx := []byte("tx")
		mock.DeliverTxAsync(abci.RequestDeliverTx{Tx: someTx}, nil)
	})
	assert.True(t, validTxs == 1)
	assert.True(t, invalidTxs == 0)
}

func tempWALWithData(data []byte) string {
	walFile, err := os.CreateTemp("", "wal")
	if err != nil {
		panic(fmt.Sprintf("failed to create temp WAL file: %v", err))
	}
	_, err = walFile.Write(data)
	if err != nil {
		panic(fmt.Sprintf("failed to write to temp WAL file: %v", err))
	}
	if err := walFile.Close(); err != nil {
		panic(fmt.Sprintf("failed to close temp WAL file: %v", err))
	}
	return walFile.Name()
}

// Make some blocks. Start a fresh app and apply nBlocks blocks.
// Then restart the app and sync it up with the remaining blocks.
func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint, testValidatorsChange bool) {
	var chain []*types.Block
	var commits []*types.Commit
	var store *mockBlockStore
	var stateDB dbm.DB
	var genesisState sm.State
	if testValidatorsChange {
		testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
		defer os.RemoveAll(testConfig.RootDir)
		stateDB = dbm.NewMemDB()

		// Force the global variable "sim" to be initialized by calling TestSimulateValidatorsChange()
		// if it has not been initialized yet, e.g. when this test is run on its own.
		if sim.Config == nil {
			TestSimulateValidatorsChange(t)
		}
		genesisState = sim.GenesisState
		genesisState.ConsensusParams.Version.AppVersion = kvstore.ProtocolVersion
		genesisState.Version.Consensus.App = kvstore.ProtocolVersion
		config = sim.Config
		chain = append([]*types.Block{}, sim.Chain...) // copy chain
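		// reuse the commits and consensus params recorded by TestSimulateValidatorsChange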
		commits = sim.Commits
		store = newMockBlockStore(config, genesisState.ConsensusParams)
	} else { // test single node
		testConfig := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
		defer os.RemoveAll(testConfig.RootDir)
		walBody, err := WALWithNBlocks(t, numBlocks)
		require.NoError(t, err)
		walFile := tempWALWithData(walBody)
		config.Consensus.SetWalFile(walFile)

		privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())

		wal, err := NewWAL(walFile)
		require.NoError(t, err)
		wal.SetLogger(log.TestingLogger())
		err = wal.Start()
		require.NoError(t, err)
		t.Cleanup(func() {
			if err := wal.Stop(); err != nil {
				t.Error(err)
			}
		})
		chain, commits, err = makeBlockchainFromWAL(wal)
		require.NoError(t, err)
		pubKey, err := privVal.GetPubKey()
		require.NoError(t, err)
		stateDB, genesisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion)
	}
	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
		DiscardABCIResponses: false,
	})
	store.chain = chain
	store.commits = commits

	state := genesisState.Copy()
	// run the chain through state.ApplyBlock to build up the ostracon state
	state = buildOCStateFromChain(config, stateStore, state, chain, nBlocks, mode)
	latestAppHash := state.AppHash

	// make a new client creator
	kvstoreApp := kvstore.NewPersistentKVStoreApplication(
		filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_a", nBlocks, mode)))

	clientCreator2 := proxy.NewLocalClientCreator(kvstoreApp)
	if nBlocks > 0 {
		// run nBlocks against a new client to build up the app state.
		// use a throwaway ostracon state
		proxyApp := proxy.NewAppConns(clientCreator2)
		stateDB1 := dbm.NewMemDB()
		stateStore := sm.NewStore(stateDB1, sm.StoreOptions{
			DiscardABCIResponses: false,
		})
		err := stateStore.Save(genesisState)
		require.NoError(t, err)
		buildAppStateFromChain(proxyApp, stateStore, genesisState, chain, nBlocks, mode)
	}

	// Prune block store if requested
	expectError := false
	if mode == 3 {
		pruned, err := store.PruneBlocks(2)
		require.NoError(t, err)
		require.EqualValues(t, 1, pruned)
		expectError = int64(nBlocks) < 2
	}
	if mode == 4 {
		rollbackHeight, rollbackAppHash, err := sm.Rollback(store, stateStore)
		require.NoError(t, err)
		require.EqualValues(t, state.LastBlockHeight, rollbackHeight)
		require.EqualValues(t, state.AppHash, rollbackAppHash)
	}

	t.Log("####: now start the app using the handshake - it should sync")
	genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
	handshaker := NewHandshaker(stateStore, state, store, genDoc)
	handshaker.SetLogger(log.TestingLogger())
	proxyApp := proxy.NewAppConns(clientCreator2)
	if err := proxyApp.Start(); err != nil {
		t.Fatalf("Error starting proxy app connections: %v", err)
	}

	t.Cleanup(func() {
		if err := proxyApp.Stop(); err != nil {
			t.Error(err)
		}
	})

	err := handshaker.Handshake(proxyApp)
	if expectError {
		require.Error(t, err)
		return
	} else if err != nil {
		t.Fatalf("Error on abci handshake: %v", err)
	}

	// get the latest app hash from the app
	res, err := proxyApp.Query().InfoSync(abci.RequestInfo{Version: ""})
	if err != nil {
		t.Fatal(err)
	}

	// the app hash should be synced up
	if !bytes.Equal(latestAppHash, res.LastBlockAppHash) {
		t.Fatalf(
			"Expected app hashes to match after handshake/replay. got %X, expected %X",
			res.LastBlockAppHash,
			latestAppHash)
	}

	expectedBlocksToSync := numBlocks - nBlocks
	if nBlocks == numBlocks && mode > 0 {
		expectedBlocksToSync++
	} else if nBlocks > 0 && mode == 1 {
		expectedBlocksToSync++
	}

	if handshaker.NBlocks() != expectedBlocksToSync {
		t.Fatalf("Expected handshake to sync %d blocks, got %d", expectedBlocksToSync, handshaker.NBlocks())
	}
}

func applyBlock(stateStore sm.Store, st sm.State, blk *types.Block, proxyApp proxy.AppConns) sm.State {
	testPartSize := types.BlockPartSizeBytes
	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)

	blkID := types.BlockID{Hash: blk.Hash(), PartSetHeader: blk.MakePartSet(testPartSize).Header()}
	newState, _, err := blockExec.ApplyBlock(st, blkID, blk, nil)
	if err != nil {
		panic(err)
	}
	return newState
}

func buildAppStateFromChain(proxyApp proxy.AppConns, stateStore sm.Store,
	state sm.State, chain []*types.Block, nBlocks int, mode uint,
) {
	// start a new app without handshake, play nBlocks blocks
	if err := proxyApp.Start(); err != nil {
		panic(err)
	}
	defer proxyApp.Stop() //nolint:errcheck // ignore

	state.ConsensusParams.Version.AppVersion = kvstore.ProtocolVersion // simulate handshake, receive app version
	state.Version.Consensus.App = kvstore.ProtocolVersion              // simulate handshake, receive app version
	validators := types.OC2PB.ValidatorUpdates(state.Validators)
	if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
		Validators: validators,
	}); err != nil {
		panic(err)
	}
	if err := stateStore.Save(state); err != nil { // save height 1's validatorsInfo
		panic(err)
	}
	switch mode {
	case 0:
		for i := 0; i < nBlocks; i++ {
			block := chain[i]
			state = applyBlock(stateStore, state, block, proxyApp)
		}
	case 1, 2, 3, 4:
		for i := 0; i < nBlocks-1; i++ {
			block := chain[i]
			state = applyBlock(stateStore, state, block, proxyApp)
		}

		if mode == 2 || mode == 3 || mode == 4 {
			// update the kvstore height and apphash
			// as if we ran commit but not
			state = applyBlock(stateStore, state, chain[nBlocks-1], proxyApp)
		}
	default:
		panic(fmt.Sprintf("unknown mode %v", mode))
	}
}

func buildOCStateFromChain(
	config *cfg.Config,
	stateStore sm.Store,
	state sm.State,
	chain []*types.Block,
	nBlocks int,
	mode uint,
) sm.State {
	// run the whole chain against this client to build up the ostracon state
	clientCreator := proxy.NewLocalClientCreator(
		kvstore.NewPersistentKVStoreApplication(
			filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode))))
	proxyApp := proxy.NewAppConns(clientCreator)
	if err := proxyApp.Start(); err != nil {
		panic(err)
	}
	defer proxyApp.Stop() //nolint:errcheck

	state.ConsensusParams.Version.AppVersion = kvstore.ProtocolVersion // simulate handshake, receive app version
	state.Version.Consensus.App = kvstore.ProtocolVersion              // simulate handshake, receive app version
	validators := types.OC2PB.ValidatorUpdates(state.Validators)
	if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
		Validators: validators,
	}); err != nil {
		panic(err)
	}
	if err := stateStore.Save(state); err != nil { // save height 1's validatorsInfo
		panic(err)
	}
	switch mode {
	case 0:
		// sync right up
		for _, block := range chain {
			state = applyBlock(stateStore, state, block, proxyApp)
		}

	case 1, 2, 3, 4:
		// sync up to the penultimate block as if we stored the block.
		// whether we commit or not depends on the appHash
		for _, block := range chain[:len(chain)-1] {
			state = applyBlock(stateStore, state, block, proxyApp)
		}

		// apply the final block to a state copy so we can
		// get the right next appHash but keep the state back
		applyBlock(stateStore, state, chain[len(chain)-1], proxyApp)
	default:
		panic(fmt.Sprintf("unknown mode %v", mode))
	}

	return state
}

func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
	// 1. Initialize ostracon and commit 3 blocks with the following app hashes:
	//   - 0x01
	//   - 0x02
	//   - 0x03
	config := ResetConfig("handshake_test_")
	defer os.RemoveAll(config.RootDir)
	privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
	pubKey, err := privVal.GetPubKey()
	require.NoError(t, err)
	stateDB, state, store := stateAndStore(config, pubKey, version.AppProtocol)
	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
		DiscardABCIResponses: false,
	})
	genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
	state.LastValidators = state.Validators.Copy()
	// mode = 0 for committing all the blocks
	blocks := makeBlocks(3, &state, privVal)
	store.chain = blocks

	// 2. Ostracon must panic if app returns wrong hash for the first block
	//   - RANDOM HASH
	//   - 0x02
	//   - 0x03
	{
		app := &badApp{numBlocks: 3, allHashesAreWrong: true}
		clientCreator := proxy.NewLocalClientCreator(app)
		proxyApp := proxy.NewAppConns(clientCreator)
		err := proxyApp.Start()
		require.NoError(t, err)
		t.Cleanup(func() {
			if err := proxyApp.Stop(); err != nil {
				t.Error(err)
			}
		})

		assert.Panics(t, func() {
			h := NewHandshaker(stateStore, state, store, genDoc)
			if err = h.Handshake(proxyApp); err != nil {
				t.Log(err)
			}
		})
	}

	// 3. Ostracon must panic if app returns wrong hash for the last block
	//   - 0x01
	//   - 0x02
	//   - RANDOM HASH
	{
		app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true}
		clientCreator := proxy.NewLocalClientCreator(app)
		proxyApp := proxy.NewAppConns(clientCreator)
		err := proxyApp.Start()
		require.NoError(t, err)
		t.Cleanup(func() {
			if err := proxyApp.Stop(); err != nil {
				t.Error(err)
			}
		})

		assert.Panics(t, func() {
			h := NewHandshaker(stateStore, state, store, genDoc)
			if err = h.Handshake(proxyApp); err != nil {
				t.Log(err)
			}
		})
	}
}

func makeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Block {
	blocks := make([]*types.Block, 0)

	var (
		prevBlock     *types.Block
		prevBlockMeta *types.BlockMeta
	)

	appHeight := byte(0x01)
	for i := 0; i < n; i++ {
		height := int64(i + 1)

		block, parts := makeBlock(*state, prevBlock, prevBlockMeta, privVal, height)
		blocks = append(blocks, block)

		prevBlock = block
		prevBlockMeta = types.NewBlockMeta(block, parts)

		// update state
		state.AppHash = []byte{appHeight}
		appHeight++
		state.LastBlockHeight = height
	}

	return blocks
}

func makeBlock(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta,
	privVal types.PrivValidator, height int64,
) (*types.Block, *types.PartSet) {
	lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, nil)
	if height > 1 {
		vote, _ := types.MakeVote(
			lastBlock.Header.Height,
			lastBlockMeta.BlockID,
			state.Validators,
			privVal,
			lastBlock.Header.ChainID,
			time.Now())
		lastCommit = types.NewCommit(vote.Height, vote.Round,
			lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
	}

	message := state.MakeHashMessage(0)
	proof, _ := privVal.GenerateVRFProof(message)
	return state.MakeBlock(height, []types.Tx{}, lastCommit, nil,
		state.Validators.SelectProposer(state.LastProofHash, height, 0).Address, 0, proof)
}

type badApp struct {
	ocabci.BaseApplication
	numBlocks           byte
	height              byte
	allHashesAreWrong   bool
	onlyLastHashIsWrong bool
}

func (app *badApp) Commit() abci.ResponseCommit {
	app.height++
	if app.onlyLastHashIsWrong {
		if app.height == app.numBlocks {
			return abci.ResponseCommit{Data: tmrand.Bytes(8)}
		}
		return abci.ResponseCommit{Data: []byte{app.height}}
	} else if app.allHashesAreWrong {
		return abci.ResponseCommit{Data: tmrand.Bytes(8)}
	}

	panic("either allHashesAreWrong or onlyLastHashIsWrong must be set")
}

//--------------------------
// utils for making blocks

func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
	var height int64

	// Search for height marker
	gr, found, err := wal.SearchForEndHeight(height, &WALSearchOptions{})
	if err != nil {
		return nil, nil, err
	}
	if !found {
		return nil, nil, fmt.Errorf("wal does not contain height %d", height)
	}
	defer gr.Close()

	// log.Notice("Build a blockchain by reading from the WAL")

	var (
		blocks          []*types.Block
		commits         []*types.Commit
		thisBlockParts  *types.PartSet
		thisBlockCommit *types.Commit
	)

	dec := NewWALDecoder(gr)
	for {
		msg, err := dec.Decode()
		if err == io.EOF {
			break
		} else if err != nil {
			return nil, nil, err
		}

		piece := readPieceFromWAL(msg)
		if piece == nil {
			continue
		}

		switch p := piece.(type) {
		case EndHeightMessage:
			// if it's not the first one, we have a full block
			if thisBlockParts != nil {
				pbb := new(ocproto.Block)
				bz, err := io.ReadAll(thisBlockParts.GetReader())
				if err != nil {
					panic(err)
				}
				err = proto.Unmarshal(bz, pbb)
				if err != nil {
					panic(err)
				}
				block, err := types.BlockFromProto(pbb)
				if err != nil {
					panic(err)
				}

				if block.Height != height+1 {
					panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1))
				}
				commitHeight := thisBlockCommit.Height
				if commitHeight != height+1 {
					panic(fmt.Sprintf("commit doesn't match. got height %d, expected %d", commitHeight, height+1))
				}
				blocks = append(blocks, block)
				commits = append(commits, thisBlockCommit)
				height++
			}
		case *types.PartSetHeader:
			thisBlockParts = types.NewPartSetFromHeader(*p)
		case *types.Part:
			_, err := thisBlockParts.AddPart(p)
			if err != nil {
				return nil, nil, err
			}
		case *types.Vote:
			if p.Type == tmproto.PrecommitType {
				thisBlockCommit = types.NewCommit(p.Height, p.Round,
					p.BlockID, []types.CommitSig{p.CommitSig()})
			}
		}
	}
	// grab the last block too
	bz, err := io.ReadAll(thisBlockParts.GetReader())
	if err != nil {
		panic(err)
	}
	pbb := new(ocproto.Block)
	err = proto.Unmarshal(bz, pbb)
	if err != nil {
		panic(err)
	}
	block, err := types.BlockFromProto(pbb)
	if err != nil {
		panic(err)
	}
	if block.Height != height+1 {
		panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1))
	}
	commitHeight := thisBlockCommit.Height
	if commitHeight != height+1 {
		panic(fmt.Sprintf("commit doesn't match. got height %d, expected %d", commitHeight, height+1))
	}
	blocks = append(blocks, block)
	commits = append(commits, thisBlockCommit)
	return blocks, commits, nil
}

func readPieceFromWAL(msg *TimedWALMessage) interface{} {
	// for logging
	switch m := msg.Msg.(type) {
	case msgInfo:
		switch msg := m.Msg.(type) {
		case *ProposalMessage:
			return &msg.Proposal.BlockID.PartSetHeader
		case *BlockPartMessage:
			return msg.Part
		case *VoteMessage:
			return msg.Vote
		}
	case EndHeightMessage:
		return m
	}

	return nil
}

// fresh state and mock store
func stateAndStore(
	config *cfg.Config,
	pubKey crypto.PubKey,
	appVersion uint64,
) (dbm.DB, sm.State, *mockBlockStore) {
	stateDB := dbm.NewMemDB()
	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
		DiscardABCIResponses: false,
	})
	state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile())
	state.ConsensusParams.Version.AppVersion = appVersion
	state.Version.Consensus.App = appVersion
	store := newMockBlockStore(config, state.ConsensusParams)
	if err := stateStore.Save(state); err != nil {
		panic(err)
	}
	return stateDB, state, store
}

//----------------------------------
// mock block store

type mockBlockStore struct {
	config  *cfg.Config
	params  tmproto.ConsensusParams
	chain   []*types.Block
	commits []*types.Commit
	base    int64
}

// TODO: NewBlockStore(db.NewMemDB) ...
func newMockBlockStore(config *cfg.Config, params tmproto.ConsensusParams) *mockBlockStore {
	return &mockBlockStore{config, params, nil, nil, 0}
}

func (bs *mockBlockStore) Height() int64                       { return int64(len(bs.chain)) }
func (bs *mockBlockStore) Base() int64                         { return bs.base }
func (bs *mockBlockStore) Size() int64                         { return bs.Height() - bs.Base() + 1 }
func (bs *mockBlockStore) LoadBaseMeta() *types.BlockMeta      { return bs.LoadBlockMeta(bs.base) }
func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain[height-1] }
func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block {
	return bs.chain[int64(len(bs.chain))-1]
}

func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
	block := bs.chain[height-1]
	return &types.BlockMeta{
		BlockID: types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()},
		Header:  block.Header,
	}
}
func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil }
func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
}

func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit {
	return bs.commits[height-1]
}

func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit {
	return bs.commits[height-1]
}

func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) {
	pruned := uint64(0)
	for i := int64(0); i < height-1; i++ {
		bs.chain[i] = nil
		bs.commits[i] = nil
		pruned++
	}
	bs.base = height
	return pruned, nil
}

//---------------------------------------
// Test handshake/init chain

func TestHandshakeUpdatesValidators(t *testing.T) {
	val, _ := types.RandValidator(true, 10)
	vals := types.NewValidatorSet([]*types.Validator{val})
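	// the app returns this single-validator set from InitChain, so the handshake should replace the genesis validator with it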
	app := &initChainApp{vals: types.OC2PB.ValidatorUpdates(vals)}
	clientCreator := proxy.NewLocalClientCreator(app)

	config := ResetConfig("handshake_test_")
	defer os.RemoveAll(config.RootDir)
	privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
	pubKey, err := privVal.GetPubKey()
	require.NoError(t, err)
	stateDB, state, store := stateAndStore(config, pubKey, version.AppProtocol)
	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
		DiscardABCIResponses: false,
	})

	oldValAddr := state.Validators.Validators[0].Address

	// now start the app using the handshake - it should sync
	genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
	handshaker := NewHandshaker(stateStore, state, store, genDoc)
	proxyApp := proxy.NewAppConns(clientCreator)
	if err := proxyApp.Start(); err != nil {
		t.Fatalf("Error starting proxy app connections: %v", err)
	}
	t.Cleanup(func() {
		if err := proxyApp.Stop(); err != nil {
			t.Error(err)
		}
	})
	if err := handshaker.Handshake(proxyApp); err != nil {
		t.Fatalf("Error on abci handshake: %v", err)
	}
	// reload the state, check the validator set was updated
	state, err = stateStore.Load()
	require.NoError(t, err)

	newValAddr := state.Validators.Validators[0].Address
	expectValAddr := val.Address
	assert.NotEqual(t, oldValAddr, newValAddr)
	assert.Equal(t, newValAddr, expectValAddr)
}

// returns the vals on InitChain
type initChainApp struct {
	ocabci.BaseApplication
	vals []abci.ValidatorUpdate
}

func (ica *initChainApp) InitChain(req abci.RequestInitChain) abci.ResponseInitChain {
	return abci.ResponseInitChain{
		Validators: ica.vals,
	}
}