package v2

import (
	"fmt"
	"net"
	"os"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
	dbm "github.com/tendermint/tm-db"

	abci "github.com/Finschia/ostracon/abci/types"
	"github.com/Finschia/ostracon/behaviour"
	cfg "github.com/Finschia/ostracon/config"
	"github.com/Finschia/ostracon/libs/log"
	"github.com/Finschia/ostracon/libs/service"
	"github.com/Finschia/ostracon/mempool/mock"
	"github.com/Finschia/ostracon/p2p"
	"github.com/Finschia/ostracon/p2p/conn"
	"github.com/Finschia/ostracon/proxy"
	sm "github.com/Finschia/ostracon/state"
	"github.com/Finschia/ostracon/store"
	"github.com/Finschia/ostracon/types"
	tmtime "github.com/Finschia/ostracon/types/time"
)

// mockPeer is a minimal stub implementation of p2p.Peer used to feed messages
// into the reactor. Every send reports success and every accessor returns a
// fixed stub value; only the peer ID is configurable.
type mockPeer struct {
	service.Service
	id p2p.ID
}

func (mp mockPeer) FlushStop()           {}
func (mp mockPeer) ID() p2p.ID           { return mp.id }
func (mp mockPeer) RemoteIP() net.IP     { return net.IP{} }
func (mp mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.RemoteIP(), Port: 8800} }

func (mp mockPeer) IsOutbound() bool   { return true }
func (mp mockPeer) IsPersistent() bool { return true }
func (mp mockPeer) CloseConn() error   { return nil }

// NodeInfo returns an empty DefaultNodeInfo; tests never inspect it.
func (mp mockPeer) NodeInfo() p2p.NodeInfo {
	return p2p.DefaultNodeInfo{
		DefaultNodeID: "",
		ListenAddr:    "",
	}
}
func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
func (mp mockPeer) SocketAddr() *p2p.NetAddress   { return &p2p.NetAddress{} }

// Envelope-based sends always claim success so the reactor never sees a
// delivery failure from this peer.
func (mp mockPeer) SendEnvelope(e p2p.Envelope) bool    { return true }
func (mp mockPeer) TrySendEnvelope(e p2p.Envelope) bool { return true }

func (mp mockPeer) Send(byte, []byte) bool    { return true }
func (mp mockPeer) TrySend(byte, []byte) bool { return true }

func (mp mockPeer) Set(string, interface{}) {}
func (mp mockPeer) Get(string) interface{}  { return struct{}{} }

func (mp mockPeer) String() string { return fmt.Sprintf("%v", mp.id) }

func (mp mockPeer) SetRemovalFailed()      {}
func (mp mockPeer) GetRemovalFailed() bool { return false }

// mockBlockApplier is a blockApplier stub: instead of executing the block it
// only bumps the state's LastBlockHeight, letting scheduler/processor tests
// run without a real ABCI application.
type mockBlockApplier struct{}

// XXX: Add whitelist/blacklist?
func (mba *mockBlockApplier) ApplyBlock(
	state sm.State, blockID types.BlockID, block *types.Block, times *sm.CommitStepTimes,
) (sm.State, int64, error) {
	state.LastBlockHeight++
	return state, 0, nil
}

// mockSwitchIo stands in for the reactor's switch I/O layer. It counts, under
// a mutex (the reactor calls from its own goroutines), how many times each
// kind of response was sent so tests can assert on the counters.
type mockSwitchIo struct {
	mtx                 sync.Mutex
	switchedToConsensus bool
	numStatusResponse   int
	numBlockResponse    int
	numNoBlockResponse  int
}

func (sio *mockSwitchIo) sendBlockRequest(peerID p2p.ID, height int64) error {
	return nil
}

func (sio *mockSwitchIo) sendStatusResponse(base, height int64, peerID p2p.ID) error {
	sio.mtx.Lock()
	defer sio.mtx.Unlock()
	sio.numStatusResponse++
	return nil
}

func (sio *mockSwitchIo) sendBlockToPeer(block *types.Block, peerID p2p.ID) error {
	sio.mtx.Lock()
	defer sio.mtx.Unlock()
	sio.numBlockResponse++
	return nil
}

func (sio *mockSwitchIo) sendBlockNotFound(height int64, peerID p2p.ID) error {
	sio.mtx.Lock()
	defer sio.mtx.Unlock()
	sio.numNoBlockResponse++
	return nil
}

// trySwitchToConsensus records the hand-off and always reports success.
func (sio *mockSwitchIo) trySwitchToConsensus(state sm.State, skipWAL bool) bool {
	sio.mtx.Lock()
	defer sio.mtx.Unlock()
	sio.switchedToConsensus = true
	return true
}

func (sio *mockSwitchIo) broadcastStatusRequest() {
}

// testReactorParams bundles the inputs needed to build a test reactor.
type testReactorParams struct {
	logger      log.Logger
	genDoc      *types.GenesisDoc
	privVals    []types.PrivValidator
	startHeight int64 // number of blocks pre-loaded into the reactor's store
	mockA       bool  // true: use mockBlockApplier; false: wire a real BlockExecutor
}

// newTestReactor builds a BlockchainReactor whose block store is pre-filled up
// to p.startHeight (via newReactorStore). When p.mockA is false it starts a
// local ABCI app and a real sm.BlockExecutor over a fresh in-memory state
// store; any setup failure panics, which is acceptable in test helpers.
func newTestReactor(p testReactorParams) *BlockchainReactor {
	store, state, _ := newReactorStore(p.genDoc, p.privVals, p.startHeight)
	reporter := behaviour.NewMockReporter()

	var appl blockApplier

	if p.mockA {
		appl = &mockBlockApplier{}
	} else {
		app := &testApp{}
		cc := proxy.NewLocalClientCreator(app)
		proxyApp := proxy.NewAppConns(cc)
		err := proxyApp.Start()
		if err != nil {
			panic(fmt.Errorf("error start app: %w", err))
		}
		db := dbm.NewMemDB()
		stateStore := sm.NewStore(db, sm.StoreOptions{
			DiscardABCIResponses: false,
		})
		appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{})
		if err = stateStore.Save(state); err != nil {
			panic(err)
		}
	}

	r := newReactor(state, store, reporter, appl, true)
	logger := log.TestingLogger()
	r.SetLogger(logger.With("module", "blockchain"))

	return r
}

// This test is left here and not deleted to retain the termination cases for
// future improvement in [#4482](https://github.com/tendermint/tendermint/issues/4482).
167 // func TestReactorTerminationScenarios(t *testing.T) { 168 169 // config := cfg.ResetTestRoot("blockchain_reactor_v2_test") 170 // defer os.RemoveAll(config.RootDir) 171 // genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30) 172 // refStore, _, _ := newReactorStore(genDoc, privVals, 20) 173 174 // params := testReactorParams{ 175 // logger: log.TestingLogger(), 176 // genDoc: genDoc, 177 // privVals: privVals, 178 // startHeight: 10, 179 // bufferSize: 100, 180 // mockA: true, 181 // } 182 183 // type testEvent struct { 184 // evType string 185 // peer string 186 // height int64 187 // } 188 189 // tests := []struct { 190 // name string 191 // params testReactorParams 192 // msgs []testEvent 193 // }{ 194 // { 195 // name: "simple termination on max peer height - one peer", 196 // params: params, 197 // msgs: []testEvent{ 198 // {evType: "AddPeer", peer: "P1"}, 199 // {evType: "ReceiveS", peer: "P1", height: 13}, 200 // {evType: "BlockReq"}, 201 // {evType: "ReceiveB", peer: "P1", height: 11}, 202 // {evType: "BlockReq"}, 203 // {evType: "BlockReq"}, 204 // {evType: "ReceiveB", peer: "P1", height: 12}, 205 // {evType: "Process"}, 206 // {evType: "ReceiveB", peer: "P1", height: 13}, 207 // {evType: "Process"}, 208 // }, 209 // }, 210 // { 211 // name: "simple termination on max peer height - two peers", 212 // params: params, 213 // msgs: []testEvent{ 214 // {evType: "AddPeer", peer: "P1"}, 215 // {evType: "AddPeer", peer: "P2"}, 216 // {evType: "ReceiveS", peer: "P1", height: 13}, 217 // {evType: "ReceiveS", peer: "P2", height: 15}, 218 // {evType: "BlockReq"}, 219 // {evType: "BlockReq"}, 220 // {evType: "ReceiveB", peer: "P1", height: 11}, 221 // {evType: "ReceiveB", peer: "P2", height: 12}, 222 // {evType: "Process"}, 223 // {evType: "BlockReq"}, 224 // {evType: "BlockReq"}, 225 // {evType: "ReceiveB", peer: "P1", height: 13}, 226 // {evType: "Process"}, 227 // {evType: "ReceiveB", peer: "P2", height: 14}, 228 // {evType: "Process"}, 229 // 
{evType: "BlockReq"}, 230 // {evType: "ReceiveB", peer: "P2", height: 15}, 231 // {evType: "Process"}, 232 // }, 233 // }, 234 // { 235 // name: "termination on max peer height - two peers, noBlock error", 236 // params: params, 237 // msgs: []testEvent{ 238 // {evType: "AddPeer", peer: "P1"}, 239 // {evType: "AddPeer", peer: "P2"}, 240 // {evType: "ReceiveS", peer: "P1", height: 13}, 241 // {evType: "ReceiveS", peer: "P2", height: 15}, 242 // {evType: "BlockReq"}, 243 // {evType: "BlockReq"}, 244 // {evType: "ReceiveNB", peer: "P1", height: 11}, 245 // {evType: "BlockReq"}, 246 // {evType: "ReceiveB", peer: "P2", height: 12}, 247 // {evType: "ReceiveB", peer: "P2", height: 11}, 248 // {evType: "Process"}, 249 // {evType: "BlockReq"}, 250 // {evType: "BlockReq"}, 251 // {evType: "ReceiveB", peer: "P2", height: 13}, 252 // {evType: "Process"}, 253 // {evType: "ReceiveB", peer: "P2", height: 14}, 254 // {evType: "Process"}, 255 // {evType: "BlockReq"}, 256 // {evType: "ReceiveB", peer: "P2", height: 15}, 257 // {evType: "Process"}, 258 // }, 259 // }, 260 // { 261 // name: "termination on max peer height - two peers, remove one peer", 262 // params: params, 263 // msgs: []testEvent{ 264 // {evType: "AddPeer", peer: "P1"}, 265 // {evType: "AddPeer", peer: "P2"}, 266 // {evType: "ReceiveS", peer: "P1", height: 13}, 267 // {evType: "ReceiveS", peer: "P2", height: 15}, 268 // {evType: "BlockReq"}, 269 // {evType: "BlockReq"}, 270 // {evType: "RemovePeer", peer: "P1"}, 271 // {evType: "BlockReq"}, 272 // {evType: "ReceiveB", peer: "P2", height: 12}, 273 // {evType: "ReceiveB", peer: "P2", height: 11}, 274 // {evType: "Process"}, 275 // {evType: "BlockReq"}, 276 // {evType: "BlockReq"}, 277 // {evType: "ReceiveB", peer: "P2", height: 13}, 278 // {evType: "Process"}, 279 // {evType: "ReceiveB", peer: "P2", height: 14}, 280 // {evType: "Process"}, 281 // {evType: "BlockReq"}, 282 // {evType: "ReceiveB", peer: "P2", height: 15}, 283 // {evType: "Process"}, 284 // }, 285 // }, 
286 // } 287 288 // for _, tt := range tests { 289 // tt := tt 290 // t.Run(tt.name, func(t *testing.T) { 291 // reactor := newTestReactor(params) 292 // reactor.Start() 293 // reactor.reporter = behaviour.NewMockReporter() 294 // mockSwitch := &mockSwitchIo{switchedToConsensus: false} 295 // reactor.io = mockSwitch 296 // // time for go routines to start 297 // time.Sleep(time.Millisecond) 298 299 // for _, step := range tt.msgs { 300 // switch step.evType { 301 // case "AddPeer": 302 // reactor.scheduler.send(bcAddNewPeer{peerID: p2p.ID(step.peer)}) 303 // case "RemovePeer": 304 // reactor.scheduler.send(bcRemovePeer{peerID: p2p.ID(step.peer)}) 305 // case "ReceiveS": 306 // reactor.scheduler.send(bcStatusResponse{ 307 // peerID: p2p.ID(step.peer), 308 // height: step.height, 309 // time: time.Now(), 310 // }) 311 // case "ReceiveB": 312 // reactor.scheduler.send(bcBlockResponse{ 313 // peerID: p2p.ID(step.peer), 314 // block: refStore.LoadBlock(step.height), 315 // size: 10, 316 // time: time.Now(), 317 // }) 318 // case "ReceiveNB": 319 // reactor.scheduler.send(bcNoBlockResponse{ 320 // peerID: p2p.ID(step.peer), 321 // height: step.height, 322 // time: time.Now(), 323 // }) 324 // case "BlockReq": 325 // reactor.scheduler.send(rTrySchedule{time: time.Now()}) 326 // case "Process": 327 // reactor.processor.send(rProcessBlock{}) 328 // } 329 // // give time for messages to propagate between routines 330 // time.Sleep(time.Millisecond) 331 // } 332 333 // // time for processor to finish and reactor to switch to consensus 334 // time.Sleep(20 * time.Millisecond) 335 // assert.True(t, mockSwitch.hasSwitchedToConsensus()) 336 // reactor.Stop() 337 // }) 338 // } 339 // } 340 341 func TestReactorHelperMode(t *testing.T) { 342 channelID := byte(0x40) 343 344 config := cfg.ResetTestRoot("blockchain_reactor_v2_test") 345 defer os.RemoveAll(config.RootDir) 346 genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30) 347 348 params := testReactorParams{ 349 
logger: log.TestingLogger(), 350 genDoc: genDoc, 351 privVals: privVals, 352 startHeight: 20, 353 mockA: true, 354 } 355 356 type testEvent struct { 357 peer string 358 event proto.Message 359 } 360 361 tests := []struct { 362 name string 363 params testReactorParams 364 msgs []testEvent 365 }{ 366 { 367 name: "status request", 368 params: params, 369 msgs: []testEvent{ 370 {"P1", &bcproto.StatusRequest{}}, 371 {"P1", &bcproto.BlockRequest{Height: 13}}, 372 {"P1", &bcproto.BlockRequest{Height: 20}}, 373 {"P1", &bcproto.BlockRequest{Height: 22}}, 374 }, 375 }, 376 } 377 378 for _, tt := range tests { 379 tt := tt 380 t.Run(tt.name, func(t *testing.T) { 381 reactor := newTestReactor(params) 382 mockSwitch := &mockSwitchIo{switchedToConsensus: false} 383 reactor.io = mockSwitch 384 err := reactor.Start() 385 require.NoError(t, err) 386 387 for i := 0; i < len(tt.msgs); i++ { 388 step := tt.msgs[i] 389 switch ev := step.event.(type) { 390 case *bcproto.StatusRequest: 391 old := mockSwitch.numStatusResponse 392 reactor.ReceiveEnvelope(p2p.Envelope{ 393 ChannelID: channelID, 394 Src: mockPeer{id: p2p.ID(step.peer)}, 395 Message: ev}) 396 assert.Equal(t, old+1, mockSwitch.numStatusResponse) 397 case *bcproto.BlockRequest: 398 if ev.Height > params.startHeight { 399 old := mockSwitch.numNoBlockResponse 400 reactor.ReceiveEnvelope(p2p.Envelope{ 401 ChannelID: channelID, 402 Src: mockPeer{id: p2p.ID(step.peer)}, 403 Message: ev}) 404 assert.Equal(t, old+1, mockSwitch.numNoBlockResponse) 405 } else { 406 old := mockSwitch.numBlockResponse 407 reactor.ReceiveEnvelope(p2p.Envelope{ 408 ChannelID: channelID, 409 Src: mockPeer{id: p2p.ID(step.peer)}, 410 Message: ev}) 411 assert.Equal(t, old+1, mockSwitch.numBlockResponse) 412 } 413 } 414 } 415 err = reactor.Stop() 416 require.NoError(t, err) 417 }) 418 } 419 } 420 421 func TestLegacyReactorReceiveBasic(t *testing.T) { 422 config := cfg.ResetTestRoot("blockchain_reactor_v2_test") 423 defer os.RemoveAll(config.RootDir) 424 genDoc, 
privVals := randGenesisDoc(config.ChainID(), 1, false, 30) 425 params := testReactorParams{ 426 logger: log.TestingLogger(), 427 genDoc: genDoc, 428 privVals: privVals, 429 startHeight: 20, 430 mockA: true, 431 } 432 reactor := newTestReactor(params) 433 mockSwitch := &mockSwitchIo{switchedToConsensus: false} 434 reactor.io = mockSwitch 435 peer := p2p.CreateRandomPeer(false) 436 437 reactor.InitPeer(peer) 438 reactor.AddPeer(peer) 439 m := &bcproto.StatusRequest{} 440 wm := m.Wrap() 441 msg, err := proto.Marshal(wm) 442 assert.NoError(t, err) 443 444 assert.NotPanics(t, func() { 445 reactor.Receive(BlockchainChannel, peer, msg) 446 }) 447 } 448 449 func TestReactorSetSwitchNil(t *testing.T) { 450 config := cfg.ResetTestRoot("blockchain_reactor_v2_test") 451 defer os.RemoveAll(config.RootDir) 452 genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30) 453 454 reactor := newTestReactor(testReactorParams{ 455 logger: log.TestingLogger(), 456 genDoc: genDoc, 457 privVals: privVals, 458 }) 459 reactor.SetSwitch(nil) 460 461 assert.Nil(t, reactor.Switch) 462 assert.Nil(t, reactor.io) 463 } 464 465 //---------------------------------------------- 466 // utility funcs 467 468 func makeTxs(height int64) (txs []types.Tx) { 469 for i := 0; i < 10; i++ { 470 txs = append(txs, types.Tx([]byte{byte(height), byte(i)})) 471 } 472 return txs 473 } 474 475 func makeBlock(privVal types.PrivValidator, height int64, state sm.State, lastCommit *types.Commit) *types.Block { 476 message := state.MakeHashMessage(0) 477 proof, _ := privVal.GenerateVRFProof(message) 478 proposerAddr := state.Validators.SelectProposer(state.LastProofHash, height, 0).Address 479 block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, proposerAddr, 0, proof) 480 return block 481 } 482 483 type testApp struct { 484 abci.BaseApplication 485 } 486 487 func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower int64) ( 488 *types.GenesisDoc, []types.PrivValidator, 489 
) { 490 validators := make([]types.GenesisValidator, numValidators) 491 privValidators := make([]types.PrivValidator, numValidators) 492 for i := 0; i < numValidators; i++ { 493 val, privVal := types.RandValidator(randPower, minPower) 494 validators[i] = types.GenesisValidator{ 495 PubKey: val.PubKey, 496 Power: val.VotingPower, 497 } 498 privValidators[i] = privVal 499 } 500 sort.Sort(types.PrivValidatorsByAddress(privValidators)) 501 502 return &types.GenesisDoc{ 503 GenesisTime: tmtime.Now(), 504 ChainID: chainID, 505 Validators: validators, 506 }, privValidators 507 } 508 509 // Why are we importing the entire blockExecutor dependency graph here 510 // when we have the facilities to 511 func newReactorStore( 512 genDoc *types.GenesisDoc, 513 privVals []types.PrivValidator, 514 maxBlockHeight int64, 515 ) (*store.BlockStore, sm.State, *sm.BlockExecutor) { 516 if len(privVals) != 1 { 517 panic("only support one validator") 518 } 519 app := &testApp{} 520 cc := proxy.NewLocalClientCreator(app) 521 proxyApp := proxy.NewAppConns(cc) 522 err := proxyApp.Start() 523 if err != nil { 524 panic(fmt.Errorf("error start app: %w", err)) 525 } 526 527 stateDB := dbm.NewMemDB() 528 blockStore := store.NewBlockStore(dbm.NewMemDB()) 529 stateStore := sm.NewStore(stateDB, sm.StoreOptions{ 530 DiscardABCIResponses: false, 531 }) 532 state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) 533 if err != nil { 534 panic(fmt.Errorf("error constructing state from genesis file: %w", err)) 535 } 536 537 db := dbm.NewMemDB() 538 stateStore = sm.NewStore(db, sm.StoreOptions{ 539 DiscardABCIResponses: false, 540 }, 541 ) 542 blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), 543 mock.Mempool{}, sm.EmptyEvidencePool{}) 544 if err = stateStore.Save(state); err != nil { 545 panic(err) 546 } 547 548 // add blocks in 549 for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { 550 lastCommit := types.NewCommit(blockHeight-1, 0, 
types.BlockID{}, nil) 551 if blockHeight > 1 { 552 lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1) 553 lastBlock := blockStore.LoadBlock(blockHeight - 1) 554 vote, err := types.MakeVote( 555 lastBlock.Header.Height, 556 lastBlockMeta.BlockID, 557 state.Validators, 558 privVals[0], 559 lastBlock.Header.ChainID, 560 time.Now(), 561 ) 562 if err != nil { 563 panic(err) 564 } 565 lastCommit = types.NewCommit(vote.Height, vote.Round, 566 lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()}) 567 } 568 569 thisBlock := makeBlock(privVals[0], blockHeight, state, lastCommit) 570 571 thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) 572 blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} 573 574 state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock, nil) 575 if err != nil { 576 panic(fmt.Errorf("error apply block: %w", err)) 577 } 578 579 blockStore.SaveBlock(thisBlock, thisParts, lastCommit) 580 } 581 return blockStore, state, blockExec 582 }