github.com/franono/tendermint@v0.32.2-0.20200527150959-749313264ce9/blockchain/v2/reactor_test.go

package v2

import (
	"fmt"
	"net"
	"os"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	dbm "github.com/tendermint/tm-db"

	abci "github.com/franono/tendermint/abci/types"
	"github.com/franono/tendermint/behaviour"
	cfg "github.com/franono/tendermint/config"
	"github.com/franono/tendermint/libs/log"
	"github.com/franono/tendermint/libs/service"
	"github.com/franono/tendermint/mempool/mock"
	"github.com/franono/tendermint/p2p"
	"github.com/franono/tendermint/p2p/conn"
	"github.com/franono/tendermint/proxy"
	sm "github.com/franono/tendermint/state"
	"github.com/franono/tendermint/store"
	"github.com/franono/tendermint/types"
	tmtime "github.com/franono/tendermint/types/time"
)

type mockPeer struct {
	service.Service
	id p2p.ID
}

func (mp mockPeer) FlushStop()           {}
func (mp mockPeer) ID() p2p.ID           { return mp.id }
func (mp mockPeer) RemoteIP() net.IP     { return net.IP{} }
func (mp mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.RemoteIP(), Port: 8800} }

func (mp mockPeer) IsOutbound() bool   { return true }
func (mp mockPeer) IsPersistent() bool { return true }
func (mp mockPeer) CloseConn() error   { return nil }

func (mp mockPeer) NodeInfo() p2p.NodeInfo {
	return p2p.DefaultNodeInfo{
		DefaultNodeID: "",
		ListenAddr:    "",
	}
}
func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
func (mp mockPeer) SocketAddr() *p2p.NetAddress   { return &p2p.NetAddress{} }

func (mp mockPeer) Send(byte, []byte) bool    { return true }
func (mp mockPeer) TrySend(byte, []byte) bool { return true }

func (mp mockPeer) Set(string, interface{}) {}
func (mp mockPeer) Get(string) interface{}  { return struct{}{} }

//nolint:unused
type mockBlockStore struct {
	blocks map[int64]*types.Block
}

func (ml *mockBlockStore) Height() int64 {
	return int64(len(ml.blocks))
}

func (ml *mockBlockStore) LoadBlock(height int64) *types.Block {
	return ml.blocks[height]
}

func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) {
	ml.blocks[block.Height] = block
}

type mockBlockApplier struct {
}

// XXX: Add whitelist/blacklist?
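// ApplyBlock on the mock applier only advances LastBlockHeight; it performs no
// real execution or validation, which keeps the reactor tests that set mockA
// fast and deterministic.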
func (mba *mockBlockApplier) ApplyBlock(
	state sm.State, blockID types.BlockID, block *types.Block,
) (sm.State, int64, error) {
	state.LastBlockHeight++
	return state, 0, nil
}

type mockSwitchIo struct {
	mtx                 sync.Mutex
	switchedToConsensus bool
	numStatusResponse   int
	numBlockResponse    int
	numNoBlockResponse  int
}

func (sio *mockSwitchIo) sendBlockRequest(peerID p2p.ID, height int64) error {
	return nil
}

func (sio *mockSwitchIo) sendStatusResponse(height int64, peerID p2p.ID) error {
	sio.mtx.Lock()
	defer sio.mtx.Unlock()
	sio.numStatusResponse++
	return nil
}

func (sio *mockSwitchIo) sendBlockToPeer(block *types.Block, peerID p2p.ID) error {
	sio.mtx.Lock()
	defer sio.mtx.Unlock()
	sio.numBlockResponse++
	return nil
}

func (sio *mockSwitchIo) sendBlockNotFound(height int64, peerID p2p.ID) error {
	sio.mtx.Lock()
	defer sio.mtx.Unlock()
	sio.numNoBlockResponse++
	return nil
}

func (sio *mockSwitchIo) trySwitchToConsensus(state sm.State, skipWAL bool) bool {
	sio.mtx.Lock()
	defer sio.mtx.Unlock()
	sio.switchedToConsensus = true
	return true
}

func (sio *mockSwitchIo) broadcastStatusRequest(base int64, height int64) {
}

type testReactorParams struct {
	logger      log.Logger
	genDoc      *types.GenesisDoc
	privVals    []types.PrivValidator
	startHeight int64
	mockA       bool
}

func newTestReactor(p testReactorParams) *BlockchainReactor {
	store, state, _ := newReactorStore(p.genDoc, p.privVals, p.startHeight)
	reporter := behaviour.NewMockReporter()

	var appl blockApplier

	if p.mockA {
		appl = &mockBlockApplier{}
	} else {
		app := &testApp{}
		cc := proxy.NewLocalClientCreator(app)
		proxyApp := proxy.NewAppConns(cc)
		err := proxyApp.Start()
		if err != nil {
			panic(fmt.Errorf("error start app: %w", err))
		}
		db := dbm.NewMemDB()
		appl = sm.NewBlockExecutor(db, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{})
		sm.SaveState(db, state)
	}

	r := newReactor(state, store, reporter, appl, true)
	logger := log.TestingLogger()
	r.SetLogger(logger.With("module", "blockchain"))

	return r
}

// This test is left here and not deleted to retain the termination cases for
// future improvement in [#4482](https://github.com/franono/tendermint/issues/4482).
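// Each scenario drives the scheduler and processor routines directly with peer,
// status, and block events and expects the reactor to switch over to consensus
// once the max peer height has been processed.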
// func TestReactorTerminationScenarios(t *testing.T) {

// 	config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
// 	defer os.RemoveAll(config.RootDir)
// 	genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)
// 	refStore, _, _ := newReactorStore(genDoc, privVals, 20)

// 	params := testReactorParams{
// 		logger:      log.TestingLogger(),
// 		genDoc:      genDoc,
// 		privVals:    privVals,
// 		startHeight: 10,
// 		bufferSize:  100,
// 		mockA:       true,
// 	}

// 	type testEvent struct {
// 		evType string
// 		peer   string
// 		height int64
// 	}

// 	tests := []struct {
// 		name   string
// 		params testReactorParams
// 		msgs   []testEvent
// 	}{
// 		{
// 			name:   "simple termination on max peer height - one peer",
// 			params: params,
// 			msgs: []testEvent{
// 				{evType: "AddPeer", peer: "P1"},
// 				{evType: "ReceiveS", peer: "P1", height: 13},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P1", height: 11},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P1", height: 12},
// 				{evType: "Process"},
// 				{evType: "ReceiveB", peer: "P1", height: 13},
// 				{evType: "Process"},
// 			},
// 		},
// 		{
// 			name:   "simple termination on max peer height - two peers",
// 			params: params,
// 			msgs: []testEvent{
// 				{evType: "AddPeer", peer: "P1"},
// 				{evType: "AddPeer", peer: "P2"},
// 				{evType: "ReceiveS", peer: "P1", height: 13},
// 				{evType: "ReceiveS", peer: "P2", height: 15},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P1", height: 11},
// 				{evType: "ReceiveB", peer: "P2", height: 12},
// 				{evType: "Process"},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P1", height: 13},
// 				{evType: "Process"},
// 				{evType: "ReceiveB", peer: "P2", height: 14},
// 				{evType: "Process"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 15},
// 				{evType: "Process"},
// 			},
// 		},
// 		{
// 			name:   "termination on max peer height - two peers, noBlock error",
// 			params: params,
// 			msgs: []testEvent{
// 				{evType: "AddPeer", peer: "P1"},
// 				{evType: "AddPeer", peer: "P2"},
// 				{evType: "ReceiveS", peer: "P1", height: 13},
// 				{evType: "ReceiveS", peer: "P2", height: 15},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveNB", peer: "P1", height: 11},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 12},
// 				{evType: "ReceiveB", peer: "P2", height: 11},
// 				{evType: "Process"},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 13},
// 				{evType: "Process"},
// 				{evType: "ReceiveB", peer: "P2", height: 14},
// 				{evType: "Process"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 15},
// 				{evType: "Process"},
// 			},
// 		},
// 		{
// 			name:   "termination on max peer height - two peers, remove one peer",
// 			params: params,
// 			msgs: []testEvent{
// 				{evType: "AddPeer", peer: "P1"},
// 				{evType: "AddPeer", peer: "P2"},
// 				{evType: "ReceiveS", peer: "P1", height: 13},
// 				{evType: "ReceiveS", peer: "P2", height: 15},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "RemovePeer", peer: "P1"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 12},
// 				{evType: "ReceiveB", peer: "P2", height: 11},
// 				{evType: "Process"},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 13},
// 				{evType: "Process"},
// 				{evType: "ReceiveB", peer: "P2", height: 14},
// 				{evType: "Process"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 15},
// 				{evType: "Process"},
// 			},
// 		},
// 	}

// 	for _, tt := range tests {
// 		tt := tt
// 		t.Run(tt.name, func(t *testing.T) {
// 			reactor := newTestReactor(params)
// 			reactor.Start()
// 			reactor.reporter = behaviour.NewMockReporter()
// 			mockSwitch := &mockSwitchIo{switchedToConsensus: false}
// 			reactor.io = mockSwitch
// 			// time for go routines to start
// 			time.Sleep(time.Millisecond)

// 			for _, step := range tt.msgs {
// 				switch step.evType {
// 				case "AddPeer":
// 					reactor.scheduler.send(bcAddNewPeer{peerID: p2p.ID(step.peer)})
// 				case "RemovePeer":
// 					reactor.scheduler.send(bcRemovePeer{peerID: p2p.ID(step.peer)})
// 				case "ReceiveS":
// 					reactor.scheduler.send(bcStatusResponse{
// 						peerID: p2p.ID(step.peer),
// 						height: step.height,
// 						time:   time.Now(),
// 					})
// 				case "ReceiveB":
// 					reactor.scheduler.send(bcBlockResponse{
// 						peerID: p2p.ID(step.peer),
// 						block:  refStore.LoadBlock(step.height),
// 						size:   10,
// 						time:   time.Now(),
// 					})
// 				case "ReceiveNB":
// 					reactor.scheduler.send(bcNoBlockResponse{
// 						peerID: p2p.ID(step.peer),
// 						height: step.height,
// 						time:   time.Now(),
// 					})
// 				case "BlockReq":
// 					reactor.scheduler.send(rTrySchedule{time: time.Now()})
// 				case "Process":
// 					reactor.processor.send(rProcessBlock{})
// 				}
// 				// give time for messages to propagate between routines
// 				time.Sleep(time.Millisecond)
// 			}

// 			// time for processor to finish and reactor to switch to consensus
// 			time.Sleep(20 * time.Millisecond)
// 			assert.True(t, mockSwitch.hasSwitchedToConsensus())
// 			reactor.Stop()
// 		})
// 	}
// }

func TestReactorHelperMode(t *testing.T) {
	var (
		channelID = byte(0x40)
	)

	config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
	defer os.RemoveAll(config.RootDir)
	genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)

	params := testReactorParams{
		logger:      log.TestingLogger(),
		genDoc:      genDoc,
		privVals:    privVals,
		startHeight: 20,
		mockA:       true,
	}

	type testEvent struct {
		peer  string
		event interface{}
	}

	tests := []struct {
		name   string
		params testReactorParams
		msgs   []testEvent
	}{
		{
			name:   "status request",
			params: params,
			msgs: []testEvent{
				{"P1", bcStatusRequestMessage{}},
				{"P1", bcBlockRequestMessage{Height: 13}},
				{"P1", bcBlockRequestMessage{Height: 20}},
				{"P1", bcBlockRequestMessage{Height: 22}},
			},
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			reactor := newTestReactor(params)
			mockSwitch := &mockSwitchIo{switchedToConsensus: false}
			reactor.io = mockSwitch
			reactor.Start()

			for i := 0; i < len(tt.msgs); i++ {
				step := tt.msgs[i]
				switch ev := step.event.(type) {
				case bcStatusRequestMessage:
					old := mockSwitch.numStatusResponse
					reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, cdc.MustMarshalBinaryBare(ev))
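					// exactly one additional status response should have gone out through the io mock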
					assert.Equal(t, old+1, mockSwitch.numStatusResponse)
				case bcBlockRequestMessage:
					if ev.Height > params.startHeight {
						old := mockSwitch.numNoBlockResponse
						reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, cdc.MustMarshalBinaryBare(ev))
						assert.Equal(t, old+1, mockSwitch.numNoBlockResponse)
					} else {
						old := mockSwitch.numBlockResponse
						reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, cdc.MustMarshalBinaryBare(ev))
						assert.Equal(t, old+1, mockSwitch.numBlockResponse)
					}
				}
			}
			reactor.Stop()
		})
	}
}

func TestReactorSetSwitchNil(t *testing.T) {
	config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
	defer os.RemoveAll(config.RootDir)
	genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)

	reactor := newTestReactor(testReactorParams{
		logger:   log.TestingLogger(),
		genDoc:   genDoc,
		privVals: privVals,
	})
	reactor.SetSwitch(nil)

	assert.Nil(t, reactor.Switch)
	assert.Nil(t, reactor.io)
}

//----------------------------------------------
// utility funcs

func makeTxs(height int64) (txs []types.Tx) {
	for i := 0; i < 10; i++ {
		txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
	}
	return txs
}

func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
	block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
	return block
}

type testApp struct {
	abci.BaseApplication
}

func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower int64) (
	*types.GenesisDoc, []types.PrivValidator) {
	validators := make([]types.GenesisValidator, numValidators)
	privValidators := make([]types.PrivValidator, numValidators)
	for i := 0; i < numValidators; i++ {
		val, privVal := types.RandValidator(randPower, minPower)
		validators[i] = types.GenesisValidator{
			PubKey: val.PubKey,
			Power:  val.VotingPower,
		}
		privValidators[i] = privVal
	}
	sort.Sort(types.PrivValidatorsByAddress(privValidators))

	return &types.GenesisDoc{
		GenesisTime: tmtime.Now(),
		ChainID:     chainID,
		Validators:  validators,
	}, privValidators
}

// Why are we importing the entire blockExecutor dependency graph here
// when we have the facilities to
func newReactorStore(
	genDoc *types.GenesisDoc,
	privVals []types.PrivValidator,
	maxBlockHeight int64) (*store.BlockStore, sm.State, *sm.BlockExecutor) {
	if len(privVals) != 1 {
		panic("only support one validator")
	}
	app := &testApp{}
	cc := proxy.NewLocalClientCreator(app)
	proxyApp := proxy.NewAppConns(cc)
	err := proxyApp.Start()
	if err != nil {
		panic(fmt.Errorf("error start app: %w", err))
	}

	stateDB := dbm.NewMemDB()
	blockStore := store.NewBlockStore(dbm.NewMemDB())

	state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
	if err != nil {
		panic(fmt.Errorf("error constructing state from genesis file: %w", err))
	}

	db := dbm.NewMemDB()
	blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(),
		mock.Mempool{}, sm.MockEvidencePool{})
	sm.SaveState(db, state)

	// add blocks in
	for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
		lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)
		if blockHeight > 1 {
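			// sign a commit for the previous block so the new block carries a valid LastCommit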
			lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
			lastBlock := blockStore.LoadBlock(blockHeight - 1)
			vote, err := types.MakeVote(
				lastBlock.Header.Height,
				lastBlockMeta.BlockID,
				state.Validators,
				privVals[0],
				lastBlock.Header.ChainID,
				time.Now(),
			)
			if err != nil {
				panic(err)
			}
			lastCommit = types.NewCommit(vote.Height, vote.Round,
				lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
		}

		thisBlock := makeBlock(blockHeight, state, lastCommit)

		thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
		blockID := types.BlockID{Hash: thisBlock.Hash(), PartsHeader: thisParts.Header()}

		state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
		if err != nil {
			panic(fmt.Errorf("error apply block: %w", err))
		}

		blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
	}
	return blockStore, state, blockExec
}