github.com/evdatsion/aphelion-dpos-bft@v0.32.1/consensus/reactor_test.go

package consensus

import (
	"context"
	"fmt"
	"os"
	"path"
	"runtime"
	"runtime/pprof"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	abcicli "github.com/evdatsion/aphelion-dpos-bft/abci/client"
	"github.com/evdatsion/aphelion-dpos-bft/abci/example/kvstore"
	abci "github.com/evdatsion/aphelion-dpos-bft/abci/types"
	bc "github.com/evdatsion/aphelion-dpos-bft/blockchain"
	cfg "github.com/evdatsion/aphelion-dpos-bft/config"
	dbm "github.com/evdatsion/aphelion-dpos-bft/libs/db"
	"github.com/evdatsion/aphelion-dpos-bft/libs/log"
	mempl "github.com/evdatsion/aphelion-dpos-bft/mempool"
	"github.com/evdatsion/aphelion-dpos-bft/p2p"
	"github.com/evdatsion/aphelion-dpos-bft/p2p/mock"
	sm "github.com/evdatsion/aphelion-dpos-bft/state"
	"github.com/evdatsion/aphelion-dpos-bft/types"
)

//----------------------------------------------
// in-process testnets

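// startConsensusNet wires the given ConsensusStates into an in-process p2p
// network of N nodes, subscribes each node to NewBlock events, and switches
// every reactor to consensus mode. It returns the reactors, the per-node block
// subscriptions, and the event buses so callers can tear everything down.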
func startConsensusNet(t *testing.T, css []*ConsensusState, N int) (
	[]*ConsensusReactor,
	[]types.Subscription,
	[]*types.EventBus,
) {
	reactors := make([]*ConsensusReactor, N)
	blocksSubs := make([]types.Subscription, 0)
	eventBuses := make([]*types.EventBus, N)
	for i := 0; i < N; i++ {
		/*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
		if err != nil {	t.Fatal(err)}*/
		reactors[i] = NewConsensusReactor(css[i], true) // so we don't start the consensus states
		reactors[i].SetLogger(css[i].Logger)

		// eventBus is already started with the cs
		eventBuses[i] = css[i].eventBus
		reactors[i].SetEventBus(eventBuses[i])

		blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
		require.NoError(t, err)
		blocksSubs = append(blocksSubs, blocksSub)

		if css[i].state.LastBlockHeight == 0 { // simulate handling InitChain in the handshake
			sm.SaveState(css[i].blockExec.DB(), css[i].state)
		}
	}
	// make connected switches and start all reactors
	p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("CONSENSUS", reactors[i])
		s.SetLogger(reactors[i].conS.Logger.With("module", "p2p"))
		return s
	}, p2p.Connect2Switches)

	// now that everyone is connected, start the state machines
	// If we started the state machines before everyone was connected,
	// we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors
	// TODO: is this still true with new pubsub?
	for i := 0; i < N; i++ {
		s := reactors[i].conS.GetState()
		reactors[i].SwitchToConsensus(s, 0)
	}
	return reactors, blocksSubs, eventBuses
}

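// stopConsensusNet stops the switch of every reactor and every event bus
// started by startConsensusNet.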
func stopConsensusNet(logger log.Logger, reactors []*ConsensusReactor, eventBuses []*types.EventBus) {
	logger.Info("stopConsensusNet", "n", len(reactors))
	for i, r := range reactors {
		logger.Info("stopConsensusNet: Stopping ConsensusReactor", "i", i)
		r.Switch.Stop()
	}
	for i, b := range eventBuses {
		logger.Info("stopConsensusNet: Stopping eventBus", "i", i)
		b.Stop()
	}
	logger.Info("stopConsensusNet: DONE", "n", len(reactors))
}

// Ensure a testnet makes blocks
func TestReactorBasic(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j].Out()
	}, css)
}

// Ensure we can process blocks with evidence
func TestReactorWithEvidence(t *testing.T) {
	types.RegisterMockEvidences(cdc)
	types.RegisterMockEvidences(types.GetCodec())

	nValidators := 4
	testName := "consensus_reactor_test"
	tickerFunc := newMockTickerFunc(true)
	appFunc := newCounter

	// heed the advice from https://www.sandimetz.com/blog/2016/1/20/the-wrong-abstraction
	// to unroll unwieldy abstractions. Here we duplicate the code from:
	// css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)

	genDoc, privVals := randGenesisDoc(nValidators, false, 30)
	css := make([]*ConsensusState, nValidators)
	logger := consensusLogger()
	for i := 0; i < nValidators; i++ {
		stateDB := dbm.NewMemDB() // each state needs its own db
		state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
		defer os.RemoveAll(thisConfig.RootDir)
		ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
		app := appFunc()
		vals := types.TM2PB.ValidatorUpdates(state.Validators)
		app.InitChain(abci.RequestInitChain{Validators: vals})

		pv := privVals[i]
		// duplicate code from:
		// css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app)

		blockDB := dbm.NewMemDB()
		blockStore := bc.NewBlockStore(blockDB)

		// one for mempool, one for consensus
		mtx := new(sync.Mutex)
		proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
		proxyAppConnCon := abcicli.NewLocalClient(mtx, app)

		// Make Mempool
		mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
		mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
		if thisConfig.Consensus.WaitForTxs() {
			mempool.EnableTxsAvailable()
		}

		// mock the evidence pool
		// everyone includes evidence of another double signing
		vIdx := (i + 1) % nValidators
		addr := privVals[vIdx].GetPubKey().Address()
		evpool := newMockEvidencePool(addr)

		// Make ConsensusState
		blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
		cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
		cs.SetLogger(log.TestingLogger().With("module", "consensus"))
		cs.SetPrivValidator(pv)

		eventBus := types.NewEventBus()
		eventBus.SetLogger(log.TestingLogger().With("module", "events"))
		eventBus.Start()
		cs.SetEventBus(eventBus)

		cs.SetTimeoutTicker(tickerFunc())
		cs.SetLogger(logger.With("validator", i, "module", "consensus"))

		css[i] = cs
	}

	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nValidators)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// wait till everyone makes the first new block with no evidence
	timeoutWaitGroup(t, nValidators, func(j int) {
		msg := <-blocksSubs[j].Out()
		block := msg.Data().(types.EventDataNewBlock).Block
		assert.True(t, len(block.Evidence.Evidence) == 0)
	}, css)

	// second block should have evidence
	timeoutWaitGroup(t, nValidators, func(j int) {
		msg := <-blocksSubs[j].Out()
		block := msg.Data().(types.EventDataNewBlock).Block
		assert.True(t, len(block.Evidence.Evidence) > 0)
	}, css)
}

// mock evidence pool returns no evidence for block 1,
// and returns one piece for all higher blocks. The one piece
// is for a given validator at block 1.
type mockEvidencePool struct {
	height int
	ev     []types.Evidence
}

func newMockEvidencePool(val []byte) *mockEvidencePool {
	return &mockEvidencePool{
		ev: []types.Evidence{types.NewMockGoodEvidence(1, 1, val)},
	}
}

// NOTE: maxBytes is ignored
func (m *mockEvidencePool) PendingEvidence(maxBytes int64) []types.Evidence {
	if m.height > 0 {
		return m.ev
	}
	return nil
}
func (m *mockEvidencePool) AddEvidence(types.Evidence) error { return nil }
func (m *mockEvidencePool) Update(block *types.Block, state sm.State) {
	if m.height > 0 {
		if len(block.Evidence.Evidence) == 0 {
			panic("block has no evidence")
		}
	}
	m.height++
}
func (m *mockEvidencePool) IsCommitted(types.Evidence) bool { return false }

//------------------------------------

// Ensure a testnet makes blocks when there are txs
func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
		func(c *cfg.Config) {
			c.Consensus.CreateEmptyBlocks = false
		})
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// send a tx
	if err := assertMempool(css[3].txNotifier).CheckTx([]byte{1, 2, 3}, nil); err != nil {
		t.Fatal(err)
	}

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j].Out()
	}, css)
}

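// Ensure Receive does not panic when the switch delivers a message after
// InitPeer but before AddPeer has been called.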
func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
	N := 1
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, _, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	var (
		reactor = reactors[0]
		peer    = mock.NewPeer(nil)
		msg     = cdc.MustMarshalBinaryBare(&HasVoteMessage{Height: 1, Round: 1, Index: 1, Type: types.PrevoteType})
	)

	reactor.InitPeer(peer)

	// simulate switch calling Receive before AddPeer
	assert.NotPanics(t, func() {
		reactor.Receive(StateChannel, peer, msg)
		reactor.AddPeer(peer)
	})
}

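// Ensure Receive panics when the switch delivers a message for a peer that
// InitPeer has never been called for.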
func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
	N := 1
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, _, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	var (
		reactor = reactors[0]
		peer    = mock.NewPeer(nil)
		msg     = cdc.MustMarshalBinaryBare(&HasVoteMessage{Height: 1, Round: 1, Index: 1, Type: types.PrevoteType})
	)

	// InitPeer is deliberately not called here

	// simulate switch calling Receive before InitPeer
	assert.Panics(t, func() {
		reactor.Receive(StateChannel, peer, msg)
	})
}

// Test we record stats about votes and block parts from other peers.
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j].Out()
	}, css)

	// Get peer
	peer := reactors[1].Switch.Peers().List()[0]
	// Get peer state
	ps := peer.Get(types.PeerStateKey).(*PeerState)

	assert.Equal(t, true, ps.VotesSent() > 0, "number of votes sent should have increased")
	assert.Equal(t, true, ps.BlockPartsSent() > 0, "number of block parts sent should have increased")
}

//-------------------------------------------------------------
// ensure we can make blocks despite cycling a validator set

func TestReactorVotingPowerChange(t *testing.T) {
	nVals := 4
	logger := log.TestingLogger()
	css, cleanup := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentKVStore)
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nVals)
	defer stopConsensusNet(logger, reactors, eventBuses)

	// map of active validators
	activeVals := make(map[string]struct{})
	for i := 0; i < nVals; i++ {
		addr := css[i].privValidator.GetPubKey().Address()
		activeVals[string(addr)] = struct{}{}
	}

	// wait till everyone makes block 1
	timeoutWaitGroup(t, nVals, func(j int) {
		<-blocksSubs[j].Out()
	}, css)

	//---------------------------------------------------------------------------
	logger.Debug("---------------------------- Testing changing the voting power of one validator a few times")

	val1PubKey := css[0].privValidator.GetPubKey()
	val1PubKeyABCI := types.TM2PB.PubKey(val1PubKey)
	updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25)
	previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
	}

	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2)
	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
	}

	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26)
	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
	}
}

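// Ensure the network keeps making blocks while validators are added,
// have their voting power changed, and are removed.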
func TestReactorValidatorSetChanges(t *testing.T) {
	nPeers := 7
	nVals := 4
	css, _, _, cleanup := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStoreWithPath)

	defer cleanup()
	logger := log.TestingLogger()

	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nPeers)
	defer stopConsensusNet(logger, reactors, eventBuses)

	// map of active validators
	activeVals := make(map[string]struct{})
	for i := 0; i < nVals; i++ {
		addr := css[i].privValidator.GetPubKey().Address()
		activeVals[string(addr)] = struct{}{}
	}

	// wait till everyone makes block 1
	timeoutWaitGroup(t, nPeers, func(j int) {
		<-blocksSubs[j].Out()
	}, css)

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing adding one validator")

	newValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
	valPubKey1ABCI := types.TM2PB.PubKey(newValidatorPubKey1)
	newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)

	// wait till everyone makes block 2
	// ensure the commit includes all validators
	// send newValTx to change vals in block 3
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)

	// wait till everyone makes block 3.
	// it includes the commit for block 2, which is by the original validator set
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)

	// wait till everyone makes block 4.
	// it includes the commit for block 3, which is by the original validator set
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)

	// the commits for block 4 should be with the updated validator set
	activeVals[string(newValidatorPubKey1.Address())] = struct{}{}

	// wait till everyone makes block 5
	// it includes the commit for block 4, which should have the updated validator set
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing changing the voting power of one validator")

	updateValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
	updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1)
	updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
	previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Errorf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[nVals].GetRoundState().LastValidators.TotalVotingPower())
	}

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing adding two validators at once")

	newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey()
	newVal2ABCI := types.TM2PB.PubKey(newValidatorPubKey2)
	newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)

	newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey()
	newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3)
	newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	activeVals[string(newValidatorPubKey2.Address())] = struct{}{}
	activeVals[string(newValidatorPubKey3.Address())] = struct{}{}
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing removing two validators at once")

	removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
	removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	delete(activeVals, string(newValidatorPubKey2.Address()))
	delete(activeVals, string(newValidatorPubKey3.Address()))
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
}

// Check we can make blocks with skip_timeout_commit=false
func TestReactorWithTimeoutCommit(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
	defer cleanup()
	// override default SkipTimeoutCommit == true for tests
	for i := 0; i < N; i++ {
		css[i].config.SkipTimeoutCommit = false
	}

	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N-1)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N-1, func(j int) {
		<-blocksSubs[j].Out()
	}, css)
}

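// waitForAndValidateBlock waits for each of the n nodes to emit a new block,
// validates its last commit against the set of active validators, and then
// submits the given txs (if any) to each node's mempool.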
func waitForAndValidateBlock(
	t *testing.T,
	n int,
	activeVals map[string]struct{},
	blocksSubs []types.Subscription,
	css []*ConsensusState,
	txs ...[]byte,
) {
	timeoutWaitGroup(t, n, func(j int) {
		css[j].Logger.Debug("waitForAndValidateBlock")
		msg := <-blocksSubs[j].Out()
		newBlock := msg.Data().(types.EventDataNewBlock).Block
		css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height)
		err := validateBlock(newBlock, activeVals)
		assert.Nil(t, err)
		for _, tx := range txs {
			err := assertMempool(css[j].txNotifier).CheckTx(tx, nil)
			assert.Nil(t, err)
		}
	}, css)
}

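// waitForAndValidateBlockWithTx keeps consuming new blocks on each node until
// all of the given txs have appeared, in order, validating every block against
// the active validator set along the way.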
func waitForAndValidateBlockWithTx(
	t *testing.T,
	n int,
	activeVals map[string]struct{},
	blocksSubs []types.Subscription,
	css []*ConsensusState,
	txs ...[]byte,
) {
	timeoutWaitGroup(t, n, func(j int) {
		ntxs := 0
	BLOCK_TX_LOOP:
		for {
			css[j].Logger.Debug("waitForAndValidateBlockWithTx", "ntxs", ntxs)
			msg := <-blocksSubs[j].Out()
			newBlock := msg.Data().(types.EventDataNewBlock).Block
			css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height)
			err := validateBlock(newBlock, activeVals)
			assert.Nil(t, err)

			// check that txs match the txs we're waiting for.
			// note they could be spread over multiple blocks,
			// but they should be in order.
			for _, tx := range newBlock.Data.Txs {
				assert.EqualValues(t, txs[ntxs], tx)
				ntxs++
			}

			if ntxs == len(txs) {
				break BLOCK_TX_LOOP
			}
		}

	}, css)
}

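// waitForBlockWithUpdatedValsAndValidateIt keeps consuming new blocks on each
// node until it sees one whose LastCommit size matches the updated validator
// set, then validates that block.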
func waitForBlockWithUpdatedValsAndValidateIt(
	t *testing.T,
	n int,
	updatedVals map[string]struct{},
	blocksSubs []types.Subscription,
	css []*ConsensusState,
) {
	timeoutWaitGroup(t, n, func(j int) {

		var newBlock *types.Block
	LOOP:
		for {
			css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt")
			msg := <-blocksSubs[j].Out()
			newBlock = msg.Data().(types.EventDataNewBlock).Block
			if newBlock.LastCommit.Size() == len(updatedVals) {
				css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height)
				break LOOP
			} else {
				css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. Skipping", "height", newBlock.Height)
			}
		}

		err := validateBlock(newBlock, updatedVals)
		assert.Nil(t, err)
	}, css)
}

// expects high synchrony!
func validateBlock(block *types.Block, activeVals map[string]struct{}) error {
	if block.LastCommit.Size() != len(activeVals) {
		return fmt.Errorf("commit size doesn't match number of active validators; got %d, expected %d", block.LastCommit.Size(), len(activeVals))
	}

	for _, vote := range block.LastCommit.Precommits {
		if _, ok := activeVals[string(vote.ValidatorAddress)]; !ok {
			return fmt.Errorf("found vote for inactive validator %X", vote.ValidatorAddress)
		}
	}
	return nil
}

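// timeoutWaitGroup runs f(j) for each of the n nodes in parallel and waits for
// all of them to finish. On timeout it logs every node's round state, dumps
// all goroutine stacks, and panics.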
func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*ConsensusState) {
	wg := new(sync.WaitGroup)
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func(j int) {
			f(j)
			wg.Done()
		}(i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	// we're running many nodes in-process, possibly in a virtual machine,
	// and spewing debug messages - making a block could take a while.
	timeout := time.Second * 300

	select {
	case <-done:
	case <-time.After(timeout):
		for i, cs := range css {
			t.Log("#################")
			t.Log("Validator", i)
			t.Log(cs.GetRoundState())
			t.Log("")
		}
		os.Stdout.Write([]byte("pprof.Lookup('goroutine'):\n"))
		pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
		capture()
		panic("Timed out waiting for all validators to commit a block")
	}
}

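// capture prints the stack traces of all current goroutines to stdout.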
func capture() {
	trace := make([]byte, 10240000)
	count := runtime.Stack(trace, true)
	fmt.Printf("Stack of %d bytes: %s\n", count, trace)
}