github.com/arcology-network/consensus-engine@v1.9.0/consensus/reactor_test.go

     1  package consensus
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"os"
     7  	"path"
     8  	"runtime"
     9  	"runtime/pprof"
    10  	"sync"
    11  	"testing"
    12  	"time"
    13  
    14  	"github.com/stretchr/testify/assert"
    15  	"github.com/stretchr/testify/mock"
    16  	"github.com/stretchr/testify/require"
    17  
    18  	dbm "github.com/tendermint/tm-db"
    19  
    20  	abcicli "github.com/arcology-network/consensus-engine/abci/client"
    21  	"github.com/arcology-network/consensus-engine/abci/example/kvstore"
    22  	abci "github.com/arcology-network/consensus-engine/abci/types"
    23  	cfg "github.com/arcology-network/consensus-engine/config"
    24  	cstypes "github.com/arcology-network/consensus-engine/consensus/types"
    25  	cryptoenc "github.com/arcology-network/consensus-engine/crypto/encoding"
    26  	"github.com/arcology-network/consensus-engine/crypto/tmhash"
    27  	"github.com/arcology-network/consensus-engine/libs/bits"
    28  	"github.com/arcology-network/consensus-engine/libs/bytes"
    29  	"github.com/arcology-network/consensus-engine/libs/log"
    30  	tmsync "github.com/arcology-network/consensus-engine/libs/sync"
    31  	mempl "github.com/arcology-network/consensus-engine/mempool"
    32  	"github.com/arcology-network/consensus-engine/p2p"
    33  	p2pmock "github.com/arcology-network/consensus-engine/p2p/mock"
    34  	tmproto "github.com/arcology-network/consensus-engine/proto/tendermint/types"
    35  	sm "github.com/arcology-network/consensus-engine/state"
    36  	statemocks "github.com/arcology-network/consensus-engine/state/mocks"
    37  	"github.com/arcology-network/consensus-engine/store"
    38  	"github.com/arcology-network/consensus-engine/types"
    39  )
    40  
    41  //----------------------------------------------
    42  // in-process testnets
    43  
    44  var defaultTestTime = time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
    45  
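        // startConsensusNet wires the given consensus states into an in-process p2p
        // network: it creates one reactor per state, subscribes each event bus to
        // NewBlock events, connects all switches, and then switches every reactor to
        // consensus. It returns the reactors, block subscriptions, and event buses so
        // callers can wait for blocks and later shut everything down with stopConsensusNet.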
    46  func startConsensusNet(t *testing.T, css []*State, n int) (
    47  	[]*Reactor,
    48  	[]types.Subscription,
    49  	[]*types.EventBus,
    50  ) {
    51  	reactors := make([]*Reactor, n)
    52  	blocksSubs := make([]types.Subscription, 0)
    53  	eventBuses := make([]*types.EventBus, n)
    54  	for i := 0; i < n; i++ {
    55  		/*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
    56  		if err != nil {	t.Fatal(err)}*/
    57  	reactors[i] = NewReactor(css[i], true) // so we don't start the consensus states
    58  		reactors[i].SetLogger(css[i].Logger)
    59  
    60  		// eventBus is already started with the cs
    61  		eventBuses[i] = css[i].eventBus
    62  		reactors[i].SetEventBus(eventBuses[i])
    63  
    64  		blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
    65  		require.NoError(t, err)
    66  		blocksSubs = append(blocksSubs, blocksSub)
    67  
    68  		if css[i].state.LastBlockHeight == 0 { // simulate handling InitChain during the handshake
    69  			if err := css[i].blockExec.Store().Save(css[i].state); err != nil {
    70  				t.Error(err)
    71  			}
    72  
    73  		}
    74  	}
    75  	// make connected switches and start all reactors
    76  	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
    77  		s.AddReactor("CONSENSUS", reactors[i])
    78  		s.SetLogger(reactors[i].conS.Logger.With("module", "p2p"))
    79  		return s
    80  	}, p2p.Connect2Switches)
    81  
    82  	// now that everyone is connected, start the state machines
    83  	// If we started the state machines before everyone was connected,
    84  	// we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors
    85  	// TODO: is this still true with new pubsub?
    86  	for i := 0; i < n; i++ {
    87  		s := reactors[i].conS.GetState()
    88  		reactors[i].SwitchToConsensus(s, false)
    89  	}
    90  	return reactors, blocksSubs, eventBuses
    91  }
    92  
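        // stopConsensusNet stops every reactor's switch and every event bus started by
        // startConsensusNet, logging (but not failing on) any errors encountered.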
    93  func stopConsensusNet(logger log.Logger, reactors []*Reactor, eventBuses []*types.EventBus) {
    94  	logger.Info("stopConsensusNet", "n", len(reactors))
    95  	for i, r := range reactors {
    96  		logger.Info("stopConsensusNet: Stopping Reactor", "i", i)
    97  		if err := r.Switch.Stop(); err != nil {
    98  			logger.Error("error trying to stop switch", "error", err)
    99  		}
   100  	}
   101  	for i, b := range eventBuses {
   102  		logger.Info("stopConsensusNet: Stopping eventBus", "i", i)
   103  		if err := b.Stop(); err != nil {
   104  			logger.Error("error trying to stop eventbus", "error", err)
   105  		}
   106  	}
   107  	logger.Info("stopConsensusNet: DONE", "n", len(reactors))
   108  }
   109  
   110  // Ensure a testnet makes blocks
   111  func TestReactorBasic(t *testing.T) {
   112  	N := 4
   113  	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
   114  	defer cleanup()
   115  	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
   116  	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
   117  	// wait till everyone makes the first two blocks
   118  	timeoutWaitGroup(t, N, func(j int) {
   119  		<-blocksSubs[j].Out()
   120  		<-blocksSubs[j].Out()
   121  	}, css)
   122  }
   123  
   124  // Ensure we can process blocks with evidence
   125  func TestReactorWithEvidence(t *testing.T) {
   126  	nValidators := 4
   127  	testName := "consensus_reactor_test"
   128  	tickerFunc := newMockTickerFunc(true)
   129  	appFunc := newCounter
   130  
   131  	// heed the advice from https://www.sandimetz.com/blog/2016/1/20/the-wrong-abstraction
   132  	// to unroll unwieldy abstractions. Here we duplicate the code from:
   133  	// css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
   134  
   135  	genDoc, privVals := randGenesisDoc(nValidators, false, 30)
   136  	css := make([]*State, nValidators)
   137  	logger := consensusLogger()
   138  	for i := 0; i < nValidators; i++ {
   139  		stateDB := dbm.NewMemDB() // each state needs its own db
   140  		stateStore := sm.NewStore(stateDB)
   141  		state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
   142  		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
   143  		defer os.RemoveAll(thisConfig.RootDir)
   144  		ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
   145  		app := appFunc()
   146  		vals := types.TM2PB.ValidatorUpdates(state.Validators)
   147  		app.InitChain(abci.RequestInitChain{Validators: vals})
   148  
   149  		pv := privVals[i]
   150  		// duplicate code from:
   151  		// css[i] = newStateWithConfig(thisConfig, state, privVals[i], app)
   152  
   153  		blockDB := dbm.NewMemDB()
   154  		blockStore := store.NewBlockStore(blockDB)
   155  
   156  		// one for mempool, one for consensus
   157  		mtx := new(tmsync.Mutex)
   158  		proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
   159  		proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
   160  
   161  		// Make Mempool
   162  		mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
   163  		mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
   164  		if thisConfig.Consensus.WaitForTxs() {
   165  			mempool.EnableTxsAvailable()
   166  		}
   167  
   168  		// mock the evidence pool
   169  		// each validator includes evidence that another validator double signed
   170  		vIdx := (i + 1) % nValidators
   171  		ev := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], config.ChainID())
   172  		evpool := &statemocks.EvidencePool{}
   173  		evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil)
   174  		evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]types.Evidence{
   175  			ev}, int64(len(ev.Bytes())))
   176  		evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return()
   177  
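        		// Note: the mocked pool above is wired into the block executor below, so each
        		// proposer includes the pending evidence in its proposals; the consensus state
        		// itself is given an empty pool.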
   178  		evpool2 := sm.EmptyEvidencePool{}
   179  
   180  		// Make State
   181  		blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
   182  		cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2)
   183  		cs.SetLogger(log.TestingLogger().With("module", "consensus"))
   184  		cs.SetPrivValidator(pv)
   185  
   186  		eventBus := types.NewEventBus()
   187  		eventBus.SetLogger(log.TestingLogger().With("module", "events"))
   188  		err := eventBus.Start()
   189  		require.NoError(t, err)
   190  		cs.SetEventBus(eventBus)
   191  
   192  		cs.SetTimeoutTicker(tickerFunc())
   193  		cs.SetLogger(logger.With("validator", i, "module", "consensus"))
   194  
   195  		css[i] = cs
   196  	}
   197  
   198  	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nValidators)
   199  	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
   200  
   201  	// we expect each validator, when it is the proposer, to propose one piece of evidence.
   202  	for i := 0; i < nValidators; i++ {
   203  		timeoutWaitGroup(t, nValidators, func(j int) {
   204  			msg := <-blocksSubs[j].Out()
   205  			block := msg.Data().(types.EventDataNewBlock).Block
   206  			assert.Len(t, block.Evidence.Evidence, 1)
   207  		}, css)
   208  	}
   209  }
   210  
   211  //------------------------------------
   212  
   213  // Ensure a testnet makes blocks when there are txs
   214  func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
   215  	N := 4
   216  	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
   217  		func(c *cfg.Config) {
   218  			c.Consensus.CreateEmptyBlocks = false
   219  		})
   220  	defer cleanup()
   221  	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
   222  	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
   223  
   224  	// send a tx
   225  	if err := assertMempool(css[3].txNotifier).CheckTx([]byte{1, 2, 3}, nil, mempl.TxInfo{}); err != nil {
   226  		t.Error(err)
   227  	}
   228  
   229  	// wait till everyone makes the first new block
   230  	timeoutWaitGroup(t, N, func(j int) {
   231  		<-blocksSubs[j].Out()
   232  	}, css)
   233  }
   234  
   235  func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
   236  	N := 1
   237  	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
   238  	defer cleanup()
   239  	reactors, _, eventBuses := startConsensusNet(t, css, N)
   240  	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
   241  
   242  	var (
   243  		reactor = reactors[0]
   244  		peer    = p2pmock.NewPeer(nil)
   245  		msg     = MustEncode(&HasVoteMessage{Height: 1,
   246  			Round: 1, Index: 1, Type: tmproto.PrevoteType})
   247  	)
   248  
   249  	reactor.InitPeer(peer)
   250  
   251  	// simulate switch calling Receive before AddPeer
   252  	assert.NotPanics(t, func() {
   253  		reactor.Receive(StateChannel, peer, msg)
   254  		reactor.AddPeer(peer)
   255  	})
   256  }
   257  
   258  func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
   259  	N := 1
   260  	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
   261  	defer cleanup()
   262  	reactors, _, eventBuses := startConsensusNet(t, css, N)
   263  	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
   264  
   265  	var (
   266  		reactor = reactors[0]
   267  		peer    = p2pmock.NewPeer(nil)
   268  		msg     = MustEncode(&HasVoteMessage{Height: 1,
   269  			Round: 1, Index: 1, Type: tmproto.PrevoteType})
   270  	)
   271  
   272  	// note: we deliberately do not call InitPeer here
   273  
   274  	// simulate switch calling Receive before InitPeer
   275  	assert.Panics(t, func() {
   276  		reactor.Receive(StateChannel, peer, msg)
   277  	})
   278  }
   279  
   280  // Test we record stats about votes and block parts from other peers.
   281  func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
   282  	N := 4
   283  	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
   284  	defer cleanup()
   285  	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
   286  	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
   287  
   288  	// wait till everyone makes the first new block
   289  	timeoutWaitGroup(t, N, func(j int) {
   290  		<-blocksSubs[j].Out()
   291  	}, css)
   292  
   293  	// Get peer
   294  	peer := reactors[1].Switch.Peers().List()[0]
   295  	// Get peer state
   296  	ps := peer.Get(types.PeerStateKey).(*PeerState)
   297  
   298  	assert.Equal(t, true, ps.VotesSent() > 0, "number of votes sent should have increased")
   299  	assert.Equal(t, true, ps.BlockPartsSent() > 0, "number of block parts sent should have increased")
   300  }
   301  
   302  //-------------------------------------------------------------
   303  // ensure we can make blocks despite cycling a validator set
   304  
   305  func TestReactorVotingPowerChange(t *testing.T) {
   306  	nVals := 4
   307  	logger := log.TestingLogger()
   308  	css, cleanup := randConsensusNet(
   309  		nVals,
   310  		"consensus_voting_power_changes_test",
   311  		newMockTickerFunc(true),
   312  		newPersistentKVStore)
   313  	defer cleanup()
   314  	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nVals)
   315  	defer stopConsensusNet(logger, reactors, eventBuses)
   316  
   317  	// map of active validators
   318  	activeVals := make(map[string]struct{})
   319  	for i := 0; i < nVals; i++ {
   320  		pubKey, err := css[i].privValidator.GetPubKey()
   321  		require.NoError(t, err)
   322  		addr := pubKey.Address()
   323  		activeVals[string(addr)] = struct{}{}
   324  	}
   325  
   326  	// wait till everyone makes block 1
   327  	timeoutWaitGroup(t, nVals, func(j int) {
   328  		<-blocksSubs[j].Out()
   329  	}, css)
   330  
   331  	//---------------------------------------------------------------------------
   332  	logger.Debug("---------------------------- Testing changing the voting power of one validator a few times")
   333  
   334  	val1PubKey, err := css[0].privValidator.GetPubKey()
   335  	require.NoError(t, err)
   336  
   337  	val1PubKeyABCI, err := cryptoenc.PubKeyToProto(val1PubKey)
   338  	require.NoError(t, err)
   339  	updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25)
   340  	previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()
   341  
   342  	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
   343  	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
   344  	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
   345  	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
   346  
   347  	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
   348  		t.Fatalf(
   349  			"expected voting power to change (before: %d, after: %d)",
   350  			previousTotalVotingPower,
   351  			css[0].GetRoundState().LastValidators.TotalVotingPower())
   352  	}
   353  
   354  	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2)
   355  	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()
   356  
   357  	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
   358  	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
   359  	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
   360  	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
   361  
   362  	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
   363  		t.Fatalf(
   364  			"expected voting power to change (before: %d, after: %d)",
   365  			previousTotalVotingPower,
   366  			css[0].GetRoundState().LastValidators.TotalVotingPower())
   367  	}
   368  
   369  	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26)
   370  	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()
   371  
   372  	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
   373  	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
   374  	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
   375  	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
   376  
   377  	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
   378  		t.Fatalf(
   379  			"expected voting power to change (before: %d, after: %d)",
   380  			previousTotalVotingPower,
   381  			css[0].GetRoundState().LastValidators.TotalVotingPower())
   382  	}
   383  }
   384  
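        // TestReactorValidatorSetChanges ensures the network keeps committing valid
        // blocks while validators are added, have their voting power updated, and are
        // removed via kvstore validator-set-change transactions, with extra
        // non-validator peers also running.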
   385  func TestReactorValidatorSetChanges(t *testing.T) {
   386  	nPeers := 7
   387  	nVals := 4
   388  	css, _, _, cleanup := randConsensusNetWithPeers(
   389  		nVals,
   390  		nPeers,
   391  		"consensus_val_set_changes_test",
   392  		newMockTickerFunc(true),
   393  		newPersistentKVStoreWithPath)
   394  
   395  	defer cleanup()
   396  	logger := log.TestingLogger()
   397  
   398  	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nPeers)
   399  	defer stopConsensusNet(logger, reactors, eventBuses)
   400  
   401  	// map of active validators
   402  	activeVals := make(map[string]struct{})
   403  	for i := 0; i < nVals; i++ {
   404  		pubKey, err := css[i].privValidator.GetPubKey()
   405  		require.NoError(t, err)
   406  		activeVals[string(pubKey.Address())] = struct{}{}
   407  	}
   408  
   409  	// wait till everyone makes block 1
   410  	timeoutWaitGroup(t, nPeers, func(j int) {
   411  		<-blocksSubs[j].Out()
   412  	}, css)
   413  
   414  	//---------------------------------------------------------------------------
   415  	logger.Info("---------------------------- Testing adding one validator")
   416  
   417  	newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
   418  	assert.NoError(t, err)
   419  	valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1)
   420  	assert.NoError(t, err)
   421  	newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)
   422  
   423  	// wait till everyone makes block 2
   424  	// ensure the commit includes all validators
   425  	// send newValTx to change vals in block 3
   426  	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)
   427  
   428  	// wait till everyone makes block 3.
   429  	// it includes the commit for block 2, which is by the original validator set
   430  	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)
   431  
   432  	// wait till everyone makes block 4.
   433  	// it includes the commit for block 3, which is by the original validator set
   434  	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
   435  
   436  	// the commits for block 4 should be with the updated validator set
   437  	activeVals[string(newValidatorPubKey1.Address())] = struct{}{}
   438  
   439  	// wait till everyone makes block 5
   440  	// it includes the commit for block 4, which should have the updated validator set
   441  	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
   442  
   443  	//---------------------------------------------------------------------------
   444  	logger.Info("---------------------------- Testing changing the voting power of one validator")
   445  
   446  	updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
   447  	require.NoError(t, err)
   448  	updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1)
   449  	require.NoError(t, err)
   450  	updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
   451  	previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()
   452  
   453  	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
   454  	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
   455  	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
   456  	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
   457  
   458  	if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
   459  		t.Errorf(
   460  			"expected voting power to change (before: %d, after: %d)",
   461  			previousTotalVotingPower,
   462  			css[nVals].GetRoundState().LastValidators.TotalVotingPower())
   463  	}
   464  
   465  	//---------------------------------------------------------------------------
   466  	logger.Info("---------------------------- Testing adding two validators at once")
   467  
   468  	newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey()
   469  	require.NoError(t, err)
   470  	newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2)
   471  	require.NoError(t, err)
   472  	newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)
   473  
   474  	newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey()
   475  	require.NoError(t, err)
   476  	newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3)
   477  	require.NoError(t, err)
   478  	newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)
   479  
   480  	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
   481  	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
   482  	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
   483  	activeVals[string(newValidatorPubKey2.Address())] = struct{}{}
   484  	activeVals[string(newValidatorPubKey3.Address())] = struct{}{}
   485  	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
   486  
   487  	//---------------------------------------------------------------------------
   488  	logger.Info("---------------------------- Testing removing two validators at once")
   489  
   490  	removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
   491  	removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)
   492  
   493  	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
   494  	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
   495  	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
   496  	delete(activeVals, string(newValidatorPubKey2.Address()))
   497  	delete(activeVals, string(newValidatorPubKey3.Address()))
   498  	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
   499  }
   500  
   501  // Check we can make blocks with skip_timeout_commit=false
   502  func TestReactorWithTimeoutCommit(t *testing.T) {
   503  	N := 4
   504  	css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
   505  	defer cleanup()
   506  	// override default SkipTimeoutCommit == true for tests
   507  	for i := 0; i < N; i++ {
   508  		css[i].config.SkipTimeoutCommit = false
   509  	}
   510  
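        	// Only N-1 of the N states are started below; presumably the silent validator
        	// keeps the others from ever collecting all precommits, so they must wait out
        	// timeout_commit rather than skip it, while still making blocks.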
   511  	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N-1)
   512  	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
   513  
   514  	// wait till everyone makes the first new block
   515  	timeoutWaitGroup(t, N-1, func(j int) {
   516  		<-blocksSubs[j].Out()
   517  	}, css)
   518  }
   519  
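        // waitForAndValidateBlock waits for each of the n nodes to emit one NewBlock
        // event, checks the block's LastCommit against the set of active validators,
        // and then submits any given txs to that node's mempool via CheckTx.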
   520  func waitForAndValidateBlock(
   521  	t *testing.T,
   522  	n int,
   523  	activeVals map[string]struct{},
   524  	blocksSubs []types.Subscription,
   525  	css []*State,
   526  	txs ...[]byte,
   527  ) {
   528  	timeoutWaitGroup(t, n, func(j int) {
   529  		css[j].Logger.Debug("waitForAndValidateBlock")
   530  		msg := <-blocksSubs[j].Out()
   531  		newBlock := msg.Data().(types.EventDataNewBlock).Block
   532  		css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height)
   533  		err := validateBlock(newBlock, activeVals)
   534  		assert.Nil(t, err)
   535  		for _, tx := range txs {
   536  			err := assertMempool(css[j].txNotifier).CheckTx(tx, nil, mempl.TxInfo{})
   537  			assert.Nil(t, err)
   538  		}
   539  	}, css)
   540  }
   541  
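        // waitForAndValidateBlockWithTx keeps consuming blocks on each node until all
        // of the given txs have appeared, in order, possibly spread over several
        // blocks, validating every block's LastCommit along the way.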
   542  func waitForAndValidateBlockWithTx(
   543  	t *testing.T,
   544  	n int,
   545  	activeVals map[string]struct{},
   546  	blocksSubs []types.Subscription,
   547  	css []*State,
   548  	txs ...[]byte,
   549  ) {
   550  	timeoutWaitGroup(t, n, func(j int) {
   551  		ntxs := 0
   552  	BLOCK_TX_LOOP:
   553  		for {
   554  			css[j].Logger.Debug("waitForAndValidateBlockWithTx", "ntxs", ntxs)
   555  			msg := <-blocksSubs[j].Out()
   556  			newBlock := msg.Data().(types.EventDataNewBlock).Block
   557  			css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height)
   558  			err := validateBlock(newBlock, activeVals)
   559  			assert.Nil(t, err)
   560  
   561  			// check that txs match the txs we're waiting for.
   562  			// note they could be spread over multiple blocks,
   563  			// but they should be in order.
   564  			for _, tx := range newBlock.Data.Txs {
   565  				assert.EqualValues(t, txs[ntxs], tx)
   566  				ntxs++
   567  			}
   568  
   569  			if ntxs == len(txs) {
   570  				break BLOCK_TX_LOOP
   571  			}
   572  		}
   573  
   574  	}, css)
   575  }
   576  
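        // waitForBlockWithUpdatedValsAndValidateIt skips blocks whose LastCommit still
        // has the old validator count and validates the first block whose commit size
        // matches the updated validator set.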
   577  func waitForBlockWithUpdatedValsAndValidateIt(
   578  	t *testing.T,
   579  	n int,
   580  	updatedVals map[string]struct{},
   581  	blocksSubs []types.Subscription,
   582  	css []*State,
   583  ) {
   584  	timeoutWaitGroup(t, n, func(j int) {
   585  
   586  		var newBlock *types.Block
   587  	LOOP:
   588  		for {
   589  			css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt")
   590  			msg := <-blocksSubs[j].Out()
   591  			newBlock = msg.Data().(types.EventDataNewBlock).Block
   592  			if newBlock.LastCommit.Size() == len(updatedVals) {
   593  				css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height)
   594  				break LOOP
   595  			} else {
   596  				css[j].Logger.Debug(
   597  					"waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. Skipping",
   598  					"height",
   599  					newBlock.Height)
   600  			}
   601  		}
   602  
   603  		err := validateBlock(newBlock, updatedVals)
   604  		assert.Nil(t, err)
   605  	}, css)
   606  }
   607  
   608  // validateBlock expects high synchrony: the block's LastCommit must be exactly
   608  // the size of activeVals and contain only signatures from active validators.
   609  func validateBlock(block *types.Block, activeVals map[string]struct{}) error {
   610  	if block.LastCommit.Size() != len(activeVals) {
   611  		return fmt.Errorf(
   612  			"commit size doesn't match number of active validators. Got %d, expected %d",
   613  			block.LastCommit.Size(),
   614  			len(activeVals))
   615  	}
   616  
   617  	for _, commitSig := range block.LastCommit.Signatures {
   618  		if _, ok := activeVals[string(commitSig.ValidatorAddress)]; !ok {
   619  			return fmt.Errorf("found vote for inactive validator %X", commitSig.ValidatorAddress)
   620  		}
   621  	}
   622  	return nil
   623  }
   624  
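        // timeoutWaitGroup runs f(j) for each of the n nodes in parallel; if they do
        // not all return within the timeout, it dumps each node's round state plus all
        // goroutine stacks and panics.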
   625  func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*State) {
   626  	wg := new(sync.WaitGroup)
   627  	wg.Add(n)
   628  	for i := 0; i < n; i++ {
   629  		go func(j int) {
   630  			f(j)
   631  			wg.Done()
   632  		}(i)
   633  	}
   634  
   635  	done := make(chan struct{})
   636  	go func() {
   637  		wg.Wait()
   638  		close(done)
   639  	}()
   640  
   641  	// we're running many nodes in-process, possibly in a virtual machine,
   642  	// and spewing debug messages - making a block could take a while.
   643  	timeout := time.Second * 120
   644  
   645  	select {
   646  	case <-done:
   647  	case <-time.After(timeout):
   648  		for i, cs := range css {
   649  			t.Log("#################")
   650  			t.Log("Validator", i)
   651  			t.Log(cs.GetRoundState())
   652  			t.Log("")
   653  		}
   654  		os.Stdout.Write([]byte("pprof.Lookup('goroutine'):\n"))
   655  		err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
   656  		require.NoError(t, err)
   657  		capture()
   658  		panic("Timed out waiting for all validators to commit a block")
   659  	}
   660  }
   661  
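        // capture prints a full stack trace of every goroutine to stdout; it is used as
        // a last-resort diagnostic when timeoutWaitGroup gives up.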
   662  func capture() {
   663  	trace := make([]byte, 10240000)
   664  	count := runtime.Stack(trace, true)
   665  	fmt.Printf("Stack of %d bytes: %s\n", count, trace)
   666  }
   667  
   668  //-------------------------------------------------------------
   669  // Ensure basic validation of structs is functioning
   670  
   671  func TestNewRoundStepMessageValidateBasic(t *testing.T) {
   672  	testCases := []struct { // nolint: maligned
   673  		expectErr              bool
   674  		messageRound           int32
   675  		messageLastCommitRound int32
   676  		messageHeight          int64
   677  		testName               string
   678  		messageStep            cstypes.RoundStepType
   679  	}{
   680  		{false, 0, 0, 0, "Valid Message", cstypes.RoundStepNewHeight},
   681  		{true, -1, 0, 0, "Negative round", cstypes.RoundStepNewHeight},
   682  		{true, 0, 0, -1, "Negative height", cstypes.RoundStepNewHeight},
   683  		{true, 0, 0, 0, "Invalid Step", cstypes.RoundStepCommit + 1},
   684  		// The following cases will be handled by ValidateHeight
   685  		{false, 0, 0, 1, "H == 1 but LCR != -1 ", cstypes.RoundStepNewHeight},
   686  		{false, 0, -1, 2, "H > 1 but LCR < 0", cstypes.RoundStepNewHeight},
   687  	}
   688  
   689  	for _, tc := range testCases {
   690  		tc := tc
   691  		t.Run(tc.testName, func(t *testing.T) {
   692  			message := NewRoundStepMessage{
   693  				Height:          tc.messageHeight,
   694  				Round:           tc.messageRound,
   695  				Step:            tc.messageStep,
   696  				LastCommitRound: tc.messageLastCommitRound,
   697  			}
   698  
   699  			err := message.ValidateBasic()
   700  			if tc.expectErr {
   701  				require.Error(t, err)
   702  			} else {
   703  				require.NoError(t, err)
   704  			}
   705  		})
   706  	}
   707  }
   708  
   709  func TestNewRoundStepMessageValidateHeight(t *testing.T) {
   710  	initialHeight := int64(10)
   711  	testCases := []struct { // nolint: maligned
   712  		expectErr              bool
   713  		messageLastCommitRound int32
   714  		messageHeight          int64
   715  		testName               string
   716  	}{
   717  		{false, 0, 11, "Valid Message"},
   718  		{true, 0, -1, "Negative height"},
   719  		{true, 0, 0, "Zero height"},
   720  		{true, 0, 10, "Initial height but LCR != -1 "},
   721  		{true, -1, 11, "Normal height but LCR < 0"},
   722  	}
   723  
   724  	for _, tc := range testCases {
   725  		tc := tc
   726  		t.Run(tc.testName, func(t *testing.T) {
   727  			message := NewRoundStepMessage{
   728  				Height:          tc.messageHeight,
   729  				Round:           0,
   730  				Step:            cstypes.RoundStepNewHeight,
   731  				LastCommitRound: tc.messageLastCommitRound,
   732  			}
   733  
   734  			err := message.ValidateHeight(initialHeight)
   735  			if tc.expectErr {
   736  				require.Error(t, err)
   737  			} else {
   738  				require.NoError(t, err)
   739  			}
   740  		})
   741  	}
   742  }
   743  
   744  func TestNewValidBlockMessageValidateBasic(t *testing.T) {
   745  	testCases := []struct {
   746  		malleateFn func(*NewValidBlockMessage)
   747  		expErr     string
   748  	}{
   749  		{func(msg *NewValidBlockMessage) {}, ""},
   750  		{func(msg *NewValidBlockMessage) { msg.Height = -1 }, "negative Height"},
   751  		{func(msg *NewValidBlockMessage) { msg.Round = -1 }, "negative Round"},
   752  		{
   753  			func(msg *NewValidBlockMessage) { msg.BlockPartSetHeader.Total = 2 },
   754  			"blockParts bit array size 1 not equal to BlockPartSetHeader.Total 2",
   755  		},
   756  		{
   757  			func(msg *NewValidBlockMessage) {
   758  				msg.BlockPartSetHeader.Total = 0
   759  				msg.BlockParts = bits.NewBitArray(0)
   760  			},
   761  			"empty blockParts",
   762  		},
   763  		{
   764  			func(msg *NewValidBlockMessage) { msg.BlockParts = bits.NewBitArray(int(types.MaxBlockPartsCount) + 1) },
   765  			"blockParts bit array size 1602 not equal to BlockPartSetHeader.Total 1",
   766  		},
   767  	}
   768  
   769  	for i, tc := range testCases {
   770  		tc := tc
   771  		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
   772  			msg := &NewValidBlockMessage{
   773  				Height: 1,
   774  				Round:  0,
   775  				BlockPartSetHeader: types.PartSetHeader{
   776  					Total: 1,
   777  				},
   778  				BlockParts: bits.NewBitArray(1),
   779  			}
   780  
   781  			tc.malleateFn(msg)
   782  			err := msg.ValidateBasic()
   783  			if tc.expErr != "" && assert.Error(t, err) {
   784  				assert.Contains(t, err.Error(), tc.expErr)
   785  			}
   786  		})
   787  	}
   788  }
   789  
   790  func TestProposalPOLMessageValidateBasic(t *testing.T) {
   791  	testCases := []struct {
   792  		malleateFn func(*ProposalPOLMessage)
   793  		expErr     string
   794  	}{
   795  		{func(msg *ProposalPOLMessage) {}, ""},
   796  		{func(msg *ProposalPOLMessage) { msg.Height = -1 }, "negative Height"},
   797  		{func(msg *ProposalPOLMessage) { msg.ProposalPOLRound = -1 }, "negative ProposalPOLRound"},
   798  		{func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(0) }, "empty ProposalPOL bit array"},
   799  		{func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(types.MaxVotesCount + 1) },
   800  			"proposalPOL bit array is too big: 10001, max: 10000"},
   801  	}
   802  
   803  	for i, tc := range testCases {
   804  		tc := tc
   805  		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
   806  			msg := &ProposalPOLMessage{
   807  				Height:           1,
   808  				ProposalPOLRound: 1,
   809  				ProposalPOL:      bits.NewBitArray(1),
   810  			}
   811  
   812  			tc.malleateFn(msg)
   813  			err := msg.ValidateBasic()
   814  			if tc.expErr != "" && assert.Error(t, err) {
   815  				assert.Contains(t, err.Error(), tc.expErr)
   816  			}
   817  		})
   818  	}
   819  }
   820  
   821  func TestBlockPartMessageValidateBasic(t *testing.T) {
   822  	testPart := new(types.Part)
   823  	testPart.Proof.LeafHash = tmhash.Sum([]byte("leaf"))
   824  	testCases := []struct {
   825  		testName      string
   826  		messageHeight int64
   827  		messageRound  int32
   828  		messagePart   *types.Part
   829  		expectErr     bool
   830  	}{
   831  		{"Valid Message", 0, 0, testPart, false},
   832  		{"Invalid Message", -1, 0, testPart, true},
   833  		{"Invalid Message", 0, -1, testPart, true},
   834  	}
   835  
   836  	for _, tc := range testCases {
   837  		tc := tc
   838  		t.Run(tc.testName, func(t *testing.T) {
   839  			message := BlockPartMessage{
   840  				Height: tc.messageHeight,
   841  				Round:  tc.messageRound,
   842  				Part:   tc.messagePart,
   843  			}
   844  
   845  			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
   846  		})
   847  	}
   848  
   849  	message := BlockPartMessage{Height: 0, Round: 0, Part: new(types.Part)}
   850  	message.Part.Index = 1
   851  
   852  	assert.Equal(t, true, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
   853  }
   854  
   855  func TestHasVoteMessageValidateBasic(t *testing.T) {
   856  	const (
   857  		validSignedMsgType   tmproto.SignedMsgType = 0x01
   858  		invalidSignedMsgType tmproto.SignedMsgType = 0x03
   859  	)
   860  
   861  	testCases := []struct { // nolint: maligned
   862  		expectErr     bool
   863  		messageRound  int32
   864  		messageIndex  int32
   865  		messageHeight int64
   866  		testName      string
   867  		messageType   tmproto.SignedMsgType
   868  	}{
   869  		{false, 0, 0, 0, "Valid Message", validSignedMsgType},
   870  		{true, -1, 0, 0, "Invalid Message", validSignedMsgType},
   871  		{true, 0, -1, 0, "Invalid Message", validSignedMsgType},
   872  		{true, 0, 0, 0, "Invalid Message", invalidSignedMsgType},
   873  		{true, 0, 0, -1, "Invalid Message", validSignedMsgType},
   874  	}
   875  
   876  	for _, tc := range testCases {
   877  		tc := tc
   878  		t.Run(tc.testName, func(t *testing.T) {
   879  			message := HasVoteMessage{
   880  				Height: tc.messageHeight,
   881  				Round:  tc.messageRound,
   882  				Type:   tc.messageType,
   883  				Index:  tc.messageIndex,
   884  			}
   885  
   886  			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
   887  		})
   888  	}
   889  }
   890  
   891  func TestVoteSetMaj23MessageValidateBasic(t *testing.T) {
   892  	const (
   893  		validSignedMsgType   tmproto.SignedMsgType = 0x01
   894  		invalidSignedMsgType tmproto.SignedMsgType = 0x03
   895  	)
   896  
   897  	validBlockID := types.BlockID{}
   898  	invalidBlockID := types.BlockID{
   899  		Hash: bytes.HexBytes{},
   900  		PartSetHeader: types.PartSetHeader{
   901  			Total: 1,
   902  			Hash:  []byte{0},
   903  		},
   904  	}
   905  
   906  	testCases := []struct { // nolint: maligned
   907  		expectErr      bool
   908  		messageRound   int32
   909  		messageHeight  int64
   910  		testName       string
   911  		messageType    tmproto.SignedMsgType
   912  		messageBlockID types.BlockID
   913  	}{
   914  		{false, 0, 0, "Valid Message", validSignedMsgType, validBlockID},
   915  		{true, -1, 0, "Invalid Message", validSignedMsgType, validBlockID},
   916  		{true, 0, -1, "Invalid Message", validSignedMsgType, validBlockID},
   917  		{true, 0, 0, "Invalid Message", invalidSignedMsgType, validBlockID},
   918  		{true, 0, 0, "Invalid Message", validSignedMsgType, invalidBlockID},
   919  	}
   920  
   921  	for _, tc := range testCases {
   922  		tc := tc
   923  		t.Run(tc.testName, func(t *testing.T) {
   924  			message := VoteSetMaj23Message{
   925  				Height:  tc.messageHeight,
   926  				Round:   tc.messageRound,
   927  				Type:    tc.messageType,
   928  				BlockID: tc.messageBlockID,
   929  			}
   930  
   931  			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
   932  		})
   933  	}
   934  }
   935  
   936  func TestVoteSetBitsMessageValidateBasic(t *testing.T) {
   937  	testCases := []struct {
   938  		malleateFn func(*VoteSetBitsMessage)
   939  		expErr     string
   940  	}{
   941  		{func(msg *VoteSetBitsMessage) {}, ""},
   942  		{func(msg *VoteSetBitsMessage) { msg.Height = -1 }, "negative Height"},
   943  		{func(msg *VoteSetBitsMessage) { msg.Type = 0x03 }, "invalid Type"},
   944  		{func(msg *VoteSetBitsMessage) {
   945  			msg.BlockID = types.BlockID{
   946  				Hash: bytes.HexBytes{},
   947  				PartSetHeader: types.PartSetHeader{
   948  					Total: 1,
   949  					Hash:  []byte{0},
   950  				},
   951  			}
   952  		}, "wrong BlockID: wrong PartSetHeader: wrong Hash:"},
   953  		{func(msg *VoteSetBitsMessage) { msg.Votes = bits.NewBitArray(types.MaxVotesCount + 1) },
   954  			"votes bit array is too big: 10001, max: 10000"},
   955  	}
   956  
   957  	for i, tc := range testCases {
   958  		tc := tc
   959  		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
   960  			msg := &VoteSetBitsMessage{
   961  				Height:  1,
   962  				Round:   0,
   963  				Type:    0x01,
   964  				Votes:   bits.NewBitArray(1),
   965  				BlockID: types.BlockID{},
   966  			}
   967  
   968  			tc.malleateFn(msg)
   969  			err := msg.ValidateBasic()
   970  			if tc.expErr != "" && assert.Error(t, err) {
   971  				assert.Contains(t, err.Error(), tc.expErr)
   972  			}
   973  		})
   974  	}
   975  }