github.com/line/ostracon@v1.0.10-0.20230328032236-7f20145f065d/consensus/reactor_test.go

package consensus

import (
	"context"
	"fmt"
	"os"
	"path"
	"runtime"
	"runtime/pprof"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	abci "github.com/tendermint/tendermint/abci/types"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	dbm "github.com/tendermint/tm-db"

	abcicli "github.com/line/ostracon/abci/client"
	"github.com/line/ostracon/abci/example/kvstore"
	cfg "github.com/line/ostracon/config"
	cstypes "github.com/line/ostracon/consensus/types"
	cryptoenc "github.com/line/ostracon/crypto/encoding"
	"github.com/line/ostracon/crypto/tmhash"
	"github.com/line/ostracon/libs/bits"
	"github.com/line/ostracon/libs/bytes"
	"github.com/line/ostracon/libs/log"
	tmsync "github.com/line/ostracon/libs/sync"
	mempl "github.com/line/ostracon/mempool"
	"github.com/line/ostracon/p2p"
	p2pmock "github.com/line/ostracon/p2p/mock"
	sm "github.com/line/ostracon/state"
	statemocks "github.com/line/ostracon/state/mocks"
	"github.com/line/ostracon/store"
	"github.com/line/ostracon/types"
)

//----------------------------------------------
// in-process testnets

var defaultTestTime = time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)

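// startConsensusNet wires up an in-process network of n consensus reactors
// (one per State in css), subscribes each node's event bus to NewBlock
// events, connects the switches, and only then switches every reactor to
// consensus. Typical usage, as in TestReactorBasic below:
//
//	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
//	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)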
func startConsensusNet(t *testing.T, css []*State, n int) (
	[]*Reactor,
	[]types.Subscription,
	[]*types.EventBus,
) {
	reactors := make([]*Reactor, n)
	blocksSubs := make([]types.Subscription, 0)
	eventBuses := make([]*types.EventBus, n)
	for i := 0; i < n; i++ {
		/*
			logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
			if err != nil {
				t.Fatal(err)
			}
		*/
		reactors[i] = NewReactor(css[i], true, true, 1000) // so we don't start the consensus states
		reactors[i].SetLogger(css[i].Logger)

		// eventBus is already started with the cs
		eventBuses[i] = css[i].eventBus
		reactors[i].SetEventBus(eventBuses[i])

		blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
		require.NoError(t, err)
		blocksSubs = append(blocksSubs, blocksSub)

		if css[i].state.LastBlockHeight == 0 { // simulate handling InitChain in handshake
			if err := css[i].blockExec.Store().Save(css[i].state); err != nil {
				t.Error(err)
			}
		}
	}
	// make connected switches and start all reactors
	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch, config *cfg.P2PConfig) *p2p.Switch {
		s.AddReactor("CONSENSUS", reactors[i])
		s.SetLogger(reactors[i].conS.Logger.With("module", "p2p"))
		return s
	}, p2p.Connect2Switches)

	// Now that everyone is connected, start the state machines.
	// If we started the state machines before everyone was connected,
	// we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors.
	// TODO: is this still true with new pubsub?
	for i := 0; i < n; i++ {
		s := reactors[i].conS.GetState()
		reactors[i].SwitchToConsensus(s, false)
	}
	return reactors, blocksSubs, eventBuses
}

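// stopConsensusNet stops every switch and event bus started by
// startConsensusNet, logging (rather than failing on) any errors.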
func stopConsensusNet(logger log.Logger, reactors []*Reactor, eventBuses []*types.EventBus) {
	logger.Info("stopConsensusNet", "n", len(reactors))
	for i, r := range reactors {
		logger.Info("stopConsensusNet: Stopping Reactor", "i", i)
		if err := r.Switch.Stop(); err != nil {
			logger.Error("error trying to stop switch", "error", err)
		}
	}
	for i, b := range eventBuses {
		logger.Info("stopConsensusNet: Stopping eventBus", "i", i)
		if err := b.Stop(); err != nil {
			logger.Error("error trying to stop eventbus", "error", err)
		}
	}
	logger.Info("stopConsensusNet: DONE", "n", len(reactors))
}

// Ensure a testnet makes blocks
func TestReactorBasic(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j].Out()
	}, css)
}

// Ensure we can process blocks with evidence
func TestReactorWithEvidence(t *testing.T) {
	nValidators := 4
	testName := "consensus_reactor_test"
	tickerFunc := newMockTickerFunc(true)
	appFunc := newCounter

	// heed the advice from https://www.sandimetz.com/blog/2016/1/20/the-wrong-abstraction
	// to unroll unwieldy abstractions. Here we duplicate the code from:
	// css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)

	genDoc, privVals := randGenesisDoc(nValidators, false, 30)
	css := make([]*State, nValidators)
	logger := consensusLogger()
	for i := 0; i < nValidators; i++ {
		stateDB := dbm.NewMemDB() // each state needs its own db
		stateStore := sm.NewStore(stateDB)
		state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
		defer os.RemoveAll(thisConfig.RootDir)
		ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
		app := appFunc()
		vals := types.OC2PB.ValidatorUpdates(state.Validators)
		app.InitChain(abci.RequestInitChain{Validators: vals})

		pv := privVals[i]
		// duplicate code from:
		// css[i] = newStateWithConfig(thisConfig, state, privVals[i], app)

		blockDB := dbm.NewMemDB()
		blockStore := store.NewBlockStore(blockDB)

		// one for mempool, one for consensus
		mtx := new(tmsync.Mutex)
		proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
		proxyAppConnCon := abcicli.NewLocalClient(mtx, app)

		// Make Mempool
		mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
		mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
		if thisConfig.Consensus.WaitForTxs() {
			mempool.EnableTxsAvailable()
		}

		// mock the evidence pool:
		// every validator includes evidence that another validator double signed
		vIdx := (i + 1) % nValidators
		ev := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], config.ChainID())
		evpool := &statemocks.EvidencePool{}
		evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil)
		evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]types.Evidence{ev}, int64(len(ev.Bytes())))
		evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return()

		evpool2 := sm.EmptyEvidencePool{}

		// Make State
		blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
		cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2)
		cs.SetLogger(log.TestingLogger().With("module", "consensus"))
		cs.SetPrivValidator(pv)

		eventBus := types.NewEventBus()
		eventBus.SetLogger(log.TestingLogger().With("module", "events"))
		err := eventBus.Start()
		require.NoError(t, err)
		cs.SetEventBus(eventBus)

		cs.SetTimeoutTicker(tickerFunc())
		cs.SetLogger(logger.With("validator", i, "module", "consensus"))

		css[i] = cs
	}

	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nValidators)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// we expect each validator, whenever it is the proposer, to propose one piece of evidence.
	for i := 0; i < nValidators; i++ {
		timeoutWaitGroup(t, nValidators, func(j int) {
			msg := <-blocksSubs[j].Out()
			block := msg.Data().(types.EventDataNewBlock).Block
			assert.Len(t, block.Evidence.Evidence, 1)
		}, css)
	}
}

//------------------------------------

// Ensure a testnet makes blocks when there are txs
func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
		func(c *cfg.Config) {
			c.Consensus.CreateEmptyBlocks = false
		})
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// send a tx
	if _, err := assertMempool(css[3].txNotifier).CheckTxSync([]byte{1, 2, 3}, mempl.TxInfo{}); err != nil {
		t.Error(err)
	}

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j].Out()
	}, css)
}

func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
	N := 1
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, _, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	var (
		reactor = reactors[0]
		peer    = p2pmock.NewPeer(nil)
		msg     = MustEncode(&HasVoteMessage{Height: 1,
			Round: 1, Index: 1, Type: tmproto.PrevoteType})
	)

	reactor.InitPeer(peer)

	// simulate switch calling Receive before AddPeer
	assert.NotPanics(t, func() {
		reactor.Receive(StateChannel, peer, msg)
		reactor.AddPeer(peer)
	})
}

func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
	N := 1
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, _, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	var (
		reactor = reactors[0]
		peer    = p2pmock.NewPeer(nil)
		msg     = MustEncode(&HasVoteMessage{Height: 1,
			Round: 1, Index: 1, Type: tmproto.PrevoteType})
	)

	// we deliberately do NOT call InitPeer here

	// simulate switch calling Receive before InitPeer
	assert.Panics(t, func() {
		reactor.Receive(StateChannel, peer, msg)
	})
}

// Test we record stats about votes and block parts from other peers.
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// the proposer idx is always 0, because the LastProofHash is []byte{2}
	proposerIdx := int32(0)

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j].Out()
	}, css)

	// look up the proposer's index in the peer list of a validator other than the proposer
	// 0:[1,2,3], 1:[0,2,3], 2:[0,1,3], 3:[0,1,2]
	var otherIdx int
	var proposerIdxInOtherPeer int32
	if proposerIdx == 0 {
		otherIdx = 1
		proposerIdxInOtherPeer = 0
	} else {
		otherIdx = 0
		proposerIdxInOtherPeer = proposerIdx - 1
	}

	// Get peer
	peer := reactors[otherIdx].Switch.Peers().List()[proposerIdxInOtherPeer]

	// Get peer state
	ps := peer.Get(types.PeerStateKey).(*PeerState)

	assert.True(t, ps.VotesSent() > 0, "number of votes sent should have increased")
	assert.True(t, ps.BlockPartsSent() > 0,
		fmt.Sprintf("number of block parts sent should have increased: %d", ps.BlockPartsSent()))
}

//-------------------------------------------------------------
// ensure we can make blocks despite cycling a validator set

func TestReactorVotingPowerChange(t *testing.T) {
	nVals := 4
	logger := log.TestingLogger()
	css, cleanup := randConsensusNet(
		nVals,
		"consensus_voting_power_changes_test",
		newMockTickerFunc(true),
		newPersistentKVStore)
	defer cleanup()
	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nVals)
	defer stopConsensusNet(logger, reactors, eventBuses)

	// map of active validators
	activeVals := make(map[string]struct{})
	for i := 0; i < nVals; i++ {
		pubKey, err := css[i].privValidator.GetPubKey()
		require.NoError(t, err)
		addr := pubKey.Address()
		activeVals[string(addr)] = struct{}{}
	}

	// wait till everyone makes block 1
	timeoutWaitGroup(t, nVals, func(j int) {
		<-blocksSubs[j].Out()
	}, css)

	//---------------------------------------------------------------------------
	logger.Debug("---------------------------- Testing changing the voting power of one validator a few times")

	val1PubKey, err := css[0].privValidator.GetPubKey()
	require.NoError(t, err)

	val1PubKeyABCI, err := cryptoenc.PubKeyToProto(val1PubKey)
	require.NoError(t, err)
	updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25)
	previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[0].GetRoundState().LastValidators.TotalVotingPower())
	}

	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2)
	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[0].GetRoundState().LastValidators.TotalVotingPower())
	}

	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26)
	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[0].GetRoundState().LastValidators.TotalVotingPower())
	}
}

func TestReactorValidatorSetChanges(t *testing.T) {
	nPeers := 7
	nVals := 4
	css, _, _, cleanup := randConsensusNetWithPeers(
		nVals,
		nPeers,
		"consensus_val_set_changes_test",
		newMockTickerFunc(true),
		newPersistentKVStoreWithPath)

	defer cleanup()
	logger := log.TestingLogger()

	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nPeers)
	defer stopConsensusNet(logger, reactors, eventBuses)

	// map of active validators
	activeVals := make(map[string]struct{})
	for i := 0; i < nVals; i++ {
		pubKey, err := css[i].privValidator.GetPubKey()
		require.NoError(t, err)
		activeVals[string(pubKey.Address())] = struct{}{}
	}

	// wait till everyone makes block 1
	timeoutWaitGroup(t, nPeers, func(j int) {
		<-blocksSubs[j].Out()
	}, css)

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing adding one validator")

	newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
	assert.NoError(t, err)
	valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1)
	assert.NoError(t, err)
	newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)

	// wait till everyone makes block 2
	// ensure the commit includes all validators
	// send newValTx to change vals in block 3
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)

	// wait till everyone makes block 3.
	// it includes the commit for block 2, which is by the original validator set
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)

	// wait till everyone makes block 4.
	// it includes the commit for block 3, which is by the original validator set
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)

	// the commits for block 4 should be with the updated validator set
	activeVals[string(newValidatorPubKey1.Address())] = struct{}{}

	// wait till everyone makes block 5
	// it includes the commit for block 4, which should have the updated validator set
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing changing the voting power of one validator")

	updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
	require.NoError(t, err)
	updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1)
	require.NoError(t, err)
	updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
	previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Errorf(
			"expected voting power to change (before: %d, after: %d)",
			previousTotalVotingPower,
			css[nVals].GetRoundState().LastValidators.TotalVotingPower())
	}

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing adding two validators at once")

	newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey()
	require.NoError(t, err)
	newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2)
	require.NoError(t, err)
	newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)

	newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey()
	require.NoError(t, err)
	newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3)
	require.NoError(t, err)
	newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	activeVals[string(newValidatorPubKey2.Address())] = struct{}{}
	activeVals[string(newValidatorPubKey3.Address())] = struct{}{}
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing removing two validators at once")

	removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
	removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	delete(activeVals, string(newValidatorPubKey2.Address()))
	delete(activeVals, string(newValidatorPubKey3.Address()))
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
}

// Check we can make blocks with skip_timeout_commit=false
func TestReactorWithTimeoutCommit(t *testing.T) {
	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
	defer cleanup()
	// override default SkipTimeoutCommit == true for tests
	for i := 0; i < N; i++ {
		css[i].config.SkipTimeoutCommit = false
	}

	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N-1)
	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N-1, func(j int) {
		<-blocksSubs[j].Out()
	}, css)
}

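// waitForAndValidateBlock waits until each of the n nodes emits a new block,
// validates that the block's LastCommit matches activeVals, and then feeds
// the given txs into each node's mempool.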
func waitForAndValidateBlock(
	t *testing.T,
	n int,
	activeVals map[string]struct{},
	blocksSubs []types.Subscription,
	css []*State,
	txs ...[]byte,
) {
	timeoutWaitGroup(t, n, func(j int) {
		css[j].Logger.Debug("waitForAndValidateBlock")
		msg := <-blocksSubs[j].Out()
		newBlock := msg.Data().(types.EventDataNewBlock).Block
		css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height)
		err := validateBlock(newBlock, activeVals)
		assert.Nil(t, err)
		for _, tx := range txs {
			_, err := assertMempool(css[j].txNotifier).CheckTxSync(tx, mempl.TxInfo{})
			assert.Nil(t, err)
		}
	}, css)
}

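// waitForAndValidateBlockWithTx is like waitForAndValidateBlock, but each
// node keeps consuming blocks until it has seen all the given txs, in order,
// possibly spread over multiple blocks.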
func waitForAndValidateBlockWithTx(
	t *testing.T,
	n int,
	activeVals map[string]struct{},
	blocksSubs []types.Subscription,
	css []*State,
	txs ...[]byte,
) {
	timeoutWaitGroup(t, n, func(j int) {
		ntxs := 0
	BLOCK_TX_LOOP:
		for {
			css[j].Logger.Debug("waitForAndValidateBlockWithTx", "ntxs", ntxs)
			msg := <-blocksSubs[j].Out()
			newBlock := msg.Data().(types.EventDataNewBlock).Block
			css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height)
			err := validateBlock(newBlock, activeVals)
			assert.Nil(t, err)

			// check that txs match the txs we're waiting for.
			// note they could be spread over multiple blocks,
			// but they should be in order.
			for _, tx := range newBlock.Data.Txs {
				assert.EqualValues(t, txs[ntxs], tx)
				ntxs++
			}

			if ntxs == len(txs) {
				break BLOCK_TX_LOOP
			}
		}
	}, css)
}

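// waitForBlockWithUpdatedValsAndValidateIt skips blocks on each node until
// one arrives whose LastCommit is exactly as large as updatedVals, then
// validates that block against updatedVals.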
func waitForBlockWithUpdatedValsAndValidateIt(
	t *testing.T,
	n int,
	updatedVals map[string]struct{},
	blocksSubs []types.Subscription,
	css []*State,
) {
	timeoutWaitGroup(t, n, func(j int) {
		var newBlock *types.Block
	LOOP:
		for {
			css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt")
			msg := <-blocksSubs[j].Out()
			newBlock = msg.Data().(types.EventDataNewBlock).Block
			if newBlock.LastCommit.Size() == len(updatedVals) {
				css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height)
				break LOOP
			} else {
				css[j].Logger.Debug(
					"waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. Skipping",
					"height",
					newBlock.Height)
			}
		}

		err := validateBlock(newBlock, updatedVals)
		assert.Nil(t, err)
	}, css)
}

// validateBlock expects high synchrony: the block's LastCommit must be
// exactly as large as the active validator set, and every signature in it
// must come from an active validator.
func validateBlock(block *types.Block, activeVals map[string]struct{}) error {
	if block.LastCommit.Size() != len(activeVals) {
		return fmt.Errorf(
			"commit size doesn't match number of active validators. Got %d, expected %d",
			block.LastCommit.Size(),
			len(activeVals))
	}

	for _, commitSig := range block.LastCommit.Signatures {
		if _, ok := activeVals[string(commitSig.ValidatorAddress)]; !ok {
			return fmt.Errorf("found vote for inactive validator %X", commitSig.ValidatorAddress)
		}
	}
	return nil
}

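// timeoutWaitGroup runs f(0) through f(n-1) concurrently and waits for all
// of them to return, bounded by a generous timeout. On timeout it dumps each
// node's round state and all goroutine stacks, then panics. Typical usage:
//
//	timeoutWaitGroup(t, N, func(j int) {
//		<-blocksSubs[j].Out()
//	}, css)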
func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*State) {
	wg := new(sync.WaitGroup)
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func(j int) {
			f(j)
			wg.Done()
		}(i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	// we're running many nodes in-process, possibly in a virtual machine,
	// and spewing debug messages, so making a block could take a while
	timeout := time.Second * 120

	select {
	case <-done:
	case <-time.After(timeout):
		for i, cs := range css {
			t.Log("#################")
			t.Log("Validator", i)
			t.Log(cs.GetRoundState())
			t.Log("")
		}
		os.Stdout.Write([]byte("pprof.Lookup('goroutine'):\n"))
		err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
		require.NoError(t, err)
		capture()
		panic("Timed out waiting for all validators to commit a block")
	}
}

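// capture prints the stack traces of all current goroutines to stdout; it is
// the last diagnostic emitted before timeoutWaitGroup panics.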
func capture() {
	trace := make([]byte, 10240000)
	count := runtime.Stack(trace, true)
	fmt.Printf("Stack of %d bytes: %s\n", count, trace)
}

//-------------------------------------------------------------
// Ensure basic validation of structs is functioning

func TestNewRoundStepMessageValidateBasic(t *testing.T) {
	testCases := []struct { // nolint: maligned
		expectErr              bool
		messageRound           int32
		messageLastCommitRound int32
		messageHeight          int64
		testName               string
		messageStep            cstypes.RoundStepType
	}{
		{false, 0, 0, 0, "Valid Message", cstypes.RoundStepNewHeight},
		{true, -1, 0, 0, "Negative round", cstypes.RoundStepNewHeight},
		{true, 0, 0, -1, "Negative height", cstypes.RoundStepNewHeight},
		{true, 0, 0, 0, "Invalid Step", cstypes.RoundStepCommit + 1},
		// The following cases will be handled by ValidateHeight
		{false, 0, 0, 1, "H == 1 but LCR != -1", cstypes.RoundStepNewHeight},
		{false, 0, -1, 2, "H > 1 but LCR < 0", cstypes.RoundStepNewHeight},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := NewRoundStepMessage{
				Height:          tc.messageHeight,
				Round:           tc.messageRound,
				Step:            tc.messageStep,
				LastCommitRound: tc.messageLastCommitRound,
			}

			err := message.ValidateBasic()
			if tc.expectErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
		})
	}
}

func TestNewRoundStepMessageValidateHeight(t *testing.T) {
	initialHeight := int64(10)
	testCases := []struct { // nolint: maligned
		expectErr              bool
		messageLastCommitRound int32
		messageHeight          int64
		testName               string
	}{
		{false, 0, 11, "Valid Message"},
		{true, 0, -1, "Negative height"},
		{true, 0, 0, "Zero height"},
		{true, 0, 10, "Initial height but LCR != -1"},
		{true, -1, 11, "Normal height but LCR < 0"},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := NewRoundStepMessage{
				Height:          tc.messageHeight,
				Round:           0,
				Step:            cstypes.RoundStepNewHeight,
				LastCommitRound: tc.messageLastCommitRound,
			}

			err := message.ValidateHeight(initialHeight)
			if tc.expectErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
		})
	}
}

func TestNewValidBlockMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		malleateFn func(*NewValidBlockMessage)
		expErr     string
	}{
		{func(msg *NewValidBlockMessage) {}, ""},
		{func(msg *NewValidBlockMessage) { msg.Height = -1 }, "negative Height"},
		{func(msg *NewValidBlockMessage) { msg.Round = -1 }, "negative Round"},
		{
			func(msg *NewValidBlockMessage) { msg.BlockPartSetHeader.Total = 2 },
			"blockParts bit array size 1 not equal to BlockPartSetHeader.Total 2",
		},
		{
			func(msg *NewValidBlockMessage) {
				msg.BlockPartSetHeader.Total = 0
				msg.BlockParts = bits.NewBitArray(0)
			},
			"empty blockParts",
		},
		{
			func(msg *NewValidBlockMessage) { msg.BlockParts = bits.NewBitArray(int(types.MaxBlockPartsCount) + 1) },
			"blockParts bit array size 1602 not equal to BlockPartSetHeader.Total 1",
		},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			msg := &NewValidBlockMessage{
				Height: 1,
				Round:  0,
				BlockPartSetHeader: types.PartSetHeader{
					Total: 1,
				},
				BlockParts: bits.NewBitArray(1),
			}

			tc.malleateFn(msg)
			err := msg.ValidateBasic()
			if tc.expErr != "" && assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expErr)
			}
		})
	}
}

func TestProposalPOLMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		malleateFn func(*ProposalPOLMessage)
		expErr     string
	}{
		{func(msg *ProposalPOLMessage) {}, ""},
		{func(msg *ProposalPOLMessage) { msg.Height = -1 }, "negative Height"},
		{func(msg *ProposalPOLMessage) { msg.ProposalPOLRound = -1 }, "negative ProposalPOLRound"},
		{func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(0) }, "empty ProposalPOL bit array"},
		{func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(types.MaxVotesCount + 1) },
			"proposalPOL bit array is too big: 10001, max: 10000"},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			msg := &ProposalPOLMessage{
				Height:           1,
				ProposalPOLRound: 1,
				ProposalPOL:      bits.NewBitArray(1),
			}

			tc.malleateFn(msg)
			err := msg.ValidateBasic()
			if tc.expErr != "" && assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expErr)
			}
		})
	}
}

func TestBlockPartMessageValidateBasic(t *testing.T) {
	testPart := new(types.Part)
	testPart.Proof.LeafHash = tmhash.Sum([]byte("leaf"))
	testCases := []struct {
		testName      string
		messageHeight int64
		messageRound  int32
		messagePart   *types.Part
		expectErr     bool
	}{
		{"Valid Message", 0, 0, testPart, false},
		{"Invalid Message", -1, 0, testPart, true},
		{"Invalid Message", 0, -1, testPart, true},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := BlockPartMessage{
				Height: tc.messageHeight,
				Round:  tc.messageRound,
				Part:   tc.messagePart,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}

	message := BlockPartMessage{Height: 0, Round: 0, Part: new(types.Part)}
	message.Part.Index = 1

	assert.Equal(t, true, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
}

func TestHasVoteMessageValidateBasic(t *testing.T) {
	const (
		validSignedMsgType   tmproto.SignedMsgType = 0x01
		invalidSignedMsgType tmproto.SignedMsgType = 0x03
	)

	testCases := []struct { // nolint: maligned
		expectErr     bool
		messageRound  int32
		messageIndex  int32
		messageHeight int64
		testName      string
		messageType   tmproto.SignedMsgType
	}{
		{false, 0, 0, 0, "Valid Message", validSignedMsgType},
		{true, -1, 0, 0, "Invalid Message", validSignedMsgType},
		{true, 0, -1, 0, "Invalid Message", validSignedMsgType},
		{true, 0, 0, 0, "Invalid Message", invalidSignedMsgType},
		{true, 0, 0, -1, "Invalid Message", validSignedMsgType},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := HasVoteMessage{
				Height: tc.messageHeight,
				Round:  tc.messageRound,
				Type:   tc.messageType,
				Index:  tc.messageIndex,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestVoteSetMaj23MessageValidateBasic(t *testing.T) {
	const (
		validSignedMsgType   tmproto.SignedMsgType = 0x01
		invalidSignedMsgType tmproto.SignedMsgType = 0x03
	)

	validBlockID := types.BlockID{}
	invalidBlockID := types.BlockID{
		Hash: bytes.HexBytes{},
		PartSetHeader: types.PartSetHeader{
			Total: 1,
			Hash:  []byte{0},
		},
	}

	testCases := []struct { // nolint: maligned
		expectErr      bool
		messageRound   int32
		messageHeight  int64
		testName       string
		messageType    tmproto.SignedMsgType
		messageBlockID types.BlockID
	}{
		{false, 0, 0, "Valid Message", validSignedMsgType, validBlockID},
		{true, -1, 0, "Invalid Message", validSignedMsgType, validBlockID},
		{true, 0, -1, "Invalid Message", validSignedMsgType, validBlockID},
		{true, 0, 0, "Invalid Message", invalidSignedMsgType, validBlockID},
		{true, 0, 0, "Invalid Message", validSignedMsgType, invalidBlockID},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			message := VoteSetMaj23Message{
				Height:  tc.messageHeight,
				Round:   tc.messageRound,
				Type:    tc.messageType,
				BlockID: tc.messageBlockID,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestVoteSetBitsMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		malleateFn func(*VoteSetBitsMessage)
		expErr     string
	}{
		{func(msg *VoteSetBitsMessage) {}, ""},
		{func(msg *VoteSetBitsMessage) { msg.Height = -1 }, "negative Height"},
		{func(msg *VoteSetBitsMessage) { msg.Type = 0x03 }, "invalid Type"},
		{func(msg *VoteSetBitsMessage) {
			msg.BlockID = types.BlockID{
				Hash: bytes.HexBytes{},
				PartSetHeader: types.PartSetHeader{
					Total: 1,
					Hash:  []byte{0},
				},
			}
		}, "wrong BlockID: wrong PartSetHeader: wrong Hash:"},
		{func(msg *VoteSetBitsMessage) { msg.Votes = bits.NewBitArray(types.MaxVotesCount + 1) },
			"votes bit array is too big: 10001, max: 10000"},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			msg := &VoteSetBitsMessage{
				Height:  1,
				Round:   0,
				Type:    0x01,
				Votes:   bits.NewBitArray(1),
				BlockID: types.BlockID{},
			}

			tc.malleateFn(msg)
			err := msg.ValidateBasic()
			if tc.expErr != "" && assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expErr)
			}
		})
	}
}