github.com/gnolang/gno@v0.0.0-20240520182011-228e9d0192ce/tm2/pkg/bft/consensus/reactor_test.go

package consensus

import (
	"fmt"
	"log/slog"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"github.com/gnolang/gno/tm2/pkg/amino"
	"github.com/gnolang/gno/tm2/pkg/bft/abci/example/kvstore"
	cfg "github.com/gnolang/gno/tm2/pkg/bft/config"
	cstypes "github.com/gnolang/gno/tm2/pkg/bft/consensus/types"
	sm "github.com/gnolang/gno/tm2/pkg/bft/state"
	"github.com/gnolang/gno/tm2/pkg/bft/types"
	"github.com/gnolang/gno/tm2/pkg/bitarray"
	"github.com/gnolang/gno/tm2/pkg/crypto/tmhash"
	"github.com/gnolang/gno/tm2/pkg/events"
	"github.com/gnolang/gno/tm2/pkg/log"
	osm "github.com/gnolang/gno/tm2/pkg/os"
	"github.com/gnolang/gno/tm2/pkg/p2p"
	"github.com/gnolang/gno/tm2/pkg/p2p/mock"
	"github.com/gnolang/gno/tm2/pkg/testutils"
)

// ----------------------------------------------
// in-process testnets

func startConsensusNet(css []*ConsensusState, n int) ([]*ConsensusReactor, []<-chan events.Event, []events.EventSwitch, []*p2p.Switch) {
	reactors := make([]*ConsensusReactor, n)
	blocksSubs := make([]<-chan events.Event, 0)
	eventSwitches := make([]events.EventSwitch, n)
	var p2pSwitches []*p2p.Switch
	for i := 0; i < n; i++ {
		/*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
		if err != nil {	t.Fatal(err)}*/
		reactors[i] = NewConsensusReactor(css[i], true) // so we don't start the consensus states
		reactors[i].SetLogger(css[i].Logger)

		// evsw is already started with the cs
		eventSwitches[i] = css[i].evsw
		reactors[i].SetEventSwitch(eventSwitches[i])

		blocksSub := subscribe(eventSwitches[i], types.EventNewBlock{})
		blocksSubs = append(blocksSubs, blocksSub)

		if css[i].state.LastBlockHeight == 0 { // simulate handling InitChain in the handshake
			sm.SaveState(css[i].blockExec.DB(), css[i].state)
		}
	}
	// make connected switches and start all reactors
	p2pSwitches = p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("CONSENSUS", reactors[i])
		s.SetLogger(reactors[i].conS.Logger.With("module", "p2p"))
		return s
	}, p2p.Connect2Switches)

	// now that everyone is connected, start the state machines.
	// If we started the state machines before everyone was connected,
	// we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors.
	// TODO: is this still true with the new pubsub?
	for i := 0; i < n; i++ {
		s := reactors[i].conS.GetState()
		reactors[i].SwitchToConsensus(s, 0)
	}
	return reactors, blocksSubs, eventSwitches, p2pSwitches
}

func stopConsensusNet(logger *slog.Logger, reactors []*ConsensusReactor, eventSwitches []events.EventSwitch, p2pSwitches []*p2p.Switch) {
	logger.Info("stopConsensusNet", "n", len(reactors))
	for i, r := range reactors {
		logger.Info("stopConsensusNet: Stopping ConsensusReactor", "i", i)
		r.Switch.Stop()
	}
	for i, b := range eventSwitches {
		logger.Info("stopConsensusNet: Stopping evsw", "i", i)
		b.Stop()
	}
	for i, p := range p2pSwitches {
		logger.Info("stopConsensusNet: Stopping p2p switch", "i", i)
		p.Stop()
	}
	logger.Info("stopConsensusNet: DONE", "n", len(reactors))
}
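
// Typical pairing of the two helpers above, mirroring the tests below
// (a sketch; "example_test" is an arbitrary test name, everything else is
// this package's existing test helpers):
//
//	css, cleanup := randConsensusNet(4, "example_test", newMockTickerFunc(true), newCounter)
//	defer cleanup()
//	reactors, blocksSubs, eventSwitches, p2pSwitches := startConsensusNet(css, 4)
//	defer stopConsensusNet(log.NewTestingLogger(t), reactors, eventSwitches, p2pSwitches)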

// Ensure a testnet makes blocks
func TestReactorBasic(t *testing.T) {
	t.Parallel()

	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, blocksSubs, eventSwitches, p2pSwitches := startConsensusNet(css, N)
	defer stopConsensusNet(log.NewTestingLogger(t), reactors, eventSwitches, p2pSwitches)
	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j]
	}, css)
}

// ------------------------------------

// Ensure a testnet makes blocks when CreateEmptyBlocks is false and txs arrive
func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
	t.Parallel()

	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
		func(c *cfg.Config) {
			c.Consensus.CreateEmptyBlocks = false
		})
	defer cleanup()
	reactors, blocksSubs, eventSwitches, p2pSwitches := startConsensusNet(css, N)
	defer stopConsensusNet(log.NewTestingLogger(t), reactors, eventSwitches, p2pSwitches)

	// send a tx
	if err := assertMempool(css[3].txNotifier).CheckTx([]byte{1, 2, 3}, nil); err != nil {
		t.Error(err)
	}

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j]
	}, css)
}

func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
	t.Parallel()

	N := 1
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, _, eventSwitches, p2pSwitches := startConsensusNet(css, N)
	defer stopConsensusNet(log.NewTestingLogger(t), reactors, eventSwitches, p2pSwitches)

	var (
		reactor = reactors[0]
		peer    = mock.NewPeer(nil)
		msg     = amino.MustMarshalAny(&HasVoteMessage{Height: 1, Round: 1, Index: 1, Type: types.PrevoteType})
	)

	reactor.InitPeer(peer)

	// simulate switch calling Receive before AddPeer
	assert.NotPanics(t, func() {
		reactor.Receive(StateChannel, peer, msg)
		reactor.AddPeer(peer)
	})
}

func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
	t.Parallel()

	N := 1
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, _, eventSwitches, p2pSwitches := startConsensusNet(css, N)
	defer stopConsensusNet(log.NewTestingLogger(t), reactors, eventSwitches, p2pSwitches)

	var (
		reactor = reactors[0]
		peer    = mock.NewPeer(nil)
		msg     = amino.MustMarshalAny(&HasVoteMessage{Height: 1, Round: 1, Index: 1, Type: types.PrevoteType})
	)

	// InitPeer is deliberately NOT called here

	// simulate the switch calling Receive before InitPeer
	assert.Panics(t, func() {
		reactor.Receive(StateChannel, peer, msg)
	})
}
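
// The two tests above pin down the peer lifecycle the reactor assumes: the
// switch must call InitPeer before delivering anything via Receive, while
// AddPeer may lag behind. A minimal sketch of the safe ordering:
//
//	reactor.InitPeer(peer)
//	reactor.Receive(StateChannel, peer, msg) // fine even before AddPeer
//	reactor.AddPeer(peer)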

// Test we record stats about votes and block parts from other peers.
func TestFlappyReactorRecordsVotesAndBlockParts(t *testing.T) {
	t.Parallel()

	testutils.FilterStability(t, testutils.Flappy)

	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	defer cleanup()
	reactors, blocksSubs, eventSwitches, p2pSwitches := startConsensusNet(css, N)
	defer stopConsensusNet(log.NewTestingLogger(t), reactors, eventSwitches, p2pSwitches)

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N, func(j int) {
		<-blocksSubs[j]
	}, css)

	// Get peer
	peer := reactors[1].Switch.Peers().List()[0]
	// Get peer state
	ps := peer.Get(types.PeerStateKey).(*PeerState)

	assert.Equal(t, true, ps.VotesSent() > 0, "number of votes sent should have increased")
	assert.Equal(t, true, ps.BlockPartsSent() > 0, "number of block parts sent should have increased")
}

// -------------------------------------------------------------
// ensure we can make blocks despite cycling a validator set

func TestReactorVotingPowerChange(t *testing.T) {
	t.Parallel()

	nVals := 4
	logger := log.NewTestingLogger(t)
	css, cleanup := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentKVStore)
	defer cleanup()

	reactors, blocksSubs, eventSwitches, p2pSwitches := startConsensusNet(css, nVals)
	defer stopConsensusNet(logger, reactors, eventSwitches, p2pSwitches)

	// map of active validators
	activeVals := make(map[string]struct{})
	for i := 0; i < nVals; i++ {
		addr := css[i].privValidator.GetPubKey().Address()
		activeVals[addr.String()] = struct{}{}
	}

	// wait till everyone makes block 1
	timeoutWaitGroup(t, nVals, func(j int) {
		<-blocksSubs[j]
	}, css)

	// ---------------------------------------------------------------------------
	logger.Debug("---------------------------- Testing changing the voting power of one validator a few times")

	val1PubKey := css[0].privValidator.GetPubKey()
	updateValTx := kvstore.MakeValSetChangeTx(val1PubKey, 25)
	previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
	}

	updateValTx = kvstore.MakeValSetChangeTx(val1PubKey, 2)
	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
	}

	updateValTx = kvstore.MakeValSetChangeTx(val1PubKey, 26)
	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValTx)
	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValTx)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)

	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
	}
}

func TestReactorValidatorSetChanges(t *testing.T) {
	t.Parallel()

	nPeers := 7
	nVals := 4
	css, _, _, cleanup := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStoreWithPath)
	defer cleanup()

	logger := log.NewTestingLogger(t)

	reactors, blocksSubs, eventSwitches, p2pSwitches := startConsensusNet(css, nPeers)
	defer stopConsensusNet(logger, reactors, eventSwitches, p2pSwitches)

	// map of active validators
	activeVals := make(map[string]struct{})
	for i := 0; i < nVals; i++ {
		addr := css[i].privValidator.GetPubKey().Address()
		activeVals[addr.String()] = struct{}{}
	}

	// wait till everyone makes block 1
	timeoutWaitGroup(t, nPeers, func(j int) {
		<-blocksSubs[j]
	}, css)

	// ---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing adding one validator")

	newValPubKey1 := css[nVals].privValidator.GetPubKey()
	newValTx1 := kvstore.MakeValSetChangeTx(newValPubKey1, testMinPower)

	// wait till everyone makes block 2
	// ensure the commit includes all validators
	// send newValTx to change vals in block 3
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValTx1)

	// wait till everyone makes block 3.
	// it includes the commit for block 2, which is by the original validator set
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValTx1)

	// wait till everyone makes block 4.
	// it includes the commit for block 3, which is by the original validator set
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)

	// the commits for block 4 should be with the updated validator set
	activeVals[newValPubKey1.Address().String()] = struct{}{}

	// wait till everyone makes block 5
	// it includes the commit for block 4, which should have the updated validator set
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	// ---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing changing the voting power of one validator")

	updateValPubKey1 := css[nVals].privValidator.GetPubKey()
	updateValTx1 := kvstore.MakeValSetChangeTx(updateValPubKey1, 25)
	previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, updateValTx1)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, updateValTx1)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
		t.Errorf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[nVals].GetRoundState().LastValidators.TotalVotingPower())
	}

	// ---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing adding two validators at once")

	newValPubKey2 := css[nVals+1].privValidator.GetPubKey()
	newValTx2 := kvstore.MakeValSetChangeTx(newValPubKey2, testMinPower)

	newValPubKey3 := css[nVals+2].privValidator.GetPubKey()
	newValTx3 := kvstore.MakeValSetChangeTx(newValPubKey3, testMinPower)

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValTx2, newValTx3)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValTx2, newValTx3)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	activeVals[newValPubKey2.Address().String()] = struct{}{}
	activeVals[newValPubKey3.Address().String()] = struct{}{}
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)

	// ---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing removing two validators at once")

	removeValidatorTx2 := kvstore.MakeValSetChangeTx(newValPubKey2, 0)
	removeValidatorTx3 := kvstore.MakeValSetChangeTx(newValPubKey3, 0)

	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
	delete(activeVals, newValPubKey2.Address().String())
	delete(activeVals, newValPubKey3.Address().String())
	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
}

// Check we can make blocks with skip_timeout_commit=false
func TestReactorWithTimeoutCommit(t *testing.T) {
	t.Parallel()

	N := 4
	css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
	defer cleanup()
	// override default SkipTimeoutCommit == true for tests
	for i := 0; i < N; i++ {
		css[i].config.SkipTimeoutCommit = false
	}

	reactors, blocksSubs, eventSwitches, p2pSwitches := startConsensusNet(css, N-1)
	defer stopConsensusNet(log.NewTestingLogger(t), reactors, eventSwitches, p2pSwitches)

	// wait till everyone makes the first new block
	timeoutWaitGroup(t, N-1, func(j int) {
		<-blocksSubs[j]
	}, css)
}

func waitForAndValidateBlock(
	t *testing.T,
	n int,
	activeVals map[string]struct{},
	blocksSubs []<-chan events.Event,
	css []*ConsensusState,
	txs ...[]byte,
) {
	t.Helper()

	timeoutWaitGroup(t, n, func(j int) {
		css[j].Logger.Debug("waitForAndValidateBlock")
		msg := <-blocksSubs[j]
		newBlock := msg.(types.EventNewBlock).Block
		css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height)
		err := validateBlock(newBlock, activeVals)
		assert.Nil(t, err)
		for _, tx := range txs {
			err := assertMempool(css[j].txNotifier).CheckTx(tx, nil)
			assert.Nil(t, err)
		}
	}, css)
}

func waitForAndValidateBlockWithTx(
	t *testing.T,
	n int,
	activeVals map[string]struct{},
	blocksSubs []<-chan events.Event,
	css []*ConsensusState,
	txs ...[]byte,
) {
	t.Helper()

	timeoutWaitGroup(t, n, func(j int) {
		ntxs := 0
	BLOCK_TX_LOOP:
		for {
			css[j].Logger.Debug("waitForAndValidateBlockWithTx", "ntxs", ntxs)
			msg := <-blocksSubs[j]
			newBlock := msg.(types.EventNewBlock).Block
			css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height)
			err := validateBlock(newBlock, activeVals)
			assert.Nil(t, err)

			// check that txs match the txs we're waiting for.
			// note they could be spread over multiple blocks,
			// but they should be in order.
			for _, tx := range newBlock.Data.Txs {
				assert.EqualValues(t, txs[ntxs], tx)
				ntxs++
			}

			if ntxs == len(txs) {
				break BLOCK_TX_LOOP
			}
		}
	}, css)
}

func waitForBlockWithUpdatedValsAndValidateIt(
	t *testing.T,
	n int,
	updatedVals map[string]struct{},
	blocksSubs []<-chan events.Event,
	css []*ConsensusState,
) {
	t.Helper()

	timeoutWaitGroup(t, n, func(j int) {
		var newBlock *types.Block
	LOOP:
		for {
			css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt")
			msg := <-blocksSubs[j]
			newBlock = msg.(types.EventNewBlock).Block
			if newBlock.LastCommit.Size() == len(updatedVals) {
				css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height)
				break LOOP
			} else {
				css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. Skipping", "height", newBlock.Height)
			}
		}

		err := validateBlock(newBlock, updatedVals)
		assert.Nil(t, err)
	}, css)
}

// expects high synchrony!
func validateBlock(block *types.Block, activeVals map[string]struct{}) error {
	if block.LastCommit.Size() != len(activeVals) {
		return fmt.Errorf("Commit size doesn't match number of active validators. Got %d, expected %d", block.LastCommit.Size(), len(activeVals))
	}

	for _, vote := range block.LastCommit.Precommits {
		if _, ok := activeVals[vote.ValidatorAddress.String()]; !ok {
			return fmt.Errorf("Found vote for inactive validator %X", vote.ValidatorAddress)
		}
	}
	return nil
}
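
// Timing note (a restatement of the tests above, not new behavior): block H's
// LastCommit carries the precommits for block H-1, signed by the validator set
// active at H-1, so a validator-set change tx included in block H only shows
// up in a LastCommit two blocks later. Hence the pattern used above, where
// changeTx and newAddr are placeholders:
//
//	waitForAndValidateBlock(t, n, activeVals, blocksSubs, css, changeTx)
//	waitForAndValidateBlockWithTx(t, n, activeVals, blocksSubs, css, changeTx)
//	waitForAndValidateBlock(t, n, activeVals, blocksSubs, css)
//	activeVals[newAddr.String()] = struct{}{}
//	waitForBlockWithUpdatedValsAndValidateIt(t, n, activeVals, blocksSubs, css)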

func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*ConsensusState) {
	t.Helper()

	wg := new(sync.WaitGroup)
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func(j int) {
			f(j)
			wg.Done()
		}(i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	// we're running many nodes in-process, possibly in a virtual machine,
	// and spewing debug messages, so making a block could take a while.
	timeout := time.Second * 300

	select {
	case <-done:
	case <-time.After(timeout):
		for i, cs := range css {
			t.Log("#################")
			t.Log("Validator", i)
			t.Log(cs.GetRoundState())
			t.Log("")
		}
		osm.PrintAllGoroutines()
		panic("Timed out waiting for all validators to commit a block")
	}
}
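
// Typical use (this exact call appears throughout the tests above): block
// until every node reports its next block on its subscription channel, or
// dump round state and goroutines once the timeout fires:
//
//	timeoutWaitGroup(t, N, func(j int) {
//		<-blocksSubs[j]
//	}, css)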

// -------------------------------------------------------------
// Ensure basic validation of structs is functioning

func TestNewRoundStepMessageValidateBasic(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		testName               string
		messageHeight          int64
		messageRound           int
		messageStep            cstypes.RoundStepType
		messageLastCommitRound int
		expectErr              bool
	}{
		{"Valid Message", 0, 0, 0x01, 1, false},
		{"Invalid Message", -1, 0, 0x01, 1, true},
		{"Invalid Message", 0, -1, 0x01, 1, true},
		{"Invalid Message", 0, 0, 0x00, 1, true},
		{"Invalid Message", 0, 0, 0x00, 0, true},
		{"Invalid Message", 1, 0, 0x01, 0, true},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			t.Parallel()

			message := NewRoundStepMessage{
				Height:          tc.messageHeight,
				Round:           tc.messageRound,
				Step:            tc.messageStep,
				LastCommitRound: tc.messageLastCommitRound,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestNewValidBlockMessageValidateBasic(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		malleateFn func(*NewValidBlockMessage)
		expErr     string
	}{
		{func(msg *NewValidBlockMessage) {}, ""},
		{func(msg *NewValidBlockMessage) { msg.Height = -1 }, "Negative Height"},
		{func(msg *NewValidBlockMessage) { msg.Round = -1 }, "Negative Round"},
		{
			func(msg *NewValidBlockMessage) { msg.BlockPartsHeader.Total = 2 },
			"BlockParts bit array size 1 not equal to BlockPartsHeader.Total 2",
		},
		{
			func(msg *NewValidBlockMessage) {
				msg.BlockPartsHeader.Total = 0
				msg.BlockParts = bitarray.NewBitArray(0)
			},
			"Empty BlockParts",
		},
		{
			func(msg *NewValidBlockMessage) { msg.BlockParts = bitarray.NewBitArray(types.MaxBlockPartsCount + 1) },
			"BlockParts bit array size 1602 not equal to BlockPartsHeader.Total 1",
		},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			t.Parallel()

			msg := &NewValidBlockMessage{
				Height: 1,
				Round:  0,
				BlockPartsHeader: types.PartSetHeader{
					Total: 1,
				},
				BlockParts: bitarray.NewBitArray(1),
			}

			tc.malleateFn(msg)
			err := msg.ValidateBasic()
			if tc.expErr != "" && assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expErr)
			}
		})
	}
}

func TestProposalPOLMessageValidateBasic(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		malleateFn func(*ProposalPOLMessage)
		expErr     string
	}{
		{func(msg *ProposalPOLMessage) {}, ""},
		{func(msg *ProposalPOLMessage) { msg.Height = -1 }, "Negative Height"},
		{func(msg *ProposalPOLMessage) { msg.ProposalPOLRound = -1 }, "Negative ProposalPOLRound"},
		{func(msg *ProposalPOLMessage) { msg.ProposalPOL = bitarray.NewBitArray(0) }, "Empty ProposalPOL bit array"},
		{
			func(msg *ProposalPOLMessage) { msg.ProposalPOL = bitarray.NewBitArray(types.MaxVotesCount + 1) },
			"ProposalPOL bit array is too big: 10001, max: 10000",
		},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			t.Parallel()

			msg := &ProposalPOLMessage{
				Height:           1,
				ProposalPOLRound: 1,
				ProposalPOL:      bitarray.NewBitArray(1),
			}

			tc.malleateFn(msg)
			err := msg.ValidateBasic()
			if tc.expErr != "" && assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expErr)
			}
		})
	}
}

func TestBlockPartMessageValidateBasic(t *testing.T) {
	t.Parallel()

	testPart := new(types.Part)
	testPart.Proof.LeafHash = tmhash.Sum([]byte("leaf"))
	testCases := []struct {
		testName      string
		messageHeight int64
		messageRound  int
		messagePart   *types.Part
		expectErr     bool
	}{
		{"Valid Message", 0, 0, testPart, false},
		{"Invalid Message", -1, 0, testPart, true},
		{"Invalid Message", 0, -1, testPart, true},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			t.Parallel()

			message := BlockPartMessage{
				Height: tc.messageHeight,
				Round:  tc.messageRound,
				Part:   tc.messagePart,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}

	message := BlockPartMessage{Height: 0, Round: 0, Part: new(types.Part)}
	message.Part.Index = -1

	assert.Equal(t, true, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
}

func TestHasVoteMessageValidateBasic(t *testing.T) {
	t.Parallel()

	const (
		validSignedMsgType   types.SignedMsgType = 0x01
		invalidSignedMsgType types.SignedMsgType = 0x03
	)

	testCases := []struct {
		testName      string
		messageHeight int64
		messageRound  int
		messageType   types.SignedMsgType
		messageIndex  int
		expectErr     bool
	}{
		{"Valid Message", 0, 0, validSignedMsgType, 0, false},
		{"Invalid Message", -1, 0, validSignedMsgType, 0, true},
		{"Invalid Message", 0, -1, validSignedMsgType, 0, true},
		{"Invalid Message", 0, 0, invalidSignedMsgType, 0, true},
		{"Invalid Message", 0, 0, validSignedMsgType, -1, true},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			t.Parallel()

			message := HasVoteMessage{
				Height: tc.messageHeight,
				Round:  tc.messageRound,
				Type:   tc.messageType,
				Index:  tc.messageIndex,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestVoteSetMaj23MessageValidateBasic(t *testing.T) {
	t.Parallel()

	const (
		validSignedMsgType   types.SignedMsgType = 0x01
		invalidSignedMsgType types.SignedMsgType = 0x03
	)

	validBlockID := types.BlockID{}
	invalidBlockID := types.BlockID{
		Hash: []byte{},
		PartsHeader: types.PartSetHeader{
			Total: -1,
			Hash:  []byte{},
		},
	}

	testCases := []struct {
		testName       string
		messageHeight  int64
		messageRound   int
		messageType    types.SignedMsgType
		messageBlockID types.BlockID
		expectErr      bool
	}{
		{"Valid Message", 0, 0, validSignedMsgType, validBlockID, false},
		{"Invalid Message", -1, 0, validSignedMsgType, validBlockID, true},
		{"Invalid Message", 0, -1, validSignedMsgType, validBlockID, true},
		{"Invalid Message", 0, 0, invalidSignedMsgType, validBlockID, true},
		{"Invalid Message", 0, 0, validSignedMsgType, invalidBlockID, true},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			t.Parallel()

			message := VoteSetMaj23Message{
				Height:  tc.messageHeight,
				Round:   tc.messageRound,
				Type:    tc.messageType,
				BlockID: tc.messageBlockID,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestVoteSetBitsMessageValidateBasic(t *testing.T) {
	t.Parallel()

	testCases := []struct { //nolint: maligned
		malleateFn func(*VoteSetBitsMessage)
		expErr     string
	}{
		{func(msg *VoteSetBitsMessage) {}, ""},
		{func(msg *VoteSetBitsMessage) { msg.Height = -1 }, "Negative Height"},
		{func(msg *VoteSetBitsMessage) { msg.Round = -1 }, "Negative Round"},
		{func(msg *VoteSetBitsMessage) { msg.Type = 0x03 }, "Invalid Type"},
		{func(msg *VoteSetBitsMessage) {
			msg.BlockID = types.BlockID{
				Hash: []byte{},
				PartsHeader: types.PartSetHeader{
					Total: -1,
					Hash:  []byte{},
				},
			}
		}, "wrong BlockID: wrong PartsHeader: Negative Total"},
		{
			func(msg *VoteSetBitsMessage) { msg.Votes = bitarray.NewBitArray(types.MaxVotesCount + 1) },
			"votes bit array is too big: 10001, max: 10000",
		},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
			t.Parallel()

			msg := &VoteSetBitsMessage{
				Height:  1,
				Round:   0,
				Type:    0x01,
				Votes:   bitarray.NewBitArray(1),
				BlockID: types.BlockID{},
			}

			tc.malleateFn(msg)
			err := msg.ValidateBasic()
			if tc.expErr != "" && assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expErr)
			}
		})
	}
}