github.com/DFWallet/tendermint-cosmos@v0.0.2/evidence/reactor_test.go

package evidence_test

import (
	"encoding/hex"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/go-kit/kit/log/term"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	dbm "github.com/tendermint/tm-db"

	cfg "github.com/DFWallet/tendermint-cosmos/config"
	"github.com/DFWallet/tendermint-cosmos/crypto"
	"github.com/DFWallet/tendermint-cosmos/crypto/tmhash"
	"github.com/DFWallet/tendermint-cosmos/evidence"
	"github.com/DFWallet/tendermint-cosmos/evidence/mocks"
	"github.com/DFWallet/tendermint-cosmos/libs/log"
	"github.com/DFWallet/tendermint-cosmos/p2p"
	p2pmocks "github.com/DFWallet/tendermint-cosmos/p2p/mocks"
	tmproto "github.com/DFWallet/tendermint-cosmos/proto/tendermint/types"
	sm "github.com/DFWallet/tendermint-cosmos/state"
	"github.com/DFWallet/tendermint-cosmos/types"
)

var (
	numEvidence = 10
	timeout     = 120 * time.Second // ridiculously high because CircleCI is slow
)

// We have N evidence reactors connected to one another. The first reactor
// receives a number of pieces of evidence at varying heights. We test that all
// other reactors receive the evidence and add it to their own respective
// evidence pools.
func TestReactorBroadcastEvidence(t *testing.T) {
	config := cfg.TestConfig()
	N := 7

	// create statedb for everyone
	stateDBs := make([]sm.Store, N)
	val := types.NewMockPV()
	// we need validators saved for heights at least as high as we have evidence for
	height := int64(numEvidence) + 10
	for i := 0; i < N; i++ {
		stateDBs[i] = initializeValidatorState(val, height)
	}

	// make reactors from statedb
	reactors, pools := makeAndConnectReactorsAndPools(config, stateDBs)

	// set the peer height on each reactor
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			ps := peerState{height}
			peer.Set(types.PeerStateKey, ps)
		}
	}

	// send a bunch of valid evidence to the first reactor's evpool
	// and wait for them all to be received in the others
	evList := sendEvidence(t, pools[0], val, numEvidence)
	waitForEvidence(t, evList, pools)
}

// We have two evidence reactors connected to one another but at different heights.
// Reactor 1, which is ahead, receives a number of pieces of evidence. It should
// only send the evidence that is below the height of the peer to that peer.
func TestReactorSelectiveBroadcast(t *testing.T) {
	config := cfg.TestConfig()

	val := types.NewMockPV()
	height1 := int64(numEvidence) + 10
	height2 := int64(numEvidence) / 2

	// DB1 is ahead of DB2
	stateDB1 := initializeValidatorState(val, height1)
	stateDB2 := initializeValidatorState(val, height2)

	// make reactors from statedb
	reactors, pools := makeAndConnectReactorsAndPools(config, []sm.Store{stateDB1, stateDB2})

	// set the peer height on each reactor
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			ps := peerState{height1}
			peer.Set(types.PeerStateKey, ps)
		}
	}

	// update the first reactor peer's height to be very small
	peer := reactors[0].Switch.Peers().List()[0]
	ps := peerState{height2}
	peer.Set(types.PeerStateKey, ps)

	// send a bunch of valid evidence to the first reactor's evpool
	evList := sendEvidence(t, pools[0], val, numEvidence)

	// only evidence below the peer's height should make it through
	waitForEvidence(t, evList[:numEvidence/2-1], []*evidence.Pool{pools[1]})

	// peers should still be connected
	peers := reactors[1].Switch.Peers().List()
	assert.Equal(t, 1, len(peers))
}

// This test aims to ensure that reactors don't send evidence that they have
// committed or that is not yet ready for the peer, through three scenarios:
// First, committed evidence sent to a newly connected peer.
// Second, evidence sent to a peer that is behind.
// Third, evidence that was pending and became committed just before the peer caught up.
func TestReactorsGossipNoCommittedEvidence(t *testing.T) {
	config := cfg.TestConfig()

	val := types.NewMockPV()
	var height int64 = 10

	// DB1 is ahead of DB2
	stateDB1 := initializeValidatorState(val, height-1)
	stateDB2 := initializeValidatorState(val, height-2)
	state, err := stateDB1.Load()
	require.NoError(t, err)
	state.LastBlockHeight++

	// make reactors from statedb
	reactors, pools := makeAndConnectReactorsAndPools(config, []sm.Store{stateDB1, stateDB2})

	evList := sendEvidence(t, pools[0], val, 2)
	pools[0].Update(state, evList)
	require.EqualValues(t, uint32(0), pools[0].Size())

	time.Sleep(100 * time.Millisecond)

	peer := reactors[0].Switch.Peers().List()[0]
	ps := peerState{height - 2}
	peer.Set(types.PeerStateKey, ps)

	peer = reactors[1].Switch.Peers().List()[0]
	ps = peerState{height}
	peer.Set(types.PeerStateKey, ps)

	// wait to see that no evidence comes through
	time.Sleep(300 * time.Millisecond)

	// the second pool should not have received any evidence because it has already been committed
	assert.Equal(t, uint32(0), pools[1].Size(), "second reactor should not have received evidence")

	// the first reactor receives three more pieces of evidence
	evList = make([]types.Evidence, 3)
	for i := 0; i < 3; i++ {
		ev := types.NewMockDuplicateVoteEvidenceWithValidator(height-3+int64(i),
			time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), val, state.ChainID)
		err := pools[0].AddEvidence(ev)
		require.NoError(t, err)
		evList[i] = ev
	}

	// wait to see that only one piece of evidence is sent
	time.Sleep(300 * time.Millisecond)

	// the second pool should only have received the first evidence because it is behind
	peerEv, _ := pools[1].PendingEvidence(10000)
	assert.EqualValues(t, []types.Evidence{evList[0]}, peerEv)

	// the last evidence is committed and the second reactor catches up in state to the first
	// reactor. We therefore expect that the second reactor only receives one more piece of
	// evidence: the one that is still pending, not the evidence that has already been committed.
	state.LastBlockHeight++
	pools[0].Update(state, []types.Evidence{evList[2]})
	// the first reactor should have the two remaining pending pieces of evidence
	require.EqualValues(t, uint32(2), pools[0].Size())

	// now update the state of the second reactor
	pools[1].Update(state, types.EvidenceList{})
	peer = reactors[0].Switch.Peers().List()[0]
	ps = peerState{height}
	peer.Set(types.PeerStateKey, ps)

	// wait to see that only the two pending pieces of evidence are sent
	time.Sleep(300 * time.Millisecond)

	peerEv, _ = pools[1].PendingEvidence(1000)
	assert.EqualValues(t, []types.Evidence{evList[0], evList[1]}, peerEv)
}

func TestReactorBroadcastEvidenceMemoryLeak(t *testing.T) {
	evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
	evidenceDB := dbm.NewMemDB()
	blockStore := &mocks.BlockStore{}
	blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(
		&types.BlockMeta{Header: types.Header{Time: evidenceTime}},
	)
	val := types.NewMockPV()
	stateStore := initializeValidatorState(val, 1)
	pool, err := evidence.NewPool(evidenceDB, stateStore, blockStore)
	require.NoError(t, err)

	p := &p2pmocks.Peer{}

	p.On("IsRunning").Once().Return(true)
	p.On("IsRunning").Return(false)
	// check that we are not leaking any go-routines
	// i.e. broadcastEvidenceRoutine finishes when peer is stopped
	defer leaktest.CheckTimeout(t, 10*time.Second)()

	p.On("Send", evidence.EvidenceChannel, mock.AnythingOfType("[]uint8")).Return(false)
	quitChan := make(<-chan struct{})
	p.On("Quit").Return(quitChan)
	ps := peerState{2}
	p.On("Get", types.PeerStateKey).Return(ps)
	p.On("ID").Return("ABC")
	p.On("String").Return("mock")

	r := evidence.NewReactor(pool)
	r.SetLogger(log.TestingLogger())
	r.AddPeer(p)

	_ = sendEvidence(t, pool, val, 2)
}

// evidenceLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func evidenceLogger() log.Logger {
	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
		for i := 0; i < len(keyvals)-1; i += 2 {
			if keyvals[i] == "validator" {
				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
			}
		}
		return term.FgBgColor{}
	})
}

// connect N evidence reactors through N switches
func makeAndConnectReactorsAndPools(config *cfg.Config, stateStores []sm.Store) ([]*evidence.Reactor,
	[]*evidence.Pool) {
	N := len(stateStores)

	reactors := make([]*evidence.Reactor, N)
	pools := make([]*evidence.Pool, N)
	logger := evidenceLogger()
	evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)

	for i := 0; i < N; i++ {
		evidenceDB := dbm.NewMemDB()
		blockStore := &mocks.BlockStore{}
		blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(
			&types.BlockMeta{Header: types.Header{Time: evidenceTime}},
		)
		pool, err := evidence.NewPool(evidenceDB, stateStores[i], blockStore)
		if err != nil {
			panic(err)
		}
		pools[i] = pool
		reactors[i] = evidence.NewReactor(pool)
		reactors[i].SetLogger(logger.With("validator", i))
	}

	p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("EVIDENCE", reactors[i])
		return s
	}, p2p.Connect2Switches)

	return reactors, pools
}

// wait for all evidence on all reactors
func waitForEvidence(t *testing.T, evs types.EvidenceList, pools []*evidence.Pool) {
	// wait for the evidence in all evpools
	wg := new(sync.WaitGroup)
	for i := 0; i < len(pools); i++ {
		wg.Add(1)
		go _waitForEvidence(t, wg, evs, i, pools)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	timer := time.After(timeout)
	select {
	case <-timer:
		t.Fatal("Timed out waiting for evidence")
	case <-done:
	}
}

// wait for all evidence on a single evpool
func _waitForEvidence(
	t *testing.T,
	wg *sync.WaitGroup,
	evs types.EvidenceList,
	poolIdx int,
	pools []*evidence.Pool,
) {
	evpool := pools[poolIdx]
	var evList []types.Evidence
	currentPoolSize := 0
	for currentPoolSize != len(evs) {
		evList, _ = evpool.PendingEvidence(int64(len(evs) * 500)) // each evidence should not be more than 500 bytes
		currentPoolSize = len(evList)
		time.Sleep(time.Millisecond * 100)
	}

	// put the reaped evidence in a map so we can quickly check we got everything
	evMap := make(map[string]types.Evidence)
	for _, e := range evList {
		evMap[string(e.Hash())] = e
	}
	for i, expectedEv := range evs {
		gotEv := evMap[string(expectedEv.Hash())]
		assert.Equal(t, expectedEv, gotEv,
			fmt.Sprintf("evidence at index %d on pool %d doesn't match: %v vs %v",
				i, poolIdx, expectedEv, gotEv))
	}

	wg.Done()
}

// sendEvidence creates n pieces of mock duplicate-vote evidence at heights
// 1..n and adds them to the given pool, failing the test on any error.
func sendEvidence(t *testing.T, evpool *evidence.Pool, val types.PrivValidator, n int) types.EvidenceList {
	evList := make([]types.Evidence, n)
	for i := 0; i < n; i++ {
		ev := types.NewMockDuplicateVoteEvidenceWithValidator(int64(i+1),
			time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), val, evidenceChainID)
		err := evpool.AddEvidence(ev)
		require.NoError(t, err)
		evList[i] = ev
	}
	return evList
}

// peerState is a stub peer state that reports only a height; the reactor
// reads it from the peer via types.PeerStateKey to decide which evidence
// the peer is ready for.
type peerState struct {
	height int64
}

func (ps peerState) GetHeight() int64 {
	return ps.height
}

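// A minimal compile-time sketch, added here as an assumption rather than part
// of the original file: it presumes this fork keeps upstream Tendermint's
// exported evidence.PeerState interface (a single GetHeight() int64 method),
// which is what the reactor type-asserts against after peer.Get.
var _ evidence.PeerState = peerState{}
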
// exampleVote returns a deterministic vote used to construct the duplicate
// vote evidence for the hard-coded test vectors below.
func exampleVote(t byte) *types.Vote {
	var stamp, err = time.Parse(types.TimeFormat, "2017-12-25T03:00:01.234Z")
	if err != nil {
		panic(err)
	}

	return &types.Vote{
		Type:      tmproto.SignedMsgType(t),
		Height:    3,
		Round:     2,
		Timestamp: stamp,
		BlockID: types.BlockID{
			Hash: tmhash.Sum([]byte("blockID_hash")),
			PartSetHeader: types.PartSetHeader{
				Total: 1000000,
				Hash:  tmhash.Sum([]byte("blockID_part_set_header_hash")),
			},
		},
		ValidatorAddress: crypto.AddressHash([]byte("validator_address")),
		ValidatorIndex:   56789,
	}
}

// nolint:lll //ignore line length for tests
func TestEvidenceVectors(t *testing.T) {
	val := &types.Validator{
		Address:     crypto.AddressHash([]byte("validator_address")),
		VotingPower: 10,
	}

	valSet := types.NewValidatorSet([]*types.Validator{val})

	dupl := types.NewDuplicateVoteEvidence(
		exampleVote(1),
		exampleVote(2),
		defaultEvidenceTime,
		valSet,
	)

	testCases := []struct {
		testName     string
		evidenceList []types.Evidence
		expBytes     string
	}{
		{"DuplicateVoteEvidence", []types.Evidence{dupl}, "0a85020a82020a79080210031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0b08b1d381d20510809dca6f32146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb031279080110031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0b08b1d381d20510809dca6f32146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb03180a200a2a060880dbaae105"},
	}

	for _, tc := range testCases {
		tc := tc

		evi := make([]tmproto.Evidence, len(tc.evidenceList))
		for i := 0; i < len(tc.evidenceList); i++ {
			ev, err := types.EvidenceToProto(tc.evidenceList[i])
			require.NoError(t, err, tc.testName)
			evi[i] = *ev
		}

		epl := tmproto.EvidenceList{
			Evidence: evi,
		}

		bz, err := epl.Marshal()
		require.NoError(t, err, tc.testName)

		require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
	}
}
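
// TestEvidenceListProtoRoundTrip is an added sketch, not part of the original
// file: it checks that the encoded evidence list from the vectors test decodes
// and re-encodes to the exact same bytes. It assumes the gogoproto-generated
// Unmarshal method exists on tmproto.EvidenceList alongside the Marshal method
// used above.
func TestEvidenceListProtoRoundTrip(t *testing.T) {
	val := &types.Validator{
		Address:     crypto.AddressHash([]byte("validator_address")),
		VotingPower: 10,
	}
	valSet := types.NewValidatorSet([]*types.Validator{val})

	dupl := types.NewDuplicateVoteEvidence(
		exampleVote(1),
		exampleVote(2),
		defaultEvidenceTime,
		valSet,
	)

	ev, err := types.EvidenceToProto(dupl)
	require.NoError(t, err)
	epl := tmproto.EvidenceList{Evidence: []tmproto.Evidence{*ev}}

	bz, err := epl.Marshal()
	require.NoError(t, err)

	// decoding and re-encoding should reproduce the exact same bytes
	var got tmproto.EvidenceList
	require.NoError(t, got.Unmarshal(bz))
	bz2, err := got.Marshal()
	require.NoError(t, err)
	require.Equal(t, bz, bz2)
}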