github.com/Finschia/ostracon@v1.1.5/evidence/reactor_test.go

package evidence_test

import (
	"encoding/hex"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/go-kit/log/term"
	"github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	dbm "github.com/tendermint/tm-db"

	cfg "github.com/Finschia/ostracon/config"
	"github.com/Finschia/ostracon/crypto"
	"github.com/Finschia/ostracon/crypto/tmhash"
	"github.com/Finschia/ostracon/evidence"
	"github.com/Finschia/ostracon/evidence/mocks"
	"github.com/Finschia/ostracon/libs/log"
	"github.com/Finschia/ostracon/p2p"
	p2pmocks "github.com/Finschia/ostracon/p2p/mocks"
	sm "github.com/Finschia/ostracon/state"
	"github.com/Finschia/ostracon/types"
)

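// Peer combines the mocked p2p.Peer and p2p.EnvelopeSender so that a single
// test double satisfies both interfaces.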
type Peer struct {
	*p2pmocks.Peer
	*p2pmocks.EnvelopeSender
}

var (
	numEvidence = 10
	timeout     = 120 * time.Second // ridiculously high because CircleCI is slow
)

// We have N evidence reactors connected to one another. The first reactor
// receives a number of evidence items at varying heights. We test that all
// other reactors receive the evidence and add it to their own respective
// evidence pools.
func TestReactorBroadcastEvidence(t *testing.T) {
	config := cfg.TestConfig()
	N := 7

	// create statedb for everyone
	stateDBs := make([]sm.Store, N)
	val := types.NewMockPV()
	// we need validators saved for heights at least as high as we have evidence for
	height := int64(numEvidence) + 10
	for i := 0; i < N; i++ {
		stateDBs[i] = initializeValidatorState(val, height)
	}

	// make reactors from statedb
	reactors, pools := makeAndConnectReactorsAndPools(config, stateDBs)

	// set the peer height on each reactor
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			ps := peerState{height}
			peer.Set(types.PeerStateKey, ps)
		}
	}

	// send a bunch of valid evidence to the first reactor's evpool
	// and wait for them all to be received in the others
	evList := sendEvidence(t, pools[0], val, numEvidence)
	waitForEvidence(t, evList, pools)
}

// We have two evidence reactors connected to one another but at different heights.
// Reactor 1, which is ahead, receives a number of evidence items. It should only send
// the evidence that is below the height of the peer to that peer.
func TestReactorSelectiveBroadcast(t *testing.T) {
	config := cfg.TestConfig()

	val := types.NewMockPV()
	height1 := int64(numEvidence) + 10
	height2 := int64(numEvidence) / 2

	// DB1 is ahead of DB2
	stateDB1 := initializeValidatorState(val, height1)
	stateDB2 := initializeValidatorState(val, height2)

	// make reactors from statedb
	reactors, pools := makeAndConnectReactorsAndPools(config, []sm.Store{stateDB1, stateDB2})

	// set the peer height on each reactor
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			ps := peerState{height1}
			peer.Set(types.PeerStateKey, ps)
		}
	}

	// update the first reactor peer's height to be very small
	peer := reactors[0].Switch.Peers().List()[0]
	ps := peerState{height2}
	peer.Set(types.PeerStateKey, ps)

	// send a bunch of valid evidence to the first reactor's evpool
	evList := sendEvidence(t, pools[0], val, numEvidence)

	// only evidence below the peer's height should make it through
	waitForEvidence(t, evList[:numEvidence/2-1], []*evidence.Pool{pools[1]})

	// peers should still be connected
	peers := reactors[1].Switch.Peers().List()
	assert.Equal(t, 1, len(peers))
}

// This test aims to ensure that reactors don't send evidence that they have committed
// or that is not yet ready for the peer, through three scenarios.
// First, committed evidence to a newly connected peer
// Second, evidence to a peer that is behind
// Third, evidence that was pending and became committed just before the peer caught up
func TestReactorsGossipNoCommittedEvidence(t *testing.T) {
	config := cfg.TestConfig()

	val := types.NewMockPV()
	var height int64 = 10

	// DB1 is ahead of DB2
	stateDB1 := initializeValidatorState(val, height-1)
	stateDB2 := initializeValidatorState(val, height-2)
	state, err := stateDB1.Load()
	require.NoError(t, err)
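	// advance the state by one block so that the Update call below marks the
	// freshly added evidence as committed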
	state.LastBlockHeight++

	// make reactors from statedb
	reactors, pools := makeAndConnectReactorsAndPools(config, []sm.Store{stateDB1, stateDB2})

	evList := sendEvidence(t, pools[0], val, 2)
	pools[0].Update(state, evList)
	require.EqualValues(t, uint32(0), pools[0].Size())

	time.Sleep(100 * time.Millisecond)

	peer := reactors[0].Switch.Peers().List()[0]
	ps := peerState{height - 2}
	peer.Set(types.PeerStateKey, ps)

	peer = reactors[1].Switch.Peers().List()[0]
	ps = peerState{height}
	peer.Set(types.PeerStateKey, ps)

	// wait to see that no evidence comes through
	time.Sleep(300 * time.Millisecond)

	// the second pool should not have received any evidence because it has already been committed
	assert.Equal(t, uint32(0), pools[1].Size(), "second reactor should not have received evidence")

	// the first reactor receives three more evidence items
	evList = make([]types.Evidence, 3)
	for i := 0; i < 3; i++ {
		ev := types.NewMockDuplicateVoteEvidenceWithValidator(height-3+int64(i),
			time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), val, state.ChainID)
		err := pools[0].AddEvidence(ev)
		require.NoError(t, err)
		evList[i] = ev
	}

	// wait to see that only one evidence item is sent
	time.Sleep(300 * time.Millisecond)

	// the second pool should only have received the first evidence item because it is behind
	peerEv, _ := pools[1].PendingEvidence(10000)
	assert.EqualValues(t, []types.Evidence{evList[0]}, peerEv)

	// the last evidence is committed and the second reactor catches up in state to the first
	// reactor. We therefore expect that the second reactor only receives one more evidence item,
	// the one that is still pending and not the evidence that has already been committed.
	state.LastBlockHeight++
	pools[0].Update(state, []types.Evidence{evList[2]})
	// the first reactor should have the two remaining pending evidence items
	require.EqualValues(t, uint32(2), pools[0].Size())

	// now update the state of the second reactor
	pools[1].Update(state, types.EvidenceList{})
	peer = reactors[0].Switch.Peers().List()[0]
	ps = peerState{height}
	peer.Set(types.PeerStateKey, ps)

	// wait to see that only two evidence items are sent
	time.Sleep(300 * time.Millisecond)

	peerEv, _ = pools[1].PendingEvidence(1000)
	assert.EqualValues(t, []types.Evidence{evList[0], evList[1]}, peerEv)
}

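// TestReactorBroadcastEvidenceMemoryLeak ensures that the goroutine started to
// broadcast evidence to a peer exits once that peer stops running, so stopped
// peers do not leak broadcast routines.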
func TestReactorBroadcastEvidenceMemoryLeak(t *testing.T) {
	config := cfg.TestConfig()
	evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
	evidenceDB := dbm.NewMemDB()
	blockStore := &mocks.BlockStore{}
	blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(
		&types.BlockMeta{Header: types.Header{Time: evidenceTime}},
	)
	val := types.NewMockPV()
	stateStore := initializeValidatorState(val, 1)
	pool, err := evidence.NewPool(evidenceDB, stateStore, blockStore)
	require.NoError(t, err)

	p := &Peer{Peer: &p2pmocks.Peer{}, EnvelopeSender: &p2pmocks.EnvelopeSender{}}

	p.Peer.On("IsRunning").Once().Return(true)
	p.Peer.On("IsRunning").Return(false)
	// check that we are not leaking any go-routines
	// i.e. broadcastEvidenceRoutine finishes when peer is stopped
	defer leaktest.CheckTimeout(t, 10*time.Second)()

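	// every send fails, so the broadcast routine can only exit via the
	// IsRunning check above (true once, then false)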
	p.EnvelopeSender.On("SendEnvelope", mock.MatchedBy(func(i interface{}) bool {
		e, ok := i.(p2p.Envelope)
		return ok && e.ChannelID == evidence.EvidenceChannel
	})).Return(false)
	quitChan := make(<-chan struct{})
	p.Peer.On("Quit").Return(quitChan)
	ps := peerState{2}
	p.Peer.On("Get", types.PeerStateKey).Return(ps)
	p.Peer.On("ID").Return("ABC")
	p.Peer.On("String").Return("mock")

	r := evidence.NewReactor(pool, config.P2P.RecvAsync, config.P2P.EvidenceRecvBufSize)
	r.SetLogger(log.TestingLogger())
	r.AddPeer(p)

	_ = sendEvidence(t, pool, val, 2)
}

// evidenceLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func evidenceLogger() log.Logger {
	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
		for i := 0; i < len(keyvals)-1; i += 2 {
			if keyvals[i] == "validator" {
				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
			}
		}
		return term.FgBgColor{}
	})
}

// connect N evidence reactors through N switches
func makeAndConnectReactorsAndPools(config *cfg.Config, stateStores []sm.Store) ([]*evidence.Reactor,
	[]*evidence.Pool) {
	N := len(stateStores)

	reactors := make([]*evidence.Reactor, N)
	pools := make([]*evidence.Pool, N)
	logger := evidenceLogger()
	evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)

	for i := 0; i < N; i++ {
		evidenceDB := dbm.NewMemDB()
		blockStore := &mocks.BlockStore{}
		blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(
			&types.BlockMeta{Header: types.Header{Time: evidenceTime}},
		)
		pool, err := evidence.NewPool(evidenceDB, stateStores[i], blockStore)
		if err != nil {
			panic(err)
		}
		pools[i] = pool
		reactors[i] = evidence.NewReactor(pool, config.P2P.RecvAsync, config.P2P.EvidenceRecvBufSize)
		reactors[i].SetLogger(logger.With("validator", i))
	}

	p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch, config *cfg.P2PConfig) *p2p.Switch {
		s.AddReactor("EVIDENCE", reactors[i])
		return s
	}, p2p.Connect2Switches)

	return reactors, pools
}

// wait for all evidence on all reactors
func waitForEvidence(t *testing.T, evs types.EvidenceList, pools []*evidence.Pool) {
	// wait for the evidence in all evpools
	wg := new(sync.WaitGroup)
	for i := 0; i < len(pools); i++ {
		wg.Add(1)
		go _waitForEvidence(t, wg, evs, i, pools)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	timer := time.After(timeout)
	select {
	case <-timer:
		t.Fatal("Timed out waiting for evidence")
	case <-done:
	}
}

// wait for all evidence on a single evpool
func _waitForEvidence(
	t *testing.T,
	wg *sync.WaitGroup,
	evs types.EvidenceList,
	poolIdx int,
	pools []*evidence.Pool,
) {
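	// poll the pool until it holds exactly len(evs) pending items; the overall
	// deadline is enforced by the timeout in waitForEvidence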
	evpool := pools[poolIdx]
	var evList []types.Evidence
	currentPoolSize := 0
	for currentPoolSize != len(evs) {
		evList, _ = evpool.PendingEvidence(int64(len(evs) * 500)) // each evidence should not be more than 500 bytes
		currentPoolSize = len(evList)
		time.Sleep(time.Millisecond * 100)
	}

	// put the reaped evidence in a map so we can quickly check we got everything
	evMap := make(map[string]types.Evidence)
	for _, e := range evList {
		evMap[string(e.Hash())] = e
	}
	for i, expectedEv := range evs {
		gotEv := evMap[string(expectedEv.Hash())]
		assert.Equal(t, expectedEv, gotEv,
			fmt.Sprintf("evidence at index %d on pool %d doesn't match: %v vs %v",
				i, poolIdx, expectedEv, gotEv))
	}

	wg.Done()
}

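// sendEvidence adds n mock duplicate-vote evidence items, at heights 1 through n,
// to the given pool and returns them.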
func sendEvidence(t *testing.T, evpool *evidence.Pool, val types.PrivValidator, n int) types.EvidenceList {
	evList := make([]types.Evidence, n)
	for i := 0; i < n; i++ {
		ev := types.NewMockDuplicateVoteEvidenceWithValidator(int64(i+1),
			time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), val, evidenceChainID)
		err := evpool.AddEvidence(ev)
		require.NoError(t, err)
		evList[i] = ev
	}
	return evList
}

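// peerState is a minimal implementation of the value stored under
// types.PeerStateKey; the evidence reactor only consults its GetHeight method.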
type peerState struct {
	height int64
}

func (ps peerState) GetHeight() int64 {
	return ps.height
}

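// exampleVote returns a deterministic vote of the given signed-message type,
// used below to build the duplicate-vote evidence for TestEvidenceVectors.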
func exampleVote(t byte) *types.Vote {
	var stamp, err = time.Parse(types.TimeFormat, "2017-12-25T03:00:01.234Z")
	if err != nil {
		panic(err)
	}

	return &types.Vote{
		Type:      tmproto.SignedMsgType(t),
		Height:    3,
		Round:     2,
		Timestamp: stamp,
		BlockID: types.BlockID{
			Hash: tmhash.Sum([]byte("blockID_hash")),
			PartSetHeader: types.PartSetHeader{
				Total: 1000000,
				Hash:  tmhash.Sum([]byte("blockID_part_set_header_hash")),
			},
		},
		ValidatorAddress: crypto.AddressHash([]byte("validator_address")),
		ValidatorIndex:   56789,
	}
}

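// TestLegacyReactorReceiveBasic checks that the legacy byte-slice Receive path
// handles an empty EvidenceList message from a peer without panicking.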
func TestLegacyReactorReceiveBasic(t *testing.T) {
	config := cfg.TestConfig()
	N := 1

	stateDBs := make([]sm.Store, N)
	val := types.NewMockPV()
	stateDBs[0] = initializeValidatorState(val, 1)

	reactors, _ := makeAndConnectReactorsAndPools(config, stateDBs)

	var (
		reactor = reactors[0]
		peer    = &p2pmocks.Peer{}
	)
	quitChan := make(<-chan struct{})
	peer.On("Quit").Return(quitChan)

	reactor.InitPeer(peer)
	reactor.AddPeer(peer)
	e := &tmproto.EvidenceList{}
	msg, err := proto.Marshal(e)
	assert.NoError(t, err)

	assert.NotPanics(t, func() {
		reactor.Receive(evidence.EvidenceChannel, peer, msg)
	})
}

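// TestEvidenceVectors pins the protobuf wire encoding of an evidence list to a
// known hex vector so that unintended encoding changes are caught.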
// nolint:lll //ignore line length for tests
func TestEvidenceVectors(t *testing.T) {
	val := &types.Validator{
		Address:     crypto.AddressHash([]byte("validator_address")),
		VotingPower: 10,
	}

	valSet := types.NewValidatorSet([]*types.Validator{val})

	dupl := types.NewDuplicateVoteEvidence(
		exampleVote(1),
		exampleVote(2),
		defaultEvidenceTime,
		valSet,
	)

	testCases := []struct {
		testName     string
		evidenceList []types.Evidence
		expBytes     string
	}{
		{"DuplicateVoteEvidence", []types.Evidence{dupl}, "0a85020a82020a79080210031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0b08b1d381d20510809dca6f32146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb031279080110031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0b08b1d381d20510809dca6f32146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb03180a200a2a060880dbaae105"},
	}

	for _, tc := range testCases {
		tc := tc

		evi := make([]tmproto.Evidence, len(tc.evidenceList))
		for i := 0; i < len(tc.evidenceList); i++ {
			ev, err := types.EvidenceToProto(tc.evidenceList[i])
			require.NoError(t, err, tc.testName)
			evi[i] = *ev
		}

		epl := tmproto.EvidenceList{
			Evidence: evi,
		}

		bz, err := epl.Marshal()
		require.NoError(t, err, tc.testName)

		require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
	}
}