github.com/status-im/status-go@v1.1.0/peers/peerpool_test.go (about)

     1  package peers
     2  
     3  import (
     4  	"encoding/json"
     5  	"net"
     6  	"strconv"
     7  	"testing"
     8  	"time"
     9  
    10  	"github.com/stretchr/testify/assert"
    11  	"github.com/stretchr/testify/require"
    12  	"github.com/stretchr/testify/suite"
    13  
    14  	"github.com/ethereum/go-ethereum/common"
    15  	"github.com/ethereum/go-ethereum/crypto"
    16  	"github.com/ethereum/go-ethereum/p2p"
    17  	"github.com/ethereum/go-ethereum/p2p/discv5"
    18  	"github.com/ethereum/go-ethereum/p2p/enode"
    19  
    20  	"github.com/status-im/status-go/discovery"
    21  	"github.com/status-im/status-go/params"
    22  	"github.com/status-im/status-go/signal"
    23  )
    24  
// PeerPoolSimulationSuite wires up a discv5 bootnode plus three devp2p
// peer servers so the peer pool tests can exercise discovery end to end.
//
// Field roles (see SetupTest):
//   - bootnode: discovery v5 bootstrap node every peer points at
//   - peers: devp2p servers — [0] initiates connections, [1] is the first
//     candidate, [2] is kept for failover
//   - discovery: one discv5 instance per peer, populated by setupEthV5
type PeerPoolSimulationSuite struct {
	suite.Suite

	bootnode  *p2p.Server
	peers     []*p2p.Server
	discovery []discovery.Discovery
}
    32  
    33  func TestPeerPoolSimulationSuite(t *testing.T) {
    34  	s := &PeerPoolSimulationSuite{}
    35  	suite.Run(t, s)
    36  }
    37  
    38  func (s *PeerPoolSimulationSuite) SetupTest() {
    39  	key, _ := crypto.GenerateKey()
    40  	name := common.MakeName("bootnode", "1.0")
    41  	// 127.0.0.1 is invalidated by discovery v5
    42  	s.bootnode = &p2p.Server{
    43  		Config: p2p.Config{
    44  			MaxPeers:    10,
    45  			Name:        name,
    46  			ListenAddr:  ":0",
    47  			PrivateKey:  key,
    48  			DiscoveryV5: true,
    49  			NoDiscovery: true,
    50  		},
    51  	}
    52  	s.Require().NoError(s.bootnode.Start())
    53  	bootnodePort := uint16(s.bootnode.NodeInfo().Ports.Listener)
    54  	bootnodeV5 := discv5.NewNode(s.bootnode.DiscV5.Self().ID, net.ParseIP("127.0.0.1"), bootnodePort, bootnodePort)
    55  
    56  	// 1 peer to initiate connection, 1 peer as a first candidate, 1 peer - for failover
    57  	s.peers = make([]*p2p.Server, 3)
    58  	s.discovery = make([]discovery.Discovery, 3)
    59  	for i := range s.peers {
    60  		key, _ := crypto.GenerateKey()
    61  		peer := &p2p.Server{
    62  			Config: p2p.Config{
    63  				MaxPeers:         10,
    64  				Name:             common.MakeName("peer-"+strconv.Itoa(i), "1.0"),
    65  				ListenAddr:       ":0",
    66  				PrivateKey:       key,
    67  				NoDiscovery:      true,
    68  				BootstrapNodesV5: []*discv5.Node{bootnodeV5},
    69  			},
    70  		}
    71  		s.NoError(peer.Start())
    72  		s.peers[i] = peer
    73  	}
    74  }
    75  
    76  func (s *PeerPoolSimulationSuite) setupEthV5() {
    77  	for i := range s.peers {
    78  		peer := s.peers[i]
    79  		d := discovery.NewDiscV5(peer.PrivateKey, peer.ListenAddr, peer.BootstrapNodesV5)
    80  		s.NoError(d.Start())
    81  		s.discovery[i] = d
    82  	}
    83  }
    84  
    85  func (s *PeerPoolSimulationSuite) TearDown() {
    86  	s.bootnode.Stop()
    87  	for i := range s.peers {
    88  		s.peers[i].Stop()
    89  		s.NoError(s.discovery[i].Stop())
    90  	}
    91  }
    92  
    93  func (s *PeerPoolSimulationSuite) getPeerFromEvent(events <-chan *p2p.PeerEvent, etype p2p.PeerEventType) (nodeID enode.ID) {
    94  	select {
    95  	case ev := <-events:
    96  		if ev.Type == etype {
    97  			return ev.Peer
    98  		}
    99  		s.Failf("invalid event", "expected %s but got %s for peer %s", etype, ev.Type, ev.Peer)
   100  	case <-time.After(10 * time.Second):
   101  		s.Fail("timed out waiting for a peer")
   102  		return
   103  	}
   104  	return
   105  }
   106  
   107  func (s *PeerPoolSimulationSuite) getPoolEvent(events <-chan string) string {
   108  	select {
   109  	case ev := <-events:
   110  		return ev
   111  	case <-time.After(10 * time.Second):
   112  		s.FailNow("timed out waiting a pool event")
   113  		return ""
   114  	}
   115  }
   116  
   117  func (s *PeerPoolSimulationSuite) TestPeerPoolCacheEthV5() {
   118  	s.setupEthV5()
   119  	var err error
   120  
   121  	topic := discv5.Topic("cap=test")
   122  	config := map[discv5.Topic]params.Limits{
   123  		topic: params.NewLimits(1, 1),
   124  	}
   125  	peerPoolOpts := &Options{100 * time.Millisecond, 100 * time.Millisecond, 0, true, 100 * time.Millisecond, nil}
   126  	cache, err := newInMemoryCache()
   127  	s.Require().NoError(err)
   128  	peerPool := NewPeerPool(s.discovery[1], config, cache, peerPoolOpts)
   129  
   130  	// start peer pool
   131  	s.Require().NoError(peerPool.Start(s.peers[1]))
   132  	defer peerPool.Stop()
   133  
   134  	// check if cache is passed to topic pools
   135  	for _, topicPool := range peerPool.topics {
   136  		tp := topicPool.(*TopicPool)
   137  		s.Equal(cache, tp.cache)
   138  	}
   139  }
   140  
   141  // TestPeerPoolMaxPeersOverflow verifies that following scenario will not occur:
   142  // - found peer A and B in the same kademlia cycle
   143  // - process peer A
   144  // - max limit is reached -> closed discv5 and set it to nil
   145  // - process peer B
   146  // - panic because discv5 is nil!!!
   147  func TestPeerPoolMaxPeersOverflow(t *testing.T) {
   148  	maxCachedPeersMultiplier = 0
   149  	signals := make(chan string, 10)
   150  	signal.SetDefaultNodeNotificationHandler(func(jsonEvent string) {
   151  		var envelope struct {
   152  			Type string
   153  		}
   154  		require.NoError(t, json.Unmarshal([]byte(jsonEvent), &envelope))
   155  		signals <- envelope.Type
   156  	})
   157  	defer signal.ResetDefaultNodeNotificationHandler()
   158  
   159  	key, err := crypto.GenerateKey()
   160  	require.NoError(t, err)
   161  	peer := &p2p.Server{
   162  		Config: p2p.Config{
   163  			PrivateKey:  key,
   164  			NoDiscovery: true,
   165  		},
   166  	}
   167  	require.NoError(t, peer.Start())
   168  	defer peer.Stop()
   169  	discovery := discovery.NewDiscV5(key, peer.ListenAddr, nil)
   170  	require.NoError(t, discovery.Start())
   171  	defer func() { assert.NoError(t, discovery.Stop()) }()
   172  	require.True(t, discovery.Running())
   173  
   174  	poolOpts := &Options{DefaultFastSync, DefaultSlowSync, 0, true, 100 * time.Millisecond, nil}
   175  	pool := NewPeerPool(discovery, nil, nil, poolOpts)
   176  	require.NoError(t, pool.Start(peer))
   177  	require.Equal(t, signal.EventDiscoveryStarted, <-signals)
   178  	// without config, it will stop the discovery because all topic pools are satisfied
   179  	pool.events <- &p2p.PeerEvent{Type: p2p.PeerEventTypeAdd}
   180  	require.Equal(t, signal.EventDiscoverySummary, <-signals)
   181  	require.Equal(t, signal.EventDiscoveryStopped, <-signals)
   182  	require.False(t, discovery.Running())
   183  	// another peer added after discovery is stopped should not panic
   184  	pool.events <- &p2p.PeerEvent{Type: p2p.PeerEventTypeAdd}
   185  }
   186  
   187  func TestPeerPoolDiscV5Timeout(t *testing.T) {
   188  	signals := make(chan string)
   189  	signal.SetDefaultNodeNotificationHandler(func(jsonEvent string) {
   190  		var envelope struct {
   191  			Type  string
   192  			Event json.RawMessage
   193  		}
   194  		require.NoError(t, json.Unmarshal([]byte(jsonEvent), &envelope))
   195  		// Send signal asynchronously to avoid blocking.
   196  		// It's better than sending to a buffered channel because
   197  		// it won't ever block, for example, if two events were expected
   198  		// but received more.
   199  		// In this case, a strange PeerEventTypeDrop event was emitted.
   200  		go func() {
   201  			switch typ := envelope.Type; typ {
   202  			case signal.EventDiscoveryStarted, signal.EventDiscoveryStopped:
   203  				signals <- envelope.Type
   204  			}
   205  		}()
   206  	})
   207  	defer signal.ResetDefaultNodeNotificationHandler()
   208  
   209  	// start server
   210  	key, err := crypto.GenerateKey()
   211  	require.NoError(t, err)
   212  	server := &p2p.Server{
   213  		Config: p2p.Config{
   214  			PrivateKey:  key,
   215  			NoDiscovery: true,
   216  		},
   217  	}
   218  	require.NoError(t, server.Start())
   219  	defer server.Stop()
   220  
   221  	discovery := discovery.NewDiscV5(key, server.ListenAddr, nil)
   222  	require.NoError(t, discovery.Start())
   223  	defer func() { assert.NoError(t, discovery.Stop()) }()
   224  	require.True(t, discovery.Running())
   225  
   226  	// start PeerPool
   227  	poolOpts := &Options{DefaultFastSync, DefaultSlowSync, time.Millisecond * 100, true, 100 * time.Millisecond, nil}
   228  	pool := NewPeerPool(discovery, nil, nil, poolOpts)
   229  	require.NoError(t, pool.Start(server))
   230  	require.Equal(t, signal.EventDiscoveryStarted, <-signals)
   231  
   232  	// timeout after finding no peers
   233  	select {
   234  	case sig := <-signals:
   235  		require.Equal(t, signal.EventDiscoveryStopped, sig)
   236  	case <-time.After(pool.opts.DiscServerTimeout * 2):
   237  		t.Fatal("timed out")
   238  	}
   239  	require.False(t, discovery.Running())
   240  
   241  	// timeout after discovery restart
   242  	require.NoError(t, pool.restartDiscovery(server))
   243  	require.Equal(t, signal.EventDiscoveryStarted, <-signals)
   244  	require.True(t, discovery.Running())
   245  	pool.events <- &p2p.PeerEvent{Type: p2p.PeerEventTypeDrop} // required to turn the loop and pick up new timeout
   246  	select {
   247  	case sig := <-signals:
   248  		require.Equal(t, signal.EventDiscoveryStopped, sig)
   249  	case <-time.After(pool.opts.DiscServerTimeout * 2):
   250  		t.Fatal("timed out")
   251  	}
   252  	require.False(t, discovery.Running())
   253  }
   254  
   255  func TestPeerPoolNotAllowedStopping(t *testing.T) {
   256  	// create and start server
   257  	key, err := crypto.GenerateKey()
   258  	require.NoError(t, err)
   259  	server := &p2p.Server{
   260  		Config: p2p.Config{
   261  			PrivateKey:  key,
   262  			NoDiscovery: true,
   263  		},
   264  	}
   265  	require.NoError(t, server.Start())
   266  	defer server.Stop()
   267  
   268  	discovery := discovery.NewDiscV5(key, server.ListenAddr, nil)
   269  	require.NoError(t, discovery.Start())
   270  	defer func() { assert.NoError(t, discovery.Stop()) }()
   271  	require.True(t, discovery.Running())
   272  
   273  	// start PeerPool
   274  	poolOpts := &Options{DefaultFastSync, DefaultSlowSync, time.Millisecond * 100, false, 100 * time.Millisecond, nil}
   275  	pool := NewPeerPool(discovery, nil, nil, poolOpts)
   276  	require.NoError(t, pool.Start(server))
   277  
   278  	// wait 2x timeout duration
   279  	<-time.After(pool.opts.DiscServerTimeout * 2)
   280  	require.True(t, discovery.Running())
   281  }
   282  
   283  func (s *PeerPoolSimulationSuite) TestUpdateTopicLimits() {
   284  	s.setupEthV5()
   285  	var err error
   286  
   287  	topic := discv5.Topic("cap=test")
   288  	config := map[discv5.Topic]params.Limits{
   289  		topic: params.NewLimits(1, 1),
   290  	}
   291  	peerPoolOpts := &Options{100 * time.Millisecond, 100 * time.Millisecond, 0, true, 100 * time.Millisecond, nil}
   292  	cache, err := newInMemoryCache()
   293  	s.Require().NoError(err)
   294  	peerPool := NewPeerPool(s.discovery[1], config, cache, peerPoolOpts)
   295  
   296  	// start peer pool
   297  	s.Require().NoError(peerPool.Start(s.peers[1]))
   298  	defer peerPool.Stop()
   299  
   300  	for _, topicPool := range peerPool.topics {
   301  		tp := topicPool.(*TopicPool)
   302  		s.Equal(1, tp.limits.Max)
   303  		s.Equal(1, tp.limits.Min)
   304  	}
   305  
   306  	// Updating TopicPool's limits
   307  	err = peerPool.UpdateTopic("cap=test", params.NewLimits(5, 10))
   308  	s.Require().NoError(err)
   309  	time.Sleep(1 * time.Millisecond)
   310  	for _, topicPool := range peerPool.topics {
   311  		tp := topicPool.(*TopicPool)
   312  		tp.mu.RLock()
   313  		defer tp.mu.RUnlock()
   314  
   315  		s.Equal(10, tp.limits.Max)
   316  		s.Equal(5, tp.limits.Min)
   317  	}
   318  }
   319  
// TestMailServerPeersDiscovery checks the full mail-server discovery flow:
// peers[0] registers under MailServerDiscoveryTopic, peers[1] runs a peer
// pool restricted to that topic, and the test verifies that the mail
// server is connected, reported in a discovery summary, cached, and
// eventually dropped again.
func (s *PeerPoolSimulationSuite) TestMailServerPeersDiscovery() {
	s.setupEthV5()

	// eliminate peer we won't use
	s.peers[2].Stop()

	// Buffered channels must be used because we expect the events
	// to be in the same order. Use a buffer length greater than
	// the expected number of events to avoid deadlock.
	poolEvents := make(chan string, 10)
	summaries := make(chan []*p2p.PeerInfo, 10)
	signal.SetDefaultNodeNotificationHandler(func(jsonEvent string) {
		var envelope struct {
			Type  string
			Event json.RawMessage
		}
		s.NoError(json.Unmarshal([]byte(jsonEvent), &envelope))

		switch typ := envelope.Type; typ {
		case signal.EventDiscoverySummary:
			poolEvents <- envelope.Type
			var summary []*p2p.PeerInfo
			s.NoError(json.Unmarshal(envelope.Event, &summary))
			// forward only non-empty summaries, so receivers never have
			// to distinguish empty from populated ones
			if len(summary) != 0 {
				summaries <- summary
			}
		}
	})
	defer signal.ResetDefaultNodeNotificationHandler()

	// subscribe for peer events before starting the peer pool
	events := make(chan *p2p.PeerEvent, 20)
	subscription := s.peers[1].SubscribeEvents(events)
	defer subscription.Unsubscribe()

	// create and start topic registry
	register := NewRegister(s.discovery[0], MailServerDiscoveryTopic)
	s.Require().NoError(register.Start())

	// create and start peer pool
	config := map[discv5.Topic]params.Limits{
		MailServerDiscoveryTopic: params.NewLimits(1, 1),
	}
	cache, err := newInMemoryCache()
	s.Require().NoError(err)
	peerPoolOpts := &Options{
		100 * time.Millisecond,
		100 * time.Millisecond,
		0,
		true,
		100 * time.Millisecond,
		// peers[0] is the node expected to be discovered as a mail server
		[]enode.ID{s.peers[0].Self().ID()},
	}
	peerPool := NewPeerPool(s.discovery[1], config, cache, peerPoolOpts)
	s.Require().NoError(peerPool.Start(s.peers[1]))
	defer peerPool.Stop()

	// wait for and verify the mail server peer
	connectedPeer := s.getPeerFromEvent(events, p2p.PeerEventTypeAdd)
	s.Equal(s.peers[0].Self().ID().String(), connectedPeer.String())

	// wait for a summary event to be sure that ConfirmAdded() was called
	s.Equal(signal.EventDiscoverySummary, s.getPoolEvent(poolEvents))
	summary := (<-summaries)
	s.Require().Len(summary, 1)
	s.Equal(s.peers[0].Self().ID().String(), summary[0].ID)

	// check cache
	cachedPeers := peerPool.cache.GetPeersRange(MailServerDiscoveryTopic, 5)
	s.Require().Len(cachedPeers, 1)
	s.Equal(discv5.PubkeyID(s.peers[0].Self().Pubkey()), cachedPeers[0].ID)

	// wait for another event as the peer should be removed
	disconnectedPeer := s.getPeerFromEvent(events, p2p.PeerEventTypeDrop)
	s.Equal(s.peers[0].Self().ID().String(), disconnectedPeer.String())
	s.Equal(signal.EventDiscoverySummary, s.getPoolEvent(poolEvents))
}