github.com/prysmaticlabs/prysm@v1.4.4/beacon-chain/p2p/service_test.go (about)

     1  package p2p
     2  
     3  import (
     4  	"context"
     5  	"crypto/ecdsa"
     6  	"fmt"
     7  	"net"
     8  	"testing"
     9  	"time"
    10  
    11  	"github.com/ethereum/go-ethereum/p2p/discover"
    12  	"github.com/ethereum/go-ethereum/p2p/enode"
    13  	"github.com/libp2p/go-libp2p"
    14  	"github.com/libp2p/go-libp2p-core/host"
    15  	"github.com/libp2p/go-libp2p-core/peer"
    16  	noise "github.com/libp2p/go-libp2p-noise"
    17  	"github.com/multiformats/go-multiaddr"
    18  	mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
    19  	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
    20  	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
    21  	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
    22  	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
    23  	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers"
    24  	"github.com/prysmaticlabs/prysm/shared/bytesutil"
    25  	"github.com/prysmaticlabs/prysm/shared/event"
    26  	"github.com/prysmaticlabs/prysm/shared/p2putils"
    27  	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
    28  	"github.com/prysmaticlabs/prysm/shared/testutil/require"
    29  	"github.com/prysmaticlabs/prysm/shared/timeutils"
    30  	logTest "github.com/sirupsen/logrus/hooks/test"
    31  )
    32  
// mockListener is a minimal stand-in for the discv5 listener used by the
// Service in tests. Only Self and Close are functional: Self returns the
// wrapped local node's record and Close is a no-op, which lets tests drive
// Service.Stop without a real discovery listener. Every other method panics
// if invoked, so a test exercising those paths fails loudly.
type mockListener struct {
	localNode *enode.LocalNode
}

// Self returns the ENR record of the mock's local node.
func (m mockListener) Self() *enode.Node {
	return m.localNode.Node()
}

// Close is intentionally a no-op so Service.Stop can run in tests.
func (mockListener) Close() {
	// no-op
}

func (mockListener) Lookup(enode.ID) []*enode.Node {
	panic("implement me")
}

func (mockListener) ReadRandomNodes(_ []*enode.Node) int {
	panic("implement me")
}

func (mockListener) Resolve(*enode.Node) *enode.Node {
	panic("implement me")
}

func (mockListener) Ping(*enode.Node) error {
	panic("implement me")
}

func (mockListener) RequestENR(*enode.Node) (*enode.Node, error) {
	panic("implement me")
}

func (mockListener) LocalNode() *enode.LocalNode {
	panic("implement me")
}

func (mockListener) RandomNodes() enode.Iterator {
	panic("implement me")
}
    72  
    73  func createHost(t *testing.T, port int) (host.Host, *ecdsa.PrivateKey, net.IP) {
    74  	_, pkey := createAddrAndPrivKey(t)
    75  	ipAddr := net.ParseIP("127.0.0.1")
    76  	listen, err := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))
    77  	require.NoError(t, err, "Failed to p2p listen")
    78  	h, err := libp2p.New(context.Background(), []libp2p.Option{privKeyOption(pkey), libp2p.ListenAddrs(listen), libp2p.Security(noise.ID, noise.New)}...)
    79  	require.NoError(t, err)
    80  	return h, pkey, ipAddr
    81  }
    82  
    83  func TestService_Stop_SetsStartedToFalse(t *testing.T) {
    84  	s, err := NewService(context.Background(), &Config{StateNotifier: &mock.MockStateNotifier{}})
    85  	require.NoError(t, err)
    86  	s.started = true
    87  	s.dv5Listener = &mockListener{}
    88  	assert.NoError(t, s.Stop())
    89  	assert.Equal(t, false, s.started)
    90  }
    91  
    92  func TestService_Stop_DontPanicIfDv5ListenerIsNotInited(t *testing.T) {
    93  	s, err := NewService(context.Background(), &Config{StateNotifier: &mock.MockStateNotifier{}})
    94  	require.NoError(t, err)
    95  	assert.NoError(t, s.Stop())
    96  }
    97  
    98  func TestService_Start_OnlyStartsOnce(t *testing.T) {
    99  	hook := logTest.NewGlobal()
   100  
   101  	cfg := &Config{
   102  		TCPPort:       2000,
   103  		UDPPort:       2000,
   104  		StateNotifier: &mock.MockStateNotifier{},
   105  	}
   106  	s, err := NewService(context.Background(), cfg)
   107  	require.NoError(t, err)
   108  	s.stateNotifier = &mock.MockStateNotifier{}
   109  	s.dv5Listener = &mockListener{}
   110  	exitRoutine := make(chan bool)
   111  	go func() {
   112  		s.Start()
   113  		<-exitRoutine
   114  	}()
   115  	// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
   116  	for sent := 0; sent == 0; {
   117  		sent = s.stateNotifier.StateFeed().Send(&feed.Event{
   118  			Type: statefeed.Initialized,
   119  			Data: &statefeed.InitializedData{
   120  				StartTime:             time.Now(),
   121  				GenesisValidatorsRoot: make([]byte, 32),
   122  			},
   123  		})
   124  	}
   125  	time.Sleep(time.Second * 2)
   126  	assert.Equal(t, true, s.started, "Expected service to be started")
   127  	s.Start()
   128  	require.LogsContain(t, hook, "Attempted to start p2p service when it was already started")
   129  	require.NoError(t, s.Stop())
   130  	exitRoutine <- true
   131  }
   132  
   133  func TestService_Status_NotRunning(t *testing.T) {
   134  	s := &Service{started: false}
   135  	s.dv5Listener = &mockListener{}
   136  	assert.ErrorContains(t, "not running", s.Status(), "Status returned wrong error")
   137  }
   138  
// TestListenForNewNodes boots a discv5 bootnode, starts five discovery-only
// peers pointed at it, then starts a full p2p Service and asserts that it
// discovers and connects to all five peers within the polling window.
func TestListenForNewNodes(t *testing.T) {
	// Setup bootnode.
	notifier := &mock.MockStateNotifier{}
	cfg := &Config{StateNotifier: notifier}
	port := 2000
	cfg.UDPPort = uint(port)
	_, pkey := createAddrAndPrivKey(t)
	ipAddr := net.ParseIP("127.0.0.1")
	genesisTime := timeutils.Now()
	genesisValidatorsRoot := make([]byte, 32)
	s := &Service{
		cfg:                   cfg,
		genesisTime:           genesisTime,
		genesisValidatorsRoot: genesisValidatorsRoot,
	}
	bootListener, err := s.createListener(ipAddr, pkey)
	require.NoError(t, err)
	defer bootListener.Close()

	// Use shorter period for testing. Restored on exit so other tests see
	// the production polling period.
	currentPeriod := pollingPeriod
	pollingPeriod = 1 * time.Second
	defer func() {
		pollingPeriod = currentPeriod
	}()

	bootNode := bootListener.Self()

	var listeners []*discover.UDPv5
	var hosts []host.Host
	// setup other nodes.
	cfg = &Config{
		BootstrapNodeAddr:   []string{bootNode.String()},
		Discv5BootStrapAddr: []string{bootNode.String()},
		MaxPeers:            30,
		StateNotifier:       notifier,
	}
	// NOTE(review): each iteration mutates the shared cfg pointer that the
	// previous iteration's Service still holds; this relies on
	// startDiscoveryV5 reading the ports before the next mutation — confirm.
	for i := 1; i <= 5; i++ {
		h, pkey, ipAddr := createHost(t, port+i)
		cfg.UDPPort = uint(port + i)
		cfg.TCPPort = uint(port + i)
		s := &Service{
			cfg:                   cfg,
			genesisTime:           genesisTime,
			genesisValidatorsRoot: genesisValidatorsRoot,
		}
		listener, err := s.startDiscoveryV5(ipAddr, pkey)
		assert.NoError(t, err, "Could not start discovery for node")
		listeners = append(listeners, listener)
		hosts = append(hosts, h)
	}
	defer func() {
		// Close down all peers.
		for _, listener := range listeners {
			listener.Close()
		}
	}()

	// close peers upon exit of test
	defer func() {
		for _, h := range hosts {
			if err := h.Close(); err != nil {
				t.Log(err)
			}
		}
	}()

	cfg.UDPPort = 14000
	cfg.TCPPort = 14001

	s, err = NewService(context.Background(), cfg)
	require.NoError(t, err)
	exitRoutine := make(chan bool)
	go func() {
		s.Start()
		<-exitRoutine
	}()
	time.Sleep(1 * time.Second)
	// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
	for sent := 0; sent == 0; {
		sent = s.stateNotifier.StateFeed().Send(&feed.Event{
			Type: statefeed.Initialized,
			Data: &statefeed.InitializedData{
				StartTime:             genesisTime,
				GenesisValidatorsRoot: genesisValidatorsRoot,
			},
		})
	}
	// Give discovery one polling period plus slack to find and dial peers.
	time.Sleep(4 * time.Second)
	assert.Equal(t, 5, len(s.host.Network().Peers()), "Not all peers added to peerstore")
	require.NoError(t, s.Stop())
	exitRoutine <- true
}
   232  
   233  func TestPeer_Disconnect(t *testing.T) {
   234  	h1, _, _ := createHost(t, 5000)
   235  	defer func() {
   236  		if err := h1.Close(); err != nil {
   237  			t.Log(err)
   238  		}
   239  	}()
   240  
   241  	s := &Service{
   242  		host: h1,
   243  	}
   244  
   245  	h2, _, ipaddr := createHost(t, 5001)
   246  	defer func() {
   247  		if err := h2.Close(); err != nil {
   248  			t.Log(err)
   249  		}
   250  	}()
   251  
   252  	h2Addr, err := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d/p2p/%s", ipaddr, 5001, h2.ID()))
   253  	require.NoError(t, err)
   254  	addrInfo, err := peer.AddrInfoFromP2pAddr(h2Addr)
   255  	require.NoError(t, err)
   256  	require.NoError(t, s.host.Connect(context.Background(), *addrInfo))
   257  	assert.Equal(t, 1, len(s.host.Network().Peers()), "Invalid number of peers")
   258  	assert.Equal(t, 1, len(s.host.Network().Conns()), "Invalid number of connections")
   259  	require.NoError(t, s.Disconnect(h2.ID()))
   260  	assert.Equal(t, 0, len(s.host.Network().Conns()), "Invalid number of connections")
   261  }
   262  
   263  func TestService_JoinLeaveTopic(t *testing.T) {
   264  	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
   265  	defer cancel()
   266  	s, err := NewService(ctx, &Config{StateNotifier: &mock.MockStateNotifier{}})
   267  	require.NoError(t, err)
   268  
   269  	go s.awaitStateInitialized()
   270  	fd := initializeStateWithForkDigest(ctx, t, s.stateNotifier.StateFeed())
   271  
   272  	assert.Equal(t, 0, len(s.joinedTopics))
   273  
   274  	topic := fmt.Sprintf(AttestationSubnetTopicFormat, fd, 42) + "/" + encoder.ProtocolSuffixSSZSnappy
   275  	topicHandle, err := s.JoinTopic(topic)
   276  	assert.NoError(t, err)
   277  	assert.Equal(t, 1, len(s.joinedTopics))
   278  
   279  	if topicHandle == nil {
   280  		t.Fatal("topic is nil")
   281  	}
   282  
   283  	sub, err := topicHandle.Subscribe()
   284  	assert.NoError(t, err)
   285  
   286  	// Try leaving topic that has subscriptions.
   287  	want := "cannot close topic: outstanding event handlers or subscriptions"
   288  	assert.ErrorContains(t, want, s.LeaveTopic(topic))
   289  
   290  	// After subscription is cancelled, leaving topic should not result in error.
   291  	sub.Cancel()
   292  	assert.NoError(t, s.LeaveTopic(topic))
   293  }
   294  
   295  // initializeStateWithForkDigest sets up the state feed initialized event and returns the fork
   296  // digest associated with that genesis event.
   297  func initializeStateWithForkDigest(ctx context.Context, t *testing.T, ef *event.Feed) [4]byte {
   298  	gt := timeutils.Now()
   299  	gvr := bytesutil.PadTo([]byte("genesis validator root"), 32)
   300  	for n := 0; n == 0; {
   301  		if ctx.Err() != nil {
   302  			t.Fatal(ctx.Err())
   303  		}
   304  		n = ef.Send(&feed.Event{
   305  			Type: statefeed.Initialized,
   306  			Data: &statefeed.InitializedData{
   307  				StartTime:             gt,
   308  				GenesisValidatorsRoot: gvr,
   309  			},
   310  		})
   311  	}
   312  
   313  	fd, err := p2putils.CreateForkDigest(gt, gvr)
   314  	require.NoError(t, err)
   315  
   316  	time.Sleep(50 * time.Millisecond) // wait for pubsub filter to initialize.
   317  
   318  	return fd
   319  }
   320  
   321  func TestService_connectWithPeer(t *testing.T) {
   322  	tests := []struct {
   323  		name    string
   324  		peers   *peers.Status
   325  		info    peer.AddrInfo
   326  		wantErr string
   327  	}{
   328  		{
   329  			name: "bad peer",
   330  			peers: func() *peers.Status {
   331  				ps := peers.NewStatus(context.Background(), &peers.StatusConfig{
   332  					ScorerParams: &scorers.Config{},
   333  				})
   334  				for i := 0; i < 10; i++ {
   335  					ps.Scorers().BadResponsesScorer().Increment("bad")
   336  				}
   337  				return ps
   338  			}(),
   339  			info:    peer.AddrInfo{ID: "bad"},
   340  			wantErr: "refused to connect to bad peer",
   341  		},
   342  	}
   343  	for _, tt := range tests {
   344  		t.Run(tt.name, func(t *testing.T) {
   345  			h, _, _ := createHost(t, 34567)
   346  			defer func() {
   347  				if err := h.Close(); err != nil {
   348  					t.Fatal(err)
   349  				}
   350  			}()
   351  			ctx := context.Background()
   352  			s := &Service{
   353  				host:  h,
   354  				peers: tt.peers,
   355  			}
   356  			err := s.connectWithPeer(ctx, tt.info)
   357  			if len(tt.wantErr) > 0 {
   358  				require.ErrorContains(t, tt.wantErr, err)
   359  			} else {
   360  				require.NoError(t, err)
   361  			}
   362  		})
   363  	}
   364  }