github.com/ethereum-optimism/optimism@v1.7.2/op-node/p2p/host_test.go

package p2p

import (
	"context"
	"crypto/rand"
	"math/big"
	"net"
	"testing"
	"time"

	ds "github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/sync"
	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
	ma "github.com/multiformats/go-multiaddr"
	"github.com/stretchr/testify/require"
	"golang.org/x/exp/slices"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/rpc"

	"github.com/ethereum-optimism/optimism/op-node/metrics"
	"github.com/ethereum-optimism/optimism/op-node/p2p/store"
	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-service/eth"
	"github.com/ethereum-optimism/optimism/op-service/testlog"
	"github.com/ethereum-optimism/optimism/op-service/testutils"
)

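// TestingConfig returns a minimal p2p Config for tests: transport security is
// disabled, a single Yamux mux is used, discovery is off (tests peer statically),
// and the host binds to an ephemeral TCP port on 127.0.0.1.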
func TestingConfig(t *testing.T) *Config {
	p, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
	require.NoError(t, err, "failed to generate new p2p priv key")

	return &Config{
		Priv:                (p).(*crypto.Secp256k1PrivateKey),
		DisableP2P:          false,
		NoDiscovery:         true, // we statically peer during most tests.
		ListenIP:            net.IP{127, 0, 0, 1},
		ListenTCPPort:       0, // bind to any available port
		StaticPeers:         nil,
		HostMux:             []libp2p.Option{YamuxC()},
		NoTransportSecurity: true,
		PeersLo:             1,
		PeersHi:             10,
		PeersGrace:          time.Second * 10,
		NAT:                 false,
		UserAgent:           "optimism-testing",
		TimeoutNegotiation:  time.Second * 2,
		TimeoutAccept:       time.Second * 2,
		TimeoutDial:         time.Second * 2,
		Store:               sync.MutexWrap(ds.NewMapDatastore()),
	}
}

// Simplified p2p test, to debug/test basic libp2p things with
func TestP2PSimple(t *testing.T) {
	confA := TestingConfig(t)
	confB := TestingConfig(t)
	hostA, err := confA.Host(testlog.Logger(t, log.LevelError).New("host", "A"), nil, metrics.NoopMetrics)
	require.NoError(t, err, "failed to launch host A")
	defer hostA.Close()
	hostB, err := confB.Host(testlog.Logger(t, log.LevelError).New("host", "B"), nil, metrics.NoopMetrics)
	require.NoError(t, err, "failed to launch host B")
	defer hostB.Close()
	err = hostA.Connect(context.Background(), peer.AddrInfo{ID: hostB.ID(), Addrs: hostB.Addrs()})
	require.NoError(t, err, "failed to connect to peer B from peer A")
	require.Equal(t, hostB.Network().Connectedness(hostA.ID()), network.Connected)
}
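
// TestP2PSimpleTimeout is an illustrative sketch, not part of the upstream file:
// it repeats the TestP2PSimple flow but bounds the dial with a context timeout,
// so a hung connect fails fast instead of waiting for the overall test deadline.
// The test name and the 5-second budget are assumptions made for this example.
func TestP2PSimpleTimeout(t *testing.T) {
	confA := TestingConfig(t)
	confB := TestingConfig(t)
	hostA, err := confA.Host(testlog.Logger(t, log.LevelError).New("host", "A"), nil, metrics.NoopMetrics)
	require.NoError(t, err, "failed to launch host A")
	defer hostA.Close()
	hostB, err := confB.Host(testlog.Logger(t, log.LevelError).New("host", "B"), nil, metrics.NoopMetrics)
	require.NoError(t, err, "failed to launch host B")
	defer hostB.Close()

	// Bound the dial: Connect must either succeed or fail within 5 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	require.NoError(t, hostA.Connect(ctx, peer.AddrInfo{ID: hostB.ID(), Addrs: hostB.Addrs()}))
	require.Equal(t, network.Connected, hostB.Network().Connectedness(hostA.ID()))
}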

// mockGossipIn is a no-op gossip payload sink; tests pass it to NewNodeP2P and can
// set OnUnsafeL2PayloadFn to observe the payloads a node receives.
type mockGossipIn struct {
	OnUnsafeL2PayloadFn func(ctx context.Context, from peer.ID, msg *eth.ExecutionPayloadEnvelope) error
}

func (m *mockGossipIn) OnUnsafeL2Payload(ctx context.Context, from peer.ID, msg *eth.ExecutionPayloadEnvelope) error {
	if m.OnUnsafeL2PayloadFn != nil {
		return m.OnUnsafeL2PayloadFn(ctx, from, msg)
	}
	return nil
}

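// recordingGossipIn is an illustrative sketch, not part of the upstream file: it
// shows how mockGossipIn's callback can capture gossiped payloads on a channel
// when a test wants to assert on them. The helper name is an assumption.
func recordingGossipIn(received chan *eth.ExecutionPayloadEnvelope) *mockGossipIn {
	return &mockGossipIn{
		OnUnsafeL2PayloadFn: func(ctx context.Context, from peer.ID, msg *eth.ExecutionPayloadEnvelope) error {
			// Forward every payload to the test's channel instead of dropping it.
			received <- msg
			return nil
		},
	}
}
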
// Full setup, using negotiated transport security and muxes
func TestP2PFull(t *testing.T) {
	pA, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
	require.NoError(t, err, "failed to generate new p2p priv key")
	pB, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
	require.NoError(t, err, "failed to generate new p2p priv key")

	confA := Config{
		Priv:                (pA).(*crypto.Secp256k1PrivateKey),
		DisableP2P:          false,
		NoDiscovery:         true,
		ListenIP:            net.IP{127, 0, 0, 1},
		ListenTCPPort:       0, // bind to any available port
		StaticPeers:         nil,
		HostMux:             []libp2p.Option{YamuxC(), MplexC()},
		HostSecurity:        []libp2p.Option{NoiseC(), TlsC()},
		NoTransportSecurity: false,
		PeersLo:             1,
		PeersHi:             10,
		PeersGrace:          time.Second * 10,
		NAT:                 false,
		UserAgent:           "optimism-testing",
		TimeoutNegotiation:  time.Second * 2,
		TimeoutAccept:       time.Second * 2,
		TimeoutDial:         time.Second * 2,
		Store:               sync.MutexWrap(ds.NewMapDatastore()),
	}
	// copy config A, and change the settings for B
	confB := confA
	confB.Priv = (pB).(*crypto.Secp256k1PrivateKey)
	confB.Store = sync.MutexWrap(ds.NewMapDatastore())
	// TODO: maybe swap the order of sec/mux preferences, to test that negotiation works

	runCfgA := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}}
	runCfgB := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}}

	logA := testlog.Logger(t, log.LevelError).New("host", "A")
	nodeA, err := NewNodeP2P(context.Background(), &rollup.Config{}, logA, &confA, &mockGossipIn{}, nil, runCfgA, metrics.NoopMetrics, false)
	require.NoError(t, err)
	defer nodeA.Close()

	conns := make(chan network.Conn, 1)
	hostA := nodeA.Host()
	hostA.Network().Notify(&network.NotifyBundle{
		ConnectedF: func(n network.Network, conn network.Conn) {
			conns <- conn
		}})

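	// Expose node A's p2p API over in-process RPC, so the test can drive it through the opp2p client.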
	backend := NewP2PAPIBackend(nodeA, logA, nil)
	srv := rpc.NewServer()
	require.NoError(t, srv.RegisterName("opp2p", backend))
	client := rpc.DialInProc(srv)
	p2pClientA := NewClient(client)

	// Set up B to connect statically
	confB.StaticPeers, err = peer.AddrInfoToP2pAddrs(&peer.AddrInfo{ID: hostA.ID(), Addrs: hostA.Addrs()})
	require.NoError(t, err)

	// Add the address of host B itself; it shouldn't connect or cause issues.
	idB, err := peer.IDFromPublicKey(confB.Priv.GetPublic())
	require.NoError(t, err)
	altAddrB, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/12345/p2p/" + idB.String())
	require.NoError(t, err)
	confB.StaticPeers = append(confB.StaticPeers, altAddrB)

	logB := testlog.Logger(t, log.LevelError).New("host", "B")

	nodeB, err := NewNodeP2P(context.Background(), &rollup.Config{}, logB, &confB, &mockGossipIn{}, nil, runCfgB, metrics.NoopMetrics, false)
	require.NoError(t, err)
	defer nodeB.Close()
	hostB := nodeB.Host()

	require.True(t, nodeB.IsStatic(hostA.ID()), "node A must be static peer of node B")
	require.False(t, nodeB.IsStatic(hostB.ID()), "node B must not be static peer of node B itself")

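	// Wait up to 30 seconds for the inbound connection from B, reported via the ConnectedF notification registered above.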
	select {
	case <-time.After(30 * time.Second):
		t.Fatal("failed to connect new host")
	case c := <-conns:
		require.Equal(t, hostB.ID(), c.RemotePeer())
	}

	ctx := context.Background()

	selfInfoA, err := p2pClientA.Self(ctx)
	require.NoError(t, err)
	require.Equal(t, selfInfoA.PeerID, hostA.ID())

	_, err = p2pClientA.DiscoveryTable(ctx)
	// rpc does not preserve error type
	require.Equal(t, err.Error(), ErrDisabledDiscovery.Error(), "expecting discv5 to be disabled")

	require.NoError(t, p2pClientA.BlockPeer(ctx, hostB.ID()))
	blockedPeers, err := p2pClientA.ListBlockedPeers(ctx)
	require.NoError(t, err)
	require.Equal(t, []peer.ID{hostB.ID()}, blockedPeers)
	require.NoError(t, p2pClientA.UnblockPeer(ctx, hostB.ID()))

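	// Empty or nil arguments must be rejected.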
	require.Error(t, p2pClientA.BlockAddr(ctx, nil))
	require.Error(t, p2pClientA.UnblockAddr(ctx, nil))
	require.Error(t, p2pClientA.BlockSubnet(ctx, nil))
	require.Error(t, p2pClientA.UnblockSubnet(ctx, nil))
	require.Error(t, p2pClientA.BlockPeer(ctx, ""))
	require.Error(t, p2pClientA.UnblockPeer(ctx, ""))
	require.Error(t, p2pClientA.ProtectPeer(ctx, ""))
	require.Error(t, p2pClientA.UnprotectPeer(ctx, ""))
	require.Error(t, p2pClientA.ConnectPeer(ctx, ""))
	require.Error(t, p2pClientA.DisconnectPeer(ctx, ""))

	require.NoError(t, p2pClientA.BlockAddr(ctx, net.IP{123, 123, 123, 123}))
	blockedIPs, err := p2pClientA.ListBlockedAddrs(ctx)
	require.NoError(t, err)
	require.Len(t, blockedIPs, 1)
	require.Equal(t, net.IP{123, 123, 123, 123}, blockedIPs[0].To4())
	require.NoError(t, p2pClientA.UnblockAddr(ctx, net.IP{123, 123, 123, 123}))

	subnet := &net.IPNet{IP: net.IP{123, 0, 0, 0}.To16(), Mask: net.IPMask{0xff, 0, 0, 0}}
	require.NoError(t, p2pClientA.BlockSubnet(ctx, subnet))
	blockedSubnets, err := p2pClientA.ListBlockedSubnets(ctx)
	require.NoError(t, err)
	require.Len(t, blockedSubnets, 1)
	require.Equal(t, subnet, blockedSubnets[0])
	require.NoError(t, p2pClientA.UnblockSubnet(ctx, subnet))

	// Ask host A for all the peer information it has
	peerDump, err := p2pClientA.Peers(ctx, false)
	require.Nil(t, err)
	require.Contains(t, peerDump.Peers, hostB.ID().String())
	data := peerDump.Peers[hostB.ID().String()]
	require.Equal(t, data.Direction, network.DirInbound)

	stats, err := p2pClientA.PeerStats(ctx)
	require.Nil(t, err)
	require.Equal(t, uint(1), stats.Connected)

	// disconnect
	hostBId := hostB.ID().String()
	peerDump, err = p2pClientA.Peers(ctx, false)
	require.Nil(t, err)
	data = peerDump.Peers[hostBId]
	require.NotNil(t, data)
	require.NoError(t, p2pClientA.DisconnectPeer(ctx, hostB.ID()))
	peerDump, err = p2pClientA.Peers(ctx, false)
	require.Nil(t, err)
	data = peerDump.Peers[hostBId]
	require.Nil(t, data)

	// reconnect
	addrsB, err := peer.AddrInfoToP2pAddrs(&peer.AddrInfo{ID: hostB.ID(), Addrs: hostB.Addrs()})
	require.NoError(t, err)
	require.NoError(t, p2pClientA.ConnectPeer(ctx, addrsB[0].String()))

	require.NoError(t, p2pClientA.ProtectPeer(ctx, hostB.ID()))
	require.NoError(t, p2pClientA.UnprotectPeer(ctx, hostB.ID()))
}

func TestDiscovery(t *testing.T) {
	t.Skipf("skipping flaky test")

	pA, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
	require.NoError(t, err, "failed to generate new p2p priv key")
	pB, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
	require.NoError(t, err, "failed to generate new p2p priv key")
	pC, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
	require.NoError(t, err, "failed to generate new p2p priv key")

	logA := testlog.Logger(t, log.LevelError).New("host", "A")
	logB := testlog.Logger(t, log.LevelError).New("host", "B")
	logC := testlog.Logger(t, log.LevelError).New("host", "C")

	discDBA, err := enode.OpenDB("") // "" = memory db
	require.NoError(t, err)
	discDBB, err := enode.OpenDB("")
	require.NoError(t, err)
	discDBC, err := enode.OpenDB("")
	require.NoError(t, err)

	rollupCfg := &rollup.Config{L2ChainID: big.NewInt(901)}

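	// Unlike TestingConfig, discovery is enabled here: AdvertiseIP, ListenUDPPort and
	// DiscoveryDB configure the discv5 service that DiscoveryProcess runs below.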
	confA := Config{
		Priv:                (pA).(*crypto.Secp256k1PrivateKey),
		DisableP2P:          false,
		NoDiscovery:         false,
		AdvertiseIP:         net.IP{127, 0, 0, 1},
		ListenUDPPort:       0, // bind to any available port
		ListenIP:            net.IP{127, 0, 0, 1},
		ListenTCPPort:       0, // bind to any available port
		StaticPeers:         nil,
		HostMux:             []libp2p.Option{YamuxC(), MplexC()},
		HostSecurity:        []libp2p.Option{NoiseC(), TlsC()},
		NoTransportSecurity: false,
		PeersLo:             1,
		PeersHi:             10,
		PeersGrace:          time.Second * 10,
		NAT:                 false,
		UserAgent:           "optimism-testing",
		TimeoutNegotiation:  time.Second * 2,
		TimeoutAccept:       time.Second * 2,
		TimeoutDial:         time.Second * 2,
		Store:               sync.MutexWrap(ds.NewMapDatastore()),
		DiscoveryDB:         discDBA,
	}
	// copy config A, and change the settings for B
	confB := confA
	confB.Priv = (pB).(*crypto.Secp256k1PrivateKey)
	confB.Store = sync.MutexWrap(ds.NewMapDatastore())
	confB.DiscoveryDB = discDBB

	runCfgA := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}}
	runCfgB := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}}
	runCfgC := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}}

	resourcesCtx, resourcesCancel := context.WithCancel(context.Background())
	defer resourcesCancel()

	nodeA, err := NewNodeP2P(context.Background(), rollupCfg, logA, &confA, &mockGossipIn{}, nil, runCfgA, metrics.NoopMetrics, false)
	require.NoError(t, err)
	defer nodeA.Close()
	hostA := nodeA.Host()
	go nodeA.DiscoveryProcess(resourcesCtx, logA, rollupCfg, 10)

	// Add A as bootnode to B
	confB.Bootnodes = []*enode.Node{nodeA.Dv5Udp().Self()}
	// Copy B config to C, and ensure they have a different priv / peerstore / discovery DB
	confC := confB
	confC.Priv = (pC).(*crypto.Secp256k1PrivateKey)
	confC.Store = sync.MutexWrap(ds.NewMapDatastore())
	confC.DiscoveryDB = discDBC

	// Start B
	nodeB, err := NewNodeP2P(context.Background(), rollupCfg, logB, &confB, &mockGossipIn{}, nil, runCfgB, metrics.NoopMetrics, false)
	require.NoError(t, err)
	defer nodeB.Close()
	hostB := nodeB.Host()
	go nodeB.DiscoveryProcess(resourcesCtx, logB, rollupCfg, 10)

	// Track connections to B
	connsB := make(chan network.Conn, 2)
	hostB.Network().Notify(&network.NotifyBundle{
		ConnectedF: func(n network.Network, conn network.Conn) {
			log.Info("connection to B", "peer", conn.RemotePeer())
			connsB <- conn
		}})

	// Start C
	nodeC, err := NewNodeP2P(context.Background(), rollupCfg, logC, &confC, &mockGossipIn{}, nil, runCfgC, metrics.NoopMetrics, false)
	require.NoError(t, err)
	defer nodeC.Close()
	hostC := nodeC.Host()
	go nodeC.DiscoveryProcess(resourcesCtx, logC, rollupCfg, 10)

	// B and C don't know each other yet, but both have A as a bootnode.
	// It should only be a matter of time for them to connect, if they discover each other via A.
	timeout := time.After(time.Second * 60)
	var peersOfB []peer.ID
	// B should be connected to the bootnode (A) it used (it's a valid optimism node to connect to here)
	// C should also be connected, although this one might take more time to discover
	for !slices.Contains(peersOfB, hostA.ID()) || !slices.Contains(peersOfB, hostC.ID()) {
		select {
		case <-timeout:
			var peers []string
			for _, id := range peersOfB {
				peers = append(peers, id.String())
			}
			t.Fatalf("timeout reached - expected host A: %v and host C: %v to be in %v", hostA.ID().String(), hostC.ID().String(), peers)
		case c := <-connsB:
			peersOfB = append(peersOfB, c.RemotePeer())
		}
	}

	// Check that among known connections (B-A, B-C), we have metadata
	type mdcheck struct {
		n1 *NodeP2P
		n2 *NodeP2P
	}
	cases := []mdcheck{
		{nodeB, nodeA},
		{nodeB, nodeC},
	}
	for _, c := range cases {
		// make peerstore metadata available
		eps, ok := c.n1.Host().Peerstore().(store.ExtendedPeerstore)
		require.True(t, ok)
		// confirm n1 has metadata about n2
		md, err := eps.GetPeerMetadata(c.n2.Host().ID())
		require.NoError(t, err)
		require.NotEmpty(t, md.ENR)
		require.Equal(t, uint64(901), md.OPStackID)
	}
}

// Most tests should use mocknets instead of using the actual local host network
func TestP2PMocknet(t *testing.T) {
	mnet, err := mocknet.FullMeshConnected(3)
	require.NoError(t, err, "failed to setup mocknet")
	defer mnet.Close()
	hosts := mnet.Hosts()
	hostA, hostB, hostC := hosts[0], hosts[1], hosts[2]
	require.Equal(t, hostA.Network().Connectedness(hostB.ID()), network.Connected)
	require.Equal(t, hostA.Network().Connectedness(hostC.ID()), network.Connected)
	require.Equal(t, hostB.Network().Connectedness(hostC.ID()), network.Connected)
}
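
// TestP2PMocknetStream is an illustrative sketch, not part of the upstream file:
// it shows that a mocknet mesh can also carry application streams, which is useful
// when a test needs traffic without binding real sockets. The test name and the
// "/optimism-testing/echo/1" protocol ID are assumptions made for this example.
func TestP2PMocknetStream(t *testing.T) {
	mnet, err := mocknet.FullMeshConnected(2)
	require.NoError(t, err, "failed to setup mocknet")
	defer mnet.Close()
	hostA, hostB := mnet.Hosts()[0], mnet.Hosts()[1]

	// Host B accepts streams for the test protocol and signals the test when one arrives.
	accepted := make(chan struct{})
	hostB.SetStreamHandler("/optimism-testing/echo/1", func(s network.Stream) {
		_ = s.Close()
		close(accepted)
	})

	// Host A opens a stream to B over the in-memory mocknet transport.
	s, err := hostA.NewStream(context.Background(), hostB.ID(), "/optimism-testing/echo/1")
	require.NoError(t, err)
	_ = s.Close()

	select {
	case <-accepted:
	case <-time.After(5 * time.Second):
		t.Fatal("stream handler on host B was never invoked")
	}
}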