github.com/ari-anchor/sei-tendermint@v0.0.0-20230519144642-dc826b7b56bb/internal/p2p/p2ptest/network.go

package p2ptest

import (
	"context"
	"math/rand"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"

	"github.com/ari-anchor/sei-tendermint/crypto"
	"github.com/ari-anchor/sei-tendermint/crypto/ed25519"
	"github.com/ari-anchor/sei-tendermint/internal/p2p"
	"github.com/ari-anchor/sei-tendermint/libs/log"
	"github.com/ari-anchor/sei-tendermint/types"
)

// Network sets up an in-memory network that can be used for high-level P2P
// testing. It creates an arbitrary number of nodes that are connected to each
// other, and can open channels across all nodes with custom reactors.
type Network struct {
	Nodes map[types.NodeID]*Node

	logger        log.Logger
	memoryNetwork *p2p.MemoryNetwork
	cancel        context.CancelFunc
}

// NetworkOptions is an argument structure to parameterize the
// MakeNetwork function.
type NetworkOptions struct {
	NumNodes   int
	BufferSize int
	NodeOpts   NodeOptions
}

type NodeOptions struct {
	MaxPeers     uint16
	MaxConnected uint16
	MaxRetryTime time.Duration
}

func (opts *NetworkOptions) setDefaults() {
	if opts.BufferSize == 0 {
		opts.BufferSize = 1
	}
}

// MakeNetwork creates a test network with the given number of nodes and
// connects them to each other.
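//
// A minimal usage sketch from a consuming test (ctx and t come from the
// caller; the node count is illustrative):
//
//	network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 3})
//	network.Start(ctx, t) // dials all nodes into a fully connected mesh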
func MakeNetwork(ctx context.Context, t *testing.T, opts NetworkOptions) *Network {
	opts.setDefaults()
	logger := log.NewNopLogger()
	network := &Network{
		Nodes:         map[types.NodeID]*Node{},
		logger:        logger,
		memoryNetwork: p2p.NewMemoryNetwork(logger, opts.BufferSize),
	}

	for i := 0; i < opts.NumNodes; i++ {
		node := network.MakeNode(ctx, t, opts.NodeOpts)
		network.Nodes[node.NodeID] = node
	}

	return network
}

// Start starts the network by setting up a list of node addresses to dial in
// addition to creating a peer update subscription for each node. Finally, all
// nodes are connected to each other.
func (n *Network) Start(ctx context.Context, t *testing.T) {
	ctx, n.cancel = context.WithCancel(ctx)
	t.Cleanup(n.cancel)

	// Set up a list of node addresses to dial, and a peer update subscription
	// for each node.
	dialQueue := []p2p.NodeAddress{}
	subs := map[types.NodeID]*p2p.PeerUpdates{}
	subctx, subcancel := context.WithCancel(ctx)
	defer subcancel()
	for _, node := range n.Nodes {
		dialQueue = append(dialQueue, node.NodeAddress)
		subs[node.NodeID] = node.PeerManager.Subscribe(subctx)
	}

	// For each node, dial the nodes that it does not yet have a connection to
	// (either inbound or outbound), and wait for both sides to confirm the
	// connection via the subscriptions.
	for i, sourceAddress := range dialQueue {
		sourceNode := n.Nodes[sourceAddress.NodeID]
		sourceSub := subs[sourceAddress.NodeID]

		for _, targetAddress := range dialQueue[i+1:] { // nodes <i already connected
			targetNode := n.Nodes[targetAddress.NodeID]
			targetSub := subs[targetAddress.NodeID]
			added, err := sourceNode.PeerManager.Add(targetAddress)
			require.NoError(t, err)
			require.True(t, added)

			select {
			case <-ctx.Done():
				require.Fail(t, "operation canceled")
			case peerUpdate := <-sourceSub.Updates():
				require.Equal(t, targetNode.NodeID, peerUpdate.NodeID)
				require.Equal(t, p2p.PeerStatusUp, peerUpdate.Status)
			case <-time.After(30 * time.Second):
				require.Fail(t, "timed out waiting for peer", "%v dialing %v",
					sourceNode.NodeID, targetNode.NodeID)
			}

			select {
			case <-ctx.Done():
				require.Fail(t, "operation canceled")
			case peerUpdate := <-targetSub.Updates():
				peerUpdate.Channels = nil
				require.Equal(t, p2p.PeerUpdate{
					NodeID: sourceNode.NodeID,
					Status: p2p.PeerStatusUp,
				}, peerUpdate)
			case <-time.After(30 * time.Second):
				require.Fail(t, "timed out waiting for peer", "%v accepting %v",
					targetNode.NodeID, sourceNode.NodeID)
			}
			// Add the address to the target as well, so it's able to dial the
			// source back should that ever be necessary.
			added, err = targetNode.PeerManager.Add(sourceAddress)
			require.NoError(t, err)
			require.True(t, added)
		}
	}
}

// NodeIDs returns the network's node IDs.
func (n *Network) NodeIDs() []types.NodeID {
	ids := []types.NodeID{}
	for id := range n.Nodes {
		ids = append(ids, id)
	}
	return ids
}

// MakeChannels makes a channel on all nodes and returns them, automatically
// doing error checks and cleanups.
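//
// A typical sequence from a test (a sketch; the channel ID is arbitrary and
// chDesc comes from this package's MakeChannelDesc helper):
//
//	chDesc := p2ptest.MakeChannelDesc(p2p.ChannelID(100))
//	channels := network.MakeChannels(ctx, t, chDesc)
//	ch := channels[network.RandomNode().NodeID]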
func (n *Network) MakeChannels(
	ctx context.Context,
	t *testing.T,
	chDesc *p2p.ChannelDescriptor,
) map[types.NodeID]*p2p.Channel {
	channels := map[types.NodeID]*p2p.Channel{}
	for _, node := range n.Nodes {
		channels[node.NodeID] = node.MakeChannel(ctx, t, chDesc)
	}
	return channels
}

// MakeChannelsNoCleanup makes a channel on all nodes and returns them,
// automatically doing error checks. The caller must ensure proper cleanup of
// all the channels.
func (n *Network) MakeChannelsNoCleanup(
	ctx context.Context,
	t *testing.T,
	chDesc *p2p.ChannelDescriptor,
) map[types.NodeID]*p2p.Channel {
	channels := map[types.NodeID]*p2p.Channel{}
	for _, node := range n.Nodes {
		channels[node.NodeID] = node.MakeChannelNoCleanup(ctx, t, chDesc)
	}
	return channels
}

// RandomNode returns a random node.
func (n *Network) RandomNode() *Node {
	nodes := make([]*Node, 0, len(n.Nodes))
	for _, node := range n.Nodes {
		nodes = append(nodes, node)
	}
	return nodes[rand.Intn(len(nodes))] // nolint:gosec
}

// Peers returns a node's peers (i.e. everyone except itself).
func (n *Network) Peers(id types.NodeID) []*Node {
	peers := make([]*Node, 0, len(n.Nodes)-1)
	for _, peer := range n.Nodes {
		if peer.NodeID != id {
			peers = append(peers, peer)
		}
	}
	return peers
}

// Remove removes a node from the network, stopping it and waiting for all other
// nodes to pick up the disconnection.
func (n *Network) Remove(ctx context.Context, t *testing.T, id types.NodeID) {
	require.Contains(t, n.Nodes, id)
	node := n.Nodes[id]
	delete(n.Nodes, id)

	subs := []*p2p.PeerUpdates{}
	subctx, subcancel := context.WithCancel(ctx)
	defer subcancel()
	for _, peer := range n.Nodes {
		sub := peer.PeerManager.Subscribe(subctx)
		subs = append(subs, sub)
	}

	require.NoError(t, node.Transport.Close())
	node.cancel()
	if node.Router.IsRunning() {
		node.Router.Stop()
		node.Router.Wait()
	}

	for _, sub := range subs {
		RequireUpdate(t, sub, p2p.PeerUpdate{
			NodeID: node.NodeID,
			Status: p2p.PeerStatusDown,
		})
	}
}

// Node is a node in a Network, with a Router and a PeerManager.
type Node struct {
	NodeID      types.NodeID
	NodeInfo    types.NodeInfo
	NodeAddress p2p.NodeAddress
	PrivKey     crypto.PrivKey
	Router      *p2p.Router
	PeerManager *p2p.PeerManager
	Transport   *p2p.MemoryTransport

	cancel context.CancelFunc
}

// MakeNode creates a new Node configured for the network with a
// running peer manager, but does not add it to the existing
// network. Callers are responsible for updating peering relationships.
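//
// Sketch of wiring a new node into a started network by hand (the dial via
// PeerManager.Add mirrors what Start does for the initial nodes; names are
// illustrative):
//
//	newNode := network.MakeNode(ctx, t, p2ptest.NodeOptions{})
//	for _, peer := range network.Nodes {
//		added, err := peer.PeerManager.Add(newNode.NodeAddress)
//		require.NoError(t, err)
//		require.True(t, added)
//	}
//	network.Nodes[newNode.NodeID] = newNode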
func (n *Network) MakeNode(ctx context.Context, t *testing.T, opts NodeOptions) *Node {
	ctx, cancel := context.WithCancel(ctx)

	privKey := ed25519.GenPrivKey()
	nodeID := types.NodeIDFromPubKey(privKey.PubKey())
	nodeInfo := types.NodeInfo{
		NodeID:     nodeID,
		ListenAddr: "0.0.0.0:0", // FIXME: We have to fake this for now.
		Moniker:    string(nodeID),
	}

	transport := n.memoryNetwork.CreateTransport(nodeID)
	ep, err := transport.Endpoint()
	require.NoError(t, err)
	require.NotNil(t, ep, "transport not listening on an endpoint")

	maxRetryTime := 1000 * time.Millisecond
	if opts.MaxRetryTime > 0 {
		maxRetryTime = opts.MaxRetryTime
	}

	peerManager, err := p2p.NewPeerManager(n.logger, nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{
		MinRetryTime:    10 * time.Millisecond,
		MaxRetryTime:    maxRetryTime,
		RetryTimeJitter: time.Millisecond,
		MaxPeers:        opts.MaxPeers,
		MaxConnected:    opts.MaxConnected,
	})
	require.NoError(t, err)

	router, err := p2p.NewRouter(
		n.logger,
		p2p.NopMetrics(),
		privKey,
		peerManager,
		func() *types.NodeInfo { return &nodeInfo },
		transport,
		ep,
		nil,
		p2p.RouterOptions{DialSleep: func(_ context.Context) {}},
	)

	require.NoError(t, err)
	require.NoError(t, router.Start(ctx))

	t.Cleanup(func() {
		if router.IsRunning() {
			router.Stop()
			router.Wait()
		}
		require.NoError(t, transport.Close())
		cancel()
	})

	return &Node{
		NodeID:      nodeID,
		NodeInfo:    nodeInfo,
		NodeAddress: ep.NodeAddress(nodeID),
		PrivKey:     privKey,
		Router:      router,
		PeerManager: peerManager,
		Transport:   transport,
		cancel:      cancel,
	}
}

// MakeChannel opens a channel, with automatic error handling and cleanup. On
// test cleanup, it also checks that the channel is empty, to make sure
// all expected messages have been asserted.
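//
// Sketch, assuming this package's RequireSend and RequireEmpty helpers and an
// already-connected peer (msg is a value of the channel's registered message
// type):
//
//	ch := node.MakeChannel(ctx, t, chDesc)
//	p2ptest.RequireSend(ctx, t, ch, p2p.Envelope{To: peer.NodeID, Message: msg})
//	p2ptest.RequireEmpty(ctx, t, ch) // also enforced automatically on cleanup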
func (n *Node) MakeChannel(
	ctx context.Context,
	t *testing.T,
	chDesc *p2p.ChannelDescriptor,
) *p2p.Channel {
	ctx, cancel := context.WithCancel(ctx)
	channel, err := n.Router.OpenChannel(ctx, chDesc)
	require.NoError(t, err)
	t.Cleanup(func() {
		RequireEmpty(ctx, t, channel)
		cancel()
	})
	return channel
}

// MakeChannelNoCleanup opens a channel, with automatic error handling. The
// caller must ensure proper cleanup of the channel.
func (n *Node) MakeChannelNoCleanup(
	ctx context.Context,
	t *testing.T,
	chDesc *p2p.ChannelDescriptor,
) *p2p.Channel {
	channel, err := n.Router.OpenChannel(ctx, chDesc)
	require.NoError(t, err)
	return channel
}

// MakePeerUpdates opens a peer update subscription, with automatic cleanup.
// It checks that all updates have been consumed during cleanup.
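//
// Sketch using this package's RequireUpdate helper to consume an expected
// update (peer is illustrative):
//
//	sub := node.MakePeerUpdates(ctx, t)
//	// ... trigger a connection ...
//	p2ptest.RequireUpdate(t, sub, p2p.PeerUpdate{
//		NodeID: peer.NodeID,
//		Status: p2p.PeerStatusUp,
//	})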
func (n *Node) MakePeerUpdates(ctx context.Context, t *testing.T) *p2p.PeerUpdates {
	t.Helper()
	sub := n.PeerManager.Subscribe(ctx)
	t.Cleanup(func() {
		RequireNoUpdates(ctx, t, sub)
	})

	return sub
}

// MakePeerUpdatesNoRequireEmpty opens a peer update subscription, with automatic cleanup.
// It does *not* check that all updates have been consumed; the update channel
// is closed when the context is canceled.
func (n *Node) MakePeerUpdatesNoRequireEmpty(ctx context.Context, t *testing.T) *p2p.PeerUpdates {
	return n.PeerManager.Subscribe(ctx)
}

// MakeChannelDesc returns a channel descriptor for the given channel ID,
// using the test Message type and default priority and queue capacities.
func MakeChannelDesc(chID p2p.ChannelID) *p2p.ChannelDescriptor {
	return &p2p.ChannelDescriptor{
		ID:                  chID,
		MessageType:         &Message{},
		Priority:            5,
		SendQueueCapacity:   10,
		RecvMessageCapacity: 10,
	}
}