github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/network/p2p/dht/dht_test.go

package dht_test

import (
	"context"
	"testing"
	"time"

	golog "github.com/ipfs/go-log/v2"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/model/flow"
	libp2pmsg "github.com/onflow/flow-go/model/libp2p/message"
	"github.com/onflow/flow-go/module/irrecoverable"
	mockmodule "github.com/onflow/flow-go/module/mock"
	"github.com/onflow/flow-go/network/channels"
	"github.com/onflow/flow-go/network/message"
	"github.com/onflow/flow-go/network/p2p"
	"github.com/onflow/flow-go/network/p2p/dht"
	p2ptest "github.com/onflow/flow-go/network/p2p/test"
	flowpubsub "github.com/onflow/flow-go/network/validator/pubsub"
	"github.com/onflow/flow-go/utils/unittest"
)

// Workaround for https://github.com/stretchr/testify/pull/808
const ticksForAssertEventually = 10 * time.Millisecond

// TestFindPeerWithDHT checks that if a node is configured to participate in the DHT, it is
// able to create new streams with peers even without knowing their address info beforehand.
func TestFindPeerWithDHT(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
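	// the mock signaler context turns any irrecoverable error thrown by a node into a test failure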
	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)

	count := 10
	golog.SetAllLoggers(golog.LevelFatal) // change this to Debug if libp2p logs are needed

	sporkId := unittest.IdentifierFixture()
	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
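	// create two nodes that run the DHT in server mode; the client nodes created below use them to discover peers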
	dhtServerNodes, serverIDs := p2ptest.NodesFixture(
		t,
		sporkId,
		"dht_test",
		2,
		idProvider,
		p2ptest.WithRole(flow.RoleExecution),
		p2ptest.WithDHTOptions(dht.AsServer()))
	require.Len(t, dhtServerNodes, 2)

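	// create the remaining count-2 nodes as DHT clients; they serve no routing requests and rely on the servers for lookups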
	dhtClientNodes, clientIDs := p2ptest.NodesFixture(
		t,
		sporkId,
		"dht_test",
		count-2,
		idProvider,
		p2ptest.WithRole(flow.RoleExecution),
		p2ptest.WithDHTOptions(dht.AsClient()))

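	// register all identities with the shared ID provider, then start the nodes; StopNodes tears them down when the test ends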
	nodes := append(dhtServerNodes, dhtClientNodes...)
	idProvider.SetIdentities(append(serverIDs, clientIDs...))
	p2ptest.StartNodes(t, signalerCtx, nodes)
	defer p2ptest.StopNodes(t, nodes, cancel)

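	// getDhtServerAddr returns the address info (peer ID and listen addresses) of the i-th DHT server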
	getDhtServerAddr := func(i uint) peer.AddrInfo {
		return peer.AddrInfo{ID: dhtServerNodes[i].ID(), Addrs: dhtServerNodes[i].Host().Addrs()}
	}

	// connect even-numbered clients to the first DHT server, and odd-numbered clients to the second
	for i, clientNode := range dhtClientNodes {
		err := clientNode.Host().Connect(ctx, getDhtServerAddr(uint(i%2)))
		require.NoError(t, err)
	}

	// wait for clients to connect to DHT servers and update their routing tables
	require.Eventually(
		t, func() bool {
			for i, clientNode := range dhtClientNodes {
				if clientNode.RoutingTable().Find(getDhtServerAddr(uint(i%2)).ID) == "" {
					return false
				}
			}
			return true
		}, time.Second*5, ticksForAssertEventually, "nodes failed to connect")

	// connect the two DHT servers to each other
	err := dhtServerNodes[0].Host().Connect(ctx, getDhtServerAddr(1))
	require.NoError(t, err)

	// wait for the first server to connect to the second and update its routing table
	require.Eventually(
		t, func() bool {
			return dhtServerNodes[0].RoutingTable().Find(getDhtServerAddr(1).ID) != ""
		}, time.Second*5, ticksForAssertEventually, "dht servers failed to connect")

	// check that all even-numbered clients can create streams with all odd-numbered clients
	for i := 0; i < len(dhtClientNodes); i += 2 {
		for j := 1; j < len(dhtClientNodes); j += 2 {
			// client i should not yet know the address of client j, but we clear any addresses
			// here just in case.
			dhtClientNodes[i].Host().Peerstore().ClearAddrs(dhtClientNodes[j].ID())

			// Try to create a stream from client i to client j. This should resort to a DHT
			// lookup since client i does not know client j's address.
			unittest.RequireReturnsBefore(
				t, func() {
					err := dhtClientNodes[i].OpenAndWriteOnStream(
						ctx, dhtClientNodes[j].ID(), t.Name(), func(stream network.Stream) error {
							// no data is written; just assert that the stream was established
							require.NotNil(t, stream)
							return nil
						})
					require.NoError(t, err)
				}, 1*time.Second, "could not create stream on time")
		}
	}
}

// TestPubSubWithDHTDiscovery checks that nodes can subscribe to a topic and send and receive a message on that topic.
// The DHT discovery mechanism is used for nodes to find each other.
func TestPubSubWithDHTDiscovery(t *testing.T) {
	unittest.SkipUnless(t, unittest.TEST_FLAKY, "failing on CI")

	ctx, cancel := context.WithCancel(context.Background())
	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)

	count := 5
	golog.SetAllLoggers(golog.LevelFatal) // change this to Debug if libp2p logs are needed

	// Step 1: Create nodes
	// The nodes are connected in a hub-and-spoke configuration where one node acts as the DHT server,
	// while the other nodes act as clients.
	// The hub-and-spoke configuration should eventually converge to a fully connected graph as all nodes discover
	// each other via the central node.
	// We have fewer than 6 nodes in play, hence the full mesh. LibP2P would limit max connections to 12 if there were
	// more nodes.
	//
	//  Initial configuration  =>  Final/expected configuration
	//   N2      N3                     N2-----N3
	//      \  /                        | \   / |
	//       N1             =>          |   N1  |
	//     /   \                        | /   \ |
	//   N4     N5                      N4-----N5

	sporkId := unittest.IdentifierFixture()
	topic := channels.TopicFromChannel(channels.TestNetworkChannel, sporkId)
	idProvider := mockmodule.NewIdentityProvider(t)
	// create one node running the DHT Server (mimicking the staked AN)
	dhtServerNodes, serverIDs := p2ptest.NodesFixture(
		t,
		sporkId,
		"dht_test",
		1,
		idProvider,
		p2ptest.WithRole(flow.RoleExecution),
		p2ptest.WithDHTOptions(dht.AsServer()))
	require.Len(t, dhtServerNodes, 1)
	dhtServerNode := dhtServerNodes[0]

	// create the other nodes running the DHT Client (mimicking the unstaked ANs)
	dhtClientNodes, clientIDs := p2ptest.NodesFixture(
		t,
		sporkId,
		"dht_test",
		count-1,
		idProvider,
		p2ptest.WithRole(flow.RoleExecution),
		p2ptest.WithDHTOptions(dht.AsClient()))

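	// stub the identity provider so that each node's peer ID resolves to its Flow identity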
	ids := append(serverIDs, clientIDs...)
	nodes := append(dhtServerNodes, dhtClientNodes...)
	for i, node := range nodes {
		idProvider.On("ByPeerID", node.ID()).Return(&ids[i], true).Maybe()
	}
	p2ptest.StartNodes(t, signalerCtx, nodes)
	defer p2ptest.StopNodes(t, nodes, cancel)

	// Step 2: Connect all nodes running a DHT client to the node running the DHT server
	// This has to be done before subscribing to any topic, otherwise the node gives up on advertising
	// its topics of interest and becomes undiscoverable by other nodes
	// (see: https://github.com/libp2p/go-libp2p-pubsub/issues/442)
	dhtServerAddr := peer.AddrInfo{ID: dhtServerNode.ID(), Addrs: dhtServerNode.Host().Addrs()}
	for _, clientNode := range dhtClientNodes {
		err := clientNode.Host().Connect(ctx, dhtServerAddr)
		require.NoError(t, err)
	}

	// Step 3: Subscribe to the test topic
	// A node will receive its own message (https://github.com/libp2p/go-libp2p-pubsub/issues/65),
	// hence we expect count messages to be received, not count - 1 (one per node, including the sender)
	ch := make(chan peer.ID, count)

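	// build the outgoing message scope: a TestMessage encoded with the network codec, addressed to all node IDs over the pubsub protocol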
	messageScope, err := message.NewOutgoingScope(
		ids.NodeIDs(),
		topic,
		&libp2pmsg.TestMessage{},
		unittest.NetworkCodec().Encode,
		message.ProtocolTypePubSub)
	require.NoError(t, err)

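	// subscribe every node to the topic with a permissive topic validator; each subscription spawns a goroutine
	// that reports the receiving node's peer ID on ch once a message arrives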
	logger := unittest.Logger()
	topicValidator := flowpubsub.TopicValidator(logger, unittest.AllowAllPeerFilter())
	for _, n := range nodes {
		s, err := n.Subscribe(topic, topicValidator)
		require.NoError(t, err)

		go func(s p2p.Subscription, nodeID peer.ID) {
			msg, err := s.Next(ctx)
			require.NoError(t, err)
			require.NotNil(t, msg)
			assert.Equal(t, messageScope.Proto().Payload, msg.Data)
			ch <- nodeID
		}(s, n.ID())
	}

	// fullyConnectedGraph checks that each node is directly connected to all the other nodes
	fullyConnectedGraph := func() bool {
		for i := 0; i < len(nodes); i++ {
			for j := i + 1; j < len(nodes); j++ {
				if nodes[i].Host().Network().Connectedness(nodes[j].ID()) == network.NotConnected {
					return false
				}
			}
		}
		return true
	}
	// assert that the graph is fully connected
	require.Eventually(t, fullyConnectedGraph, time.Second*5, ticksForAssertEventually, "nodes failed to discover each other")

	// Step 4: publish a message to the topic
	require.NoError(t, dhtServerNode.Publish(ctx, messageScope))

	// Step 5: By now, all peers should have been discovered and the message should have been successfully published
	// A set to keep track of the nodes that received the message
	recv := make(map[peer.ID]bool, count)

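	// wait for one receipt per node; on timeout, fail and report which nodes never received the message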
loop:
	for i := 0; i < count; i++ {
		select {
		case res := <-ch:
			recv[res] = true
		case <-time.After(3 * time.Second):
			var missing peer.IDSlice
			for _, n := range nodes {
				if _, found := recv[n.ID()]; !found {
					missing = append(missing, n.ID())
				}
			}
			assert.Failf(t, "messages not received by some nodes", "%+v", missing)
			break loop
		}
	}

	// Step 6: unsubscribe all nodes from the topic
	for _, n := range nodes {
		assert.NoError(t, n.Unsubscribe(topic))
	}
}