github.com/koko1123/flow-go-1@v0.29.6/network/p2p/dht/dht_test.go

package dht_test

import (
	"context"
	"testing"
	"time"

	golog "github.com/ipfs/go-log/v2"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	libp2pmsg "github.com/koko1123/flow-go-1/model/libp2p/message"
	"github.com/koko1123/flow-go-1/module/irrecoverable"
	"github.com/koko1123/flow-go-1/module/metrics"
	"github.com/koko1123/flow-go-1/network/channels"
	"github.com/koko1123/flow-go-1/network/message"
	"github.com/koko1123/flow-go-1/network/p2p"
	"github.com/koko1123/flow-go-1/network/p2p/dht"
	p2ptest "github.com/koko1123/flow-go-1/network/p2p/test"
	flowpubsub "github.com/koko1123/flow-go-1/network/validator/pubsub"
	"github.com/koko1123/flow-go-1/utils/unittest"
)

// Workaround for https://github.com/stretchr/testify/pull/808
const ticksForAssertEventually = 10 * time.Millisecond

// TestFindPeerWithDHT checks that if a node is configured to participate in the DHT, it is
// able to create new streams with peers even without knowing their address info beforehand.
func TestFindPeerWithDHT(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
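	// the mock signaler context is expected to fail the test if any node throws an irrecoverable error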
	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)

	count := 10
	golog.SetAllLoggers(golog.LevelFatal) // change this to Debug if libp2p logs are needed

	sporkId := unittest.IdentifierFixture()
	dhtServerNodes, _ := p2ptest.NodesFixture(t, sporkId, "dht_test", 2, p2ptest.WithDHTOptions(dht.AsServer()))
	require.Len(t, dhtServerNodes, 2)

	dhtClientNodes, _ := p2ptest.NodesFixture(t, sporkId, "dht_test", count-2, p2ptest.WithDHTOptions(dht.AsClient()))

	nodes := append(dhtServerNodes, dhtClientNodes...)
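	// start all nodes and make sure they are stopped when the test ends; the 100ms duration is presumably the start/stop timeout used by the fixtures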
	p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond)
	defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond)

	getDhtServerAddr := func(i uint) peer.AddrInfo {
		return peer.AddrInfo{ID: dhtServerNodes[i].Host().ID(), Addrs: dhtServerNodes[i].Host().Addrs()}
	}

	// connect even-numbered clients to the first DHT server, and odd-numbered clients to the second
	for i, clientNode := range dhtClientNodes {
		err := clientNode.Host().Connect(ctx, getDhtServerAddr(uint(i%2)))
		require.NoError(t, err)
	}

	// wait for clients to connect to DHT servers and update their routing tables
	require.Eventually(t, func() bool {
		for i, clientNode := range dhtClientNodes {
			if clientNode.RoutingTable().Find(getDhtServerAddr(uint(i%2)).ID) == "" {
				return false
			}
		}
		return true
	}, time.Second*5, ticksForAssertEventually, "nodes failed to connect")

	// connect the two DHT servers to each other
	err := dhtServerNodes[0].Host().Connect(ctx, getDhtServerAddr(1))
	require.NoError(t, err)

	// wait for the first server to connect to the second and update its routing table
	require.Eventually(t, func() bool {
		return dhtServerNodes[0].RoutingTable().Find(getDhtServerAddr(1).ID) != ""
	}, time.Second*5, ticksForAssertEventually, "dht servers failed to connect")

	// check that all even-numbered clients can create streams with all odd-numbered clients
	for i := 0; i < len(dhtClientNodes); i += 2 {
		for j := 1; j < len(dhtClientNodes); j += 2 {
			// client i should not yet know the address of client j, but we clear any addresses
			// here just in case.
			dhtClientNodes[i].Host().Peerstore().ClearAddrs(dhtClientNodes[j].Host().ID())

			// Try to create a stream from client i to client j. This should resort to a DHT
			// lookup since client i does not know client j's address.
			unittest.RequireReturnsBefore(t, func() {
				_, err := dhtClientNodes[i].CreateStream(ctx, dhtClientNodes[j].Host().ID())
				require.NoError(t, err)
			}, 1*time.Second, "could not create stream on time")
		}
	}
}

// TestPubSubWithDHTDiscovery checks if nodes can subscribe to a topic and send and receive a message on that topic.
// The DHT discovery mechanism is used for nodes to find each other.
func TestPubSubWithDHTDiscovery(t *testing.T) {
	unittest.SkipUnless(t, unittest.TEST_FLAKY, "failing on CI")

	ctx, cancel := context.WithCancel(context.Background())
	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)

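	// the topic embeds a random identifier, so each run of this test publishes on a fresh topic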
	topic := channels.Topic("/flow/" + unittest.IdentifierFixture().String())
	count := 5
	golog.SetAllLoggers(golog.LevelFatal) // change this to Debug if libp2p logs are needed

	// Step 1: Create nodes
	// Nodes will be connected in a hub-and-spoke configuration where one node acts as the DHT server,
	// while the other nodes act as clients.
	// The hub-and-spoke configuration should eventually converge to a fully connected graph as all nodes discover
	// each other via the central node.
	// We have fewer than 6 nodes in play, hence the full mesh. LibP2P would limit max connections to 12 if there were
	// more nodes.
	//
	//  Initial configuration  =>  Final/expected configuration
	//   N2      N3                     N2-----N3
	//      \  /                        | \   / |
	//       N1             =>          |   N1  |
	//     /   \                        | /   \ |
	//   N4     N5                      N4-----N5

	sporkId := unittest.IdentifierFixture()
	// create one node running the DHT Server (mimicking the staked AN)
	dhtServerNodes, _ := p2ptest.NodesFixture(t, sporkId, "dht_test", 1, p2ptest.WithDHTOptions(dht.AsServer()))
	require.Len(t, dhtServerNodes, 1)
	dhtServerNode := dhtServerNodes[0]

	// create other nodes running the DHT Client (mimicking the unstaked ANs)
	dhtClientNodes, _ := p2ptest.NodesFixture(t, sporkId, "dht_test", count-1, p2ptest.WithDHTOptions(dht.AsClient()))

	nodes := append(dhtServerNodes, dhtClientNodes...)
	p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond)
	defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond)

	// Step 2: Connect all nodes running a DHT client to the node running the DHT server
	// This has to be done before subscribing to any topic, otherwise the node gives up on advertising
	// its topics of interest and becomes undiscoverable by other nodes
	// (see: https://github.com/libp2p/go-libp2p-pubsub/issues/442)
	dhtServerAddr := peer.AddrInfo{ID: dhtServerNode.Host().ID(), Addrs: dhtServerNode.Host().Addrs()}
	for _, clientNode := range dhtClientNodes {
		err := clientNode.Host().Connect(ctx, dhtServerAddr)
		require.NoError(t, err)
	}

	// Step 3: Subscribe to the test topic
	// A node will receive its own message (https://github.com/libp2p/go-libp2p-pubsub/issues/65),
	// hence we expect count (not count-1) messages to be received (one by each node, including the sender)
	ch := make(chan peer.ID, count)

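	// encode an empty TestMessage and wrap it in a network message envelope; the marshaled bytes are what every subscriber should receive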
	codec := unittest.NetworkCodec()

	payload, err := codec.Encode(&libp2pmsg.TestMessage{})
	require.NoError(t, err)
	msg := &message.Message{
		Payload: payload,
	}

	data, err := msg.Marshal()
	require.NoError(t, err)

	logger := unittest.Logger()

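	// the topic validator vets and decodes incoming pubsub messages before they are delivered to subscribers; all peers are allowed here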
	topicValidator := flowpubsub.TopicValidator(logger, codec, unittest.NetworkSlashingViolationsConsumer(logger, metrics.NewNoopCollector()), unittest.AllowAllPeerFilter())
	for _, n := range nodes {
		s, err := n.Subscribe(topic, topicValidator)
		require.NoError(t, err)

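		// each node reads one message from its subscription and reports its own ID on ch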
		go func(s p2p.Subscription, nodeID peer.ID) {
			msg, err := s.Next(ctx)
			require.NoError(t, err)
			require.NotNil(t, msg)
			assert.Equal(t, data, msg.Data)
			ch <- nodeID
		}(s, n.Host().ID())
	}

	// fullyConnectedGraph checks that each node is directly connected to all the other nodes
	fullyConnectedGraph := func() bool {
		for i := 0; i < len(nodes); i++ {
			for j := i + 1; j < len(nodes); j++ {
				if nodes[i].Host().Network().Connectedness(nodes[j].Host().ID()) == network.NotConnected {
					return false
				}
			}
		}
		return true
	}
	// assert that the graph is fully connected
	require.Eventually(t, fullyConnectedGraph, time.Second*5, ticksForAssertEventually, "nodes failed to discover each other")

	// Step 4: publish a message to the topic
	require.NoError(t, dhtServerNode.Publish(ctx, topic, data))

	// Step 5: By now, all peers should have been discovered and the message should have been successfully published
	// A hash set to keep track of the nodes that received the message
	recv := make(map[peer.ID]bool, count)

loop:
	for i := 0; i < count; i++ {
		select {
		case res := <-ch:
			recv[res] = true
		case <-time.After(3 * time.Second):
			var missing peer.IDSlice
			for _, n := range nodes {
				if _, found := recv[n.Host().ID()]; !found {
					missing = append(missing, n.Host().ID())
				}
			}
			assert.Failf(t, "messages not received by some nodes", "%+v", missing)
			break loop
		}
	}

	// Step 6: unsubscribe all nodes from the topic
	for _, n := range nodes {
		assert.NoError(t, n.UnSubscribe(topic))
	}
}