github.com/koko1123/flow-go-1@v0.29.6/network/internal/p2pfixtures/fixtures.go (about)

     1  package p2pfixtures
     2  
     3  import (
     4  	"bufio"
     5  	"bytes"
     6  	"context"
     7  	"fmt"
     8  	"net"
     9  	"testing"
    10  	"time"
    11  
    12  	"github.com/koko1123/flow-go-1/network/message"
    13  
    14  	addrutil "github.com/libp2p/go-addr-util"
    15  	pubsub "github.com/libp2p/go-libp2p-pubsub"
    16  	"github.com/libp2p/go-libp2p/core/host"
    17  	"github.com/libp2p/go-libp2p/core/peer"
    18  	"github.com/libp2p/go-libp2p/core/peerstore"
    19  	"github.com/libp2p/go-libp2p/core/routing"
    20  	"github.com/multiformats/go-multiaddr"
    21  	manet "github.com/multiformats/go-multiaddr/net"
    22  	"github.com/rs/zerolog"
    23  	"github.com/stretchr/testify/require"
    24  
    25  	"github.com/onflow/flow-go/crypto"
    26  	"github.com/koko1123/flow-go-1/model/flow"
    27  	"github.com/koko1123/flow-go-1/module/metrics"
    28  	flownet "github.com/koko1123/flow-go-1/network"
    29  	"github.com/koko1123/flow-go-1/network/channels"
    30  	"github.com/koko1123/flow-go-1/network/internal/p2putils"
    31  	"github.com/koko1123/flow-go-1/network/internal/testutils"
    32  	"github.com/koko1123/flow-go-1/network/p2p"
    33  	p2pdht "github.com/koko1123/flow-go-1/network/p2p/dht"
    34  	"github.com/koko1123/flow-go-1/network/p2p/keyutils"
    35  	"github.com/koko1123/flow-go-1/network/p2p/p2pbuilder"
    36  	validator "github.com/koko1123/flow-go-1/network/validator/pubsub"
    37  
    38  	"github.com/koko1123/flow-go-1/network/p2p/unicast"
    39  	"github.com/koko1123/flow-go-1/network/p2p/utils"
    40  	"github.com/koko1123/flow-go-1/utils/unittest"
    41  )
    42  
    43  // NetworkingKeyFixtures is a test helper that generates an ECDSA Flow key pair.
    44  func NetworkingKeyFixtures(t *testing.T) crypto.PrivateKey {
    45  	seed := unittest.SeedFixture(48)
    46  	key, err := crypto.GeneratePrivateKey(crypto.ECDSASecp256k1, seed)
    47  	require.NoError(t, err)
    48  	return key
    49  }
    50  
    51  // SilentNodeFixture returns a TCP listener and the identity of a node that accepts connections but never replies.
    52  func SilentNodeFixture(t *testing.T) (net.Listener, flow.Identity) {
    53  	key := NetworkingKeyFixtures(t)
    54  
    55  	lst, err := net.Listen("tcp4", unittest.DefaultAddress)
    56  	require.NoError(t, err)
    57  
    58  	addr, err := manet.FromNetAddr(lst.Addr())
    59  	require.NoError(t, err)
    60  
    61  	addrs := []multiaddr.Multiaddr{addr}
    62  	addrs, err = addrutil.ResolveUnspecifiedAddresses(addrs, nil)
    63  	require.NoError(t, err)
    64  
    65  	go acceptAndHang(t, lst)
    66  
    67  	ip, port, err := p2putils.IPPortFromMultiAddress(addrs...)
    68  	require.NoError(t, err)
    69  
    70  	identity := unittest.IdentityFixture(unittest.WithNetworkingKey(key.PublicKey()), unittest.WithAddress(ip+":"+port))
    71  	return lst, *identity
    72  }
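
        // Example (illustrative sketch, not part of the fixture API): a test can use the silent node to
        // exercise timeout and error paths when dialing a peer that never responds. How the returned
        // identity is wired into the node under test is an assumption of the caller.
        //
        //	lst, silentIdentity := SilentNodeFixture(t)
        //	defer func() { require.NoError(t, lst.Close()) }()
        //	_ = silentIdentity // e.g., hand the identity to the node under test and expect dials to it to fail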
    73  
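        // acceptAndHang accepts incoming connections on the given listener and keeps them open without
        // ever reading from or writing to them; once the listener is closed, it closes all accepted connections.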
    74  func acceptAndHang(t *testing.T, l net.Listener) {
    75  	conns := make([]net.Conn, 0, 10)
    76  	for {
    77  		c, err := l.Accept()
    78  		if err != nil {
    79  			break
    80  		}
    81  		if c != nil {
    82  			conns = append(conns, c)
    83  		}
    84  	}
    85  	for _, c := range conns {
    86  		require.NoError(t, c.Close())
    87  	}
    88  }
    89  
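        // nodeOpt is a functional option that mutates a p2pbuilder.NodeBuilder before the test node is built.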
    90  type nodeOpt func(p2pbuilder.NodeBuilder)
    91  
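        // WithSubscriptionFilter returns a node option that sets the given pubsub subscription filter on the node builder.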
    92  func WithSubscriptionFilter(filter pubsub.SubscriptionFilter) nodeOpt {
    93  	return func(builder p2pbuilder.NodeBuilder) {
    94  		builder.SetSubscriptionFilter(filter)
    95  	}
    96  }
    97  
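        // CreateNode builds a libp2p test node with the given networking key and spork ID, backed by a
        // DHT-based routing system and a test resource manager, and fails the test if the node cannot be built.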
    98  func CreateNode(t *testing.T, nodeID flow.Identifier, networkKey crypto.PrivateKey, sporkID flow.Identifier, logger zerolog.Logger, opts ...nodeOpt) p2p.LibP2PNode {
    99  	builder := p2pbuilder.NewNodeBuilder(
   100  		logger,
   101  		metrics.NewNoopCollector(),
   102  		unittest.DefaultAddress,
   103  		networkKey,
   104  		sporkID,
   105  		p2pbuilder.DefaultResourceManagerConfig()).
   106  		SetRoutingSystem(func(c context.Context, h host.Host) (routing.Routing, error) {
   107  			return p2pdht.NewDHT(c, h, unicast.FlowDHTProtocolID(sporkID), zerolog.Nop(), metrics.NewNoopCollector())
   108  		}).
   109  		SetResourceManager(testutils.NewResourceManager(t))
   110  
   111  	for _, opt := range opts {
   112  		opt(builder)
   113  	}
   114  
   115  	libp2pNode, err := builder.Build()
   116  	require.NoError(t, err)
   117  
   118  	return libp2pNode
   119  }
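
        // Example (illustrative sketch): building a test node with a subscription filter. The value
        // mySubscriptionFilter is a placeholder for whatever pubsub.SubscriptionFilter the calling test provides.
        //
        //	sporkID := unittest.IdentifierFixture()
        //	node := CreateNode(t, unittest.IdentifierFixture(), NetworkingKeyFixtures(t), sporkID,
        //		unittest.Logger(), WithSubscriptionFilter(mySubscriptionFilter))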
   120  
   121  // PeerIdFixture creates a random and unique peer ID (libp2p node ID).
   122  func PeerIdFixture(t *testing.T) peer.ID {
   123  	key, err := generateNetworkingKey(unittest.IdentifierFixture())
   124  	require.NoError(t, err)
   125  
   126  	pubKey, err := keyutils.LibP2PPublicKeyFromFlow(key.PublicKey())
   127  	require.NoError(t, err)
   128  
   129  	peerID, err := peer.IDFromPublicKey(pubKey)
   130  	require.NoError(t, err)
   131  
   132  	return peerID
   133  }
   134  
   135  // generateNetworkingKey deterministically generates a Flow ECDSA key, using the given identifier as the seed.
   136  func generateNetworkingKey(s flow.Identifier) (crypto.PrivateKey, error) {
   137  	seed := make([]byte, crypto.KeyGenSeedMinLenECDSASecp256k1)
   138  	copy(seed, s[:])
   139  	return crypto.GeneratePrivateKey(crypto.ECDSASecp256k1, seed)
   140  }
   141  
   142  // PeerIdsFixture creates random and unique peer IDs (libp2p node IDs).
   143  func PeerIdsFixture(t *testing.T, n int) []peer.ID {
   144  	peerIDs := make([]peer.ID, n)
   145  	for i := 0; i < n; i++ {
   146  		peerIDs[i] = PeerIdFixture(t)
   147  	}
   148  	return peerIDs
   149  }
   150  
   151  // SubMustNeverReceiveAnyMessage checks that the subscription does not receive any message before the given context expires.
   152  func SubMustNeverReceiveAnyMessage(t *testing.T, ctx context.Context, sub p2p.Subscription) {
   153  	timedOut := make(chan struct{})
   154  	go func() {
   155  		_, err := sub.Next(ctx)
   156  		require.Error(t, err)
   157  		require.ErrorIs(t, err, context.DeadlineExceeded)
   158  		close(timedOut)
   159  	}()
   160  
   161  	// wait for the context deadline to be exceeded; the 10-second bound is chosen to be long
   162  	// enough that it is never hit on the happy path, and short enough that a failing test
   163  	// does not take too long to report.
   164  	unittest.RequireCloseBefore(t, timedOut, 10*time.Second, "context deadline was not exceeded in time; the subscription may have received an unexpected message")
   165  }
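
        // Example (illustrative sketch): the caller bounds the wait with a short context deadline;
        // parentCtx and sub are assumed to come from the calling test.
        //
        //	ctx, cancel := context.WithTimeout(parentCtx, time.Second)
        //	defer cancel()
        //	SubMustNeverReceiveAnyMessage(t, ctx, sub)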
   166  
   167  // HasSubReceivedMessage checks whether the subscription receives the given message before the given context expires.
   168  // It returns true if the subscription has received the message, and false otherwise.
   169  func HasSubReceivedMessage(t *testing.T, ctx context.Context, expectedMessage []byte, sub p2p.Subscription) bool {
   170  	received := make(chan struct{})
   171  	go func() {
   172  		msg, err := sub.Next(ctx)
   173  		if err != nil {
   174  			require.ErrorIs(t, err, context.DeadlineExceeded)
   175  			return
   176  		}
   177  		if !bytes.Equal(expectedMessage, msg.Data) {
   178  			return
   179  		}
   180  		close(received)
   181  	}()
   182  
   183  	select {
   184  	case <-received:
   185  		return true
   186  	case <-ctx.Done():
   187  		return false
   188  	}
   189  }
   190  
   191  // SubsMustNeverReceiveAnyMessage checks that none of the subscriptions receive any message before the given context expires.
   192  func SubsMustNeverReceiveAnyMessage(t *testing.T, ctx context.Context, subs []p2p.Subscription) {
   193  	for _, sub := range subs {
   194  		SubMustNeverReceiveAnyMessage(t, ctx, sub)
   195  	}
   196  }
   197  
   198  // AddNodesToEachOthersPeerStore adds the dialing address of each node to the peer store of every other node.
   199  // However, it does not connect them to each other.
   200  func AddNodesToEachOthersPeerStore(t *testing.T, nodes []p2p.LibP2PNode, ids flow.IdentityList) {
   201  	for _, node := range nodes {
   202  		for i, other := range nodes {
   203  			if node == other {
   204  				continue
   205  			}
   206  			otherPInfo, err := utils.PeerAddressInfo(*ids[i])
   207  			require.NoError(t, err)
   208  			node.Host().Peerstore().AddAddrs(otherPInfo.ID, otherPInfo.Addrs, peerstore.AddressTTL)
   209  		}
   210  	}
   211  }
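
        // Example (illustrative sketch): nodes and ids are assumed to come from the calling test's
        // node setup, with ids[i] being the Flow identity of nodes[i].
        //
        //	AddNodesToEachOthersPeerStore(t, nodes, ids)
        //	// the nodes now know each other's dial addresses, but no connections have been established.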
   212  
   213  // EnsureNotConnected ensures that the "from" nodes cannot establish a connection to any of the "to" nodes.
   214  func EnsureNotConnected(t *testing.T, ctx context.Context, from []p2p.LibP2PNode, to []p2p.LibP2PNode) {
   215  	for _, node := range from {
   216  		for _, other := range to {
   217  			if node == other {
   218  				require.Fail(t, "overlapping nodes in from and to lists")
   219  			}
   220  			require.Error(t, node.Host().Connect(ctx, other.Host().Peerstore().PeerInfo(other.Host().ID())))
   221  		}
   222  	}
   223  }
   224  
   225  // EnsureNotConnectedBetweenGroups ensures no connection exists between the given groups of nodes.
   226  func EnsureNotConnectedBetweenGroups(t *testing.T, ctx context.Context, groupA []p2p.LibP2PNode, groupB []p2p.LibP2PNode) {
   227  	// ensure no connection from group A to group B
   228  	EnsureNotConnected(t, ctx, groupA, groupB)
   229  	// ensure no connection from group B to group A
   230  	EnsureNotConnected(t, ctx, groupB, groupA)
   231  }
   232  
   233  // EnsureNoPubsubMessageExchange ensures that no pubsub message published by the "from" nodes is received by the "to" nodes.
   234  func EnsureNoPubsubMessageExchange(t *testing.T, ctx context.Context, from []p2p.LibP2PNode, to []p2p.LibP2PNode, messageFactory func() (interface{}, channels.Topic)) {
   235  	_, topic := messageFactory()
   236  
   237  	subs := make([]p2p.Subscription, len(to))
   238  	svc := unittest.NetworkSlashingViolationsConsumer(unittest.Logger(), metrics.NewNoopCollector())
   239  	tv := validator.TopicValidator(
   240  		unittest.Logger(),
   241  		unittest.NetworkCodec(),
   242  		svc,
   243  		unittest.AllowAllPeerFilter())
   244  	var err error
   245  	for _, node := range from {
   246  		_, err = node.Subscribe(topic, tv)
   247  		require.NoError(t, err)
   248  	}
   249  
   250  	for i, node := range to {
   251  		s, err := node.Subscribe(topic, tv)
   252  		require.NoError(t, err)
   253  		subs[i] = s
   254  	}
   255  
   256  	// let subscriptions propagate
   257  	time.Sleep(1 * time.Second)
   258  
   259  	for _, node := range from {
   260  		// creates a unique message to be published by the node.
   261  		msg, _ := messageFactory()
   262  		channel, ok := channels.ChannelFromTopic(topic)
   263  		require.True(t, ok)
   264  		data := MustEncodeEvent(t, msg, channel)
   265  
   266  		// publish the message and ensure that none of the subscriptions receive it.
   267  		require.NoError(t, node.Publish(ctx, topic, data))
   268  		timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
   269  		SubsMustNeverReceiveAnyMessage(t, timeoutCtx, subs)
   270  		cancel()
   271  	}
   272  }
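
        // Example messageFactory (illustrative sketch): it must return a fresh event together with the
        // topic to publish it on. The ctx, groupA, and groupB values are assumed to come from the calling
        // test, and the use of unittest.ProposalFixture and channels.PushBlocks below is an assumption
        // about that test, not a requirement of this helper.
        //
        //	sporkID := unittest.IdentifierFixture()
        //	messageFactory := func() (interface{}, channels.Topic) {
        //		return unittest.ProposalFixture(), channels.TopicFromChannel(channels.PushBlocks, sporkID)
        //	}
        //	EnsureNoPubsubMessageExchange(t, ctx, groupA, groupB, messageFactory)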
   273  
   274  // EnsureNoPubsubExchangeBetweenGroups ensures that no pubsub message is exchanged between the given groups of nodes.
   275  func EnsureNoPubsubExchangeBetweenGroups(t *testing.T, ctx context.Context, groupA []p2p.LibP2PNode, groupB []p2p.LibP2PNode, messageFactory func() (interface{}, channels.Topic)) {
   276  	// ensure no message exchange from group A to group B
   277  	EnsureNoPubsubMessageExchange(t, ctx, groupA, groupB, messageFactory)
   278  	// ensure no message exchange from group B to group A
   279  	EnsureNoPubsubMessageExchange(t, ctx, groupB, groupA, messageFactory)
   280  }
   281  
   282  // EnsureMessageExchangeOverUnicast ensures that the given nodes exchange arbitrary messages through unicast (i.e., direct stream creation).
   283  // It fails the test if any node does not receive the messages sent by the other nodes.
   284  // The "inbounds" parameter specifies the inbound channels of the nodes, on which the received messages are delivered.
   285  // The "messageFactory" parameter specifies the function that creates unique messages to be sent.
   286  func EnsureMessageExchangeOverUnicast(t *testing.T, ctx context.Context, nodes []p2p.LibP2PNode, inbounds []chan string, messageFactory func() string) {
   287  	for _, this := range nodes {
   288  		msg := messageFactory()
   289  
   290  		// send the message to all other nodes
   291  		for _, other := range nodes {
   292  			if this == other {
   293  				continue
   294  			}
   295  			s, err := this.CreateStream(ctx, other.Host().ID())
   296  			require.NoError(t, err)
   297  			rw := bufio.NewReadWriter(bufio.NewReader(s), bufio.NewWriter(s))
   298  			_, err = rw.WriteString(msg)
   299  			require.NoError(t, err)
   300  
   301  			// Flush the stream
   302  			require.NoError(t, rw.Flush())
   303  		}
   304  
   305  		// wait for the message to be received by all other nodes
   306  		for i, other := range nodes {
   307  			if this == other {
   308  				continue
   309  			}
   310  
   311  			select {
   312  			case rcv := <-inbounds[i]:
   313  				require.Equal(t, msg, rcv)
   314  			case <-time.After(3 * time.Second):
   315  				require.Fail(t, fmt.Sprintf("did not receive message from node %d", i))
   316  			}
   317  		}
   318  	}
   319  }
   320  
   321  // EnsureNoStreamCreationBetweenGroups ensures that no stream is created between the given groups of nodes.
   322  func EnsureNoStreamCreationBetweenGroups(t *testing.T, ctx context.Context, groupA []p2p.LibP2PNode, groupB []p2p.LibP2PNode, errorCheckers ...func(*testing.T, error)) {
   323  	// no stream from groupA -> groupB
   324  	EnsureNoStreamCreation(t, ctx, groupA, groupB, errorCheckers...)
   325  	// no stream from groupB -> groupA
   326  	EnsureNoStreamCreation(t, ctx, groupB, groupA, errorCheckers...)
   327  }
   328  
   329  // EnsureNoStreamCreation ensures that no stream can be created from any of the "from" nodes to any of the "to" nodes.
   330  func EnsureNoStreamCreation(t *testing.T, ctx context.Context, from []p2p.LibP2PNode, to []p2p.LibP2PNode, errorCheckers ...func(*testing.T, error)) {
   331  	for _, this := range from {
   332  		for _, other := range to {
   333  			if this == other {
   334  				// should not happen, unless the test is misconfigured.
   335  				require.Fail(t, "node is in both from and to lists")
   336  			}
   337  			// stream creation should fail
   338  			_, err := this.CreateStream(ctx, other.Host().ID())
   339  			require.Error(t, err)
   340  			require.True(t, flownet.IsPeerUnreachableError(err))
   341  
   342  			// runs the error checkers if any.
   343  			for _, check := range errorCheckers {
   344  				check(t, err)
   345  			}
   346  		}
   347  	}
   348  }
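
        // Example error checker (illustrative sketch): in addition to the unreachable-peer check above, a test
        // can assert a more specific cause. The ctx, groupA, and groupB values and the error-text match below
        // are assumptions about the calling test's setup (e.g., a connection gater), not something this helper guarantees.
        //
        //	gaterChecker := func(t *testing.T, err error) {
        //		require.Contains(t, err.Error(), "gater")
        //	}
        //	EnsureNoStreamCreation(t, ctx, groupA, groupB, gaterChecker)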
   349  
   350  // EnsureStreamCreation ensures that a stream can be created from each of the "from" nodes to each of the "to" nodes.
   351  func EnsureStreamCreation(t *testing.T, ctx context.Context, from []p2p.LibP2PNode, to []p2p.LibP2PNode) {
   352  	for _, this := range from {
   353  		for _, other := range to {
   354  			if this == other {
   355  				// should not happen, unless the test is misconfigured.
   356  				require.Fail(t, "node is in both from and to lists")
   357  			}
   358  			// stream creation should pass without error
   359  			s, err := this.CreateStream(ctx, other.Host().ID())
   360  			require.NoError(t, err)
   361  			require.NotNil(t, s)
   362  		}
   363  	}
   364  }
   365  
   366  // LongStringMessageFactoryFixture returns a function that creates a long unique string message.
   367  func LongStringMessageFactoryFixture(t *testing.T) func() string {
   368  	return func() string {
   369  		msg := "this is an intentionally long MESSAGE to be bigger than buffer size of most of stream compressors"
   370  		require.Greater(t, len(msg), 10, "we must stress test with messages longer than 10 bytes")
   371  		return fmt.Sprintf("%s %d \n", msg, time.Now().UnixNano()) // add timestamp to make sure we don't send the same message twice
   372  	}
   373  }
   374  
   375  // MustEncodeEvent encodes the given event as a network message on the given channel and returns the marshaled bytes, failing the test on any encoding error.
   376  func MustEncodeEvent(t *testing.T, v interface{}, channel channels.Channel) []byte {
   377  	bz, err := unittest.NetworkCodec().Encode(v)
   378  	require.NoError(t, err)
   379  
   380  	msg := message.Message{
   381  		ChannelID: channel.String(),
   382  		Payload:   bz,
   383  	}
   384  	data, err := msg.Marshal()
   385  	require.NoError(t, err)
   386  
   387  	return data
   388  }
   389  
   390  // SubMustReceiveMessage checks that the subscription receives the given message before the given context expires.
   391  func SubMustReceiveMessage(t *testing.T, ctx context.Context, expectedMessage []byte, sub p2p.Subscription) {
   392  	received := make(chan struct{})
   393  	go func() {
   394  		msg, err := sub.Next(ctx)
   395  		require.NoError(t, err)
   396  		require.Equal(t, expectedMessage, msg.Data)
   397  		close(received)
   398  	}()
   399  
   400  	select {
   401  	case <-received:
   402  		return
   403  	case <-ctx.Done():
   404  		require.Fail(t, "timeout on receiving expected pubsub message")
   405  	}
   406  }
   407  
   408  // SubsMustReceiveMessage checks that all subscriptions receive the given message before the given context expires.
   409  func SubsMustReceiveMessage(t *testing.T, ctx context.Context, expectedMessage []byte, subs []p2p.Subscription) {
   410  	for _, sub := range subs {
   411  		SubMustReceiveMessage(t, ctx, expectedMessage, sub)
   412  	}
   413  }