github.com/onflow/flow-go@v0.33.17/network/p2p/test/fixtures.go

     1  package p2ptest
     2  
     3  import (
     4  	"bufio"
     5  	"context"
     6  	crand "math/rand"
     7  	"sync"
     8  	"testing"
     9  	"time"
    10  
    11  	dht "github.com/libp2p/go-libp2p-kad-dht"
    12  	pb "github.com/libp2p/go-libp2p-pubsub/pb"
    13  	"github.com/libp2p/go-libp2p/core/connmgr"
    14  	"github.com/libp2p/go-libp2p/core/host"
    15  	"github.com/libp2p/go-libp2p/core/network"
    16  	"github.com/libp2p/go-libp2p/core/peer"
    17  	"github.com/libp2p/go-libp2p/core/protocol"
    18  	"github.com/libp2p/go-libp2p/core/routing"
    19  	discoveryBackoff "github.com/libp2p/go-libp2p/p2p/discovery/backoff"
    20  	"github.com/rs/zerolog"
    21  	mockery "github.com/stretchr/testify/mock"
    22  	"github.com/stretchr/testify/require"
    23  	"golang.org/x/exp/rand"
    24  
    25  	"github.com/onflow/flow-go/config"
    26  	"github.com/onflow/flow-go/crypto"
    27  	"github.com/onflow/flow-go/model/flow"
    28  	"github.com/onflow/flow-go/module"
    29  	"github.com/onflow/flow-go/module/irrecoverable"
    30  	"github.com/onflow/flow-go/module/metrics"
    31  	flownet "github.com/onflow/flow-go/network"
    32  	"github.com/onflow/flow-go/network/channels"
    33  	"github.com/onflow/flow-go/network/internal/p2pfixtures"
    34  	"github.com/onflow/flow-go/network/message"
    35  	"github.com/onflow/flow-go/network/p2p"
    36  	p2pbuilder "github.com/onflow/flow-go/network/p2p/builder"
    37  	p2pbuilderconfig "github.com/onflow/flow-go/network/p2p/builder/config"
    38  	"github.com/onflow/flow-go/network/p2p/connection"
    39  	p2pdht "github.com/onflow/flow-go/network/p2p/dht"
    40  	mockp2p "github.com/onflow/flow-go/network/p2p/mock"
    41  	"github.com/onflow/flow-go/network/p2p/unicast/protocols"
    42  	"github.com/onflow/flow-go/network/p2p/utils"
    43  	validator "github.com/onflow/flow-go/network/validator/pubsub"
    44  	"github.com/onflow/flow-go/utils/logging"
    45  	"github.com/onflow/flow-go/utils/unittest"
    46  )
    47  
    48  const (
    49  	// libp2pNodeStartupTimeout is the timeout for starting a libp2p node in tests. Note that the
    50  	// timeout has been selected to be large enough to allow for the node to start up on a CI even when
    51  	// the test is run in parallel with other tests. Hence, no further increase of the timeout is
    52  	// expected to be necessary. Any failure to start a node within this timeout is likely to be
    53  	// caused by a bug in the code.
    54  	libp2pNodeStartupTimeout = 10 * time.Second
    55  	// libp2pNodeShutdownTimeout is the timeout for stopping a libp2p node in tests. Note that the
    56  	// timeout has been selected to be large enough to allow for the node to shut down on a CI even when
    57  	// the test is run in parallel with other tests. Hence, no further increase of the timeout is
    58  	// expected to be necessary. Any failure to shut down a node within this timeout is likely to be
    59  	// caused by a bug in the code.
    60  	libp2pNodeShutdownTimeout = 10 * time.Second
    61  
    62  	// topicIDFixtureLen is the length of the topic ID fixture for testing.
    63  	topicIDFixtureLen = 10
    64  	// messageIDFixtureLen is the length of the message ID fixture for testing.
    65  	messageIDFixtureLen = 10
    66  )
    67  
    68  // NetworkingKeyFixtures is a test helper that generates an ECDSA flow key pair.
    69  func NetworkingKeyFixtures(t *testing.T) crypto.PrivateKey {
    70  	seed := unittest.SeedFixture(48)
    71  	key, err := crypto.GeneratePrivateKey(crypto.ECDSASecp256k1, seed)
    72  	require.NoError(t, err)
    73  	return key
    74  }
    75  
    76  // NodeFixture is a test fixture that creates a single libp2p node with the given key, spork id, and options.
    77  // It returns the node and its identity.
    78  func NodeFixture(t *testing.T,
    79  	sporkID flow.Identifier,
    80  	dhtPrefix string,
    81  	idProvider module.IdentityProvider,
    82  	opts ...NodeFixtureParameterOption) (p2p.LibP2PNode, flow.Identity) {
    83  
    84  	defaultFlowConfig, err := config.DefaultConfig()
    85  	require.NoError(t, err)
    86  	require.NotNil(t, idProvider)
    87  	connectionGater := NewConnectionGater(idProvider, func(p peer.ID) error {
    88  		return nil
    89  	})
    90  	require.NotNil(t, connectionGater)
    91  
    92  	parameters := &NodeFixtureParameters{
    93  		NetworkingType: flownet.PrivateNetwork,
    94  		HandlerFunc:    func(network.Stream) {},
    95  		Unicasts:       nil,
    96  		Key:            NetworkingKeyFixtures(t),
    97  		Address:        unittest.DefaultAddress,
    98  		Logger:         unittest.Logger().Level(zerolog.WarnLevel),
    99  		Role:           flow.RoleCollection,
   100  		IdProvider:     idProvider,
   101  		MetricsCfg: &p2pbuilderconfig.MetricsConfig{
   102  			HeroCacheFactory: metrics.NewNoopHeroCacheMetricsFactory(),
   103  			Metrics:          metrics.NewNoopCollector(),
   104  		},
   105  		ResourceManager:   &network.NullResourceManager{},
   106  		ConnGater:         connectionGater,
   107  		PeerManagerConfig: PeerManagerConfigFixture(), // disabled by default
   108  		FlowConfig:        defaultFlowConfig,
   109  	}
   110  
   111  	for _, opt := range opts {
   112  		opt(parameters)
   113  	}
   114  
   115  	identity := unittest.IdentityFixture(unittest.WithNetworkingKey(parameters.Key.PublicKey()),
   116  		unittest.WithAddress(parameters.Address),
   117  		unittest.WithRole(parameters.Role))
   118  
   119  	logger := parameters.Logger.With().Hex("node_id", logging.ID(identity.NodeID)).Logger()
   120  
   121  	connManager, err := connection.NewConnManager(logger, parameters.MetricsCfg.Metrics, &parameters.FlowConfig.NetworkConfig.ConnectionManager)
   122  	require.NoError(t, err)
   123  
   124  	builder := p2pbuilder.NewNodeBuilder(
   125  		logger,
   126  		&parameters.FlowConfig.NetworkConfig.GossipSub,
   127  		parameters.MetricsCfg,
   128  		parameters.NetworkingType,
   129  		parameters.Address,
   130  		parameters.Key,
   131  		sporkID,
   132  		parameters.IdProvider,
   133  		&parameters.FlowConfig.NetworkConfig.ResourceManager,
   134  		parameters.PeerManagerConfig,
   135  		&p2p.DisallowListCacheConfig{
   136  			MaxSize: uint32(1000),
   137  			Metrics: metrics.NewNoopCollector(),
   138  		},
   139  		&p2pbuilderconfig.UnicastConfig{
   140  			Unicast:                parameters.FlowConfig.NetworkConfig.Unicast,
   141  			RateLimiterDistributor: parameters.UnicastRateLimiterDistributor,
   142  		}).
   143  		SetConnectionManager(connManager).
   144  		SetResourceManager(parameters.ResourceManager)
   145  
   146  	if parameters.DhtOptions != nil && (parameters.Role != flow.RoleAccess && parameters.Role != flow.RoleExecution) {
   147  		require.Fail(t, "DHT should not be enabled for non-access and non-execution nodes")
   148  	}
   149  
   150  	if parameters.Role == flow.RoleAccess || parameters.Role == flow.RoleExecution {
   151  		// Only access and execution nodes need to run DHT;
   152  		// Access nodes and execution nodes need DHT to run a blob service.
   153  		// Moreover, access nodes run a DHT to let un-staked (public) access nodes find each other on the public network.
   154  		builder.SetRoutingSystem(func(ctx context.Context, host host.Host) (routing.Routing, error) {
   155  			return p2pdht.NewDHT(ctx,
   156  				host,
   157  				protocol.ID(protocols.FlowDHTProtocolIDPrefix+sporkID.String()+"/"+dhtPrefix),
   158  				logger,
   159  				parameters.MetricsCfg.Metrics,
   160  				parameters.DhtOptions...)
   161  		})
   162  	}
   163  
   164  	if parameters.GossipSubRpcInspectorSuiteFactory != nil {
   165  		builder.OverrideDefaultRpcInspectorSuiteFactory(parameters.GossipSubRpcInspectorSuiteFactory)
   166  	}
   167  
   168  	if parameters.ResourceManager != nil {
   169  		builder.SetResourceManager(parameters.ResourceManager)
   170  	}
   171  
   172  	if parameters.ConnGater != nil {
   173  		builder.SetConnectionGater(parameters.ConnGater)
   174  	}
   175  
   176  	if parameters.PeerScoringEnabled {
   177  		builder.OverrideGossipSubScoringConfig(parameters.PeerScoringConfigOverride)
   178  	}
   179  
   180  	if parameters.GossipSubFactory != nil && parameters.GossipSubConfig != nil {
   181  		builder.SetGossipSubFactory(parameters.GossipSubFactory, parameters.GossipSubConfig)
   182  	}
   183  
   184  	if parameters.ConnManager != nil {
   185  		builder.SetConnectionManager(parameters.ConnManager)
   186  	}
   187  
   188  	n, err := builder.Build()
   189  	require.NoError(t, err)
   190  
   191  	if parameters.HandlerFunc != nil {
   192  		err = n.WithDefaultUnicastProtocol(parameters.HandlerFunc, parameters.Unicasts)
   193  		require.NoError(t, err)
   194  	}
   195  
   196  	// get the actual IP and port that have been assigned by the subsystem
   197  	ip, port, err := n.GetIPPort()
   198  	require.NoError(t, err)
   199  	identity.Address = ip + ":" + port
   200  
   201  	if parameters.PeerProvider != nil {
   202  		n.WithPeersProvider(parameters.PeerProvider)
   203  	}
   204  
   205  	return n, *identity
   206  }
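
        // exampleNodeFixtureUsage is an illustrative sketch (not part of the fixture API) of how a test
        // typically wires NodeFixture together with StartNode and StopNode. The identity provider is assumed
        // to be supplied by the caller (tests usually pass a mock module.IdentityProvider), and
        // irrecoverable.NewMockSignalerContext is assumed to be the helper used to obtain a SignalerContext.
        func exampleNodeFixtureUsage(t *testing.T, idProvider module.IdentityProvider) {
        	sporkID := unittest.IdentifierFixture()

        	// create a single collection node (the default role) on localhost with an OS-assigned port.
        	node, identity := NodeFixture(t, sporkID, "example-dht", idProvider, WithRole(flow.RoleCollection))
        	_ = identity // the returned identity carries the address actually assigned to the node.

        	ctx, cancel := context.WithCancel(context.Background())
        	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)

        	StartNode(t, signalerCtx, node)
        	defer StopNode(t, node, cancel)
        }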
   207  
   208  // RegisterPeerProviders registers the peer provider for all the nodes in the input slice.
   209  // All node IDs are registered as the peer provider for every node.
   210  // This means that every node will be connected to every other node by the peer manager.
   211  // This is useful for suppressing the "peer provider not set" verbose warning logs in test scenarios where
   212  // it is desirable to have all nodes connected to each other.
   213  // Args:
   214  // - t: testing.T- the test object; not used, but included in the signature to defensively prevent misuse of the test utility in production.
   215  // - nodes: nodes to register the peer provider for, each node will be connected to all other nodes.
   216  func RegisterPeerProviders(_ *testing.T, nodes []p2p.LibP2PNode) {
   217  	ids := peer.IDSlice{}
   218  	for _, node := range nodes {
   219  		ids = append(ids, node.ID())
   220  	}
   221  	for _, node := range nodes {
   222  		node.WithPeersProvider(func() peer.IDSlice {
   223  			return ids
   224  		})
   225  	}
   226  }
   227  
   228  type NodeFixtureParameterOption func(*NodeFixtureParameters)
   229  
   230  type NodeFixtureParameters struct {
   231  	HandlerFunc                       network.StreamHandler
   232  	NetworkingType                    flownet.NetworkingType
   233  	Unicasts                          []protocols.ProtocolName
   234  	Key                               crypto.PrivateKey
   235  	Address                           string
   236  	DhtOptions                        []dht.Option
   237  	Role                              flow.Role
   238  	Logger                            zerolog.Logger
   239  	PeerScoringEnabled                bool
   240  	IdProvider                        module.IdentityProvider
   241  	PeerScoringConfigOverride         *p2p.PeerScoringConfigOverride
   242  	PeerManagerConfig                 *p2pbuilderconfig.PeerManagerConfig
   243  	PeerProvider                      p2p.PeersProvider // peer manager parameter
   244  	ConnGater                         p2p.ConnectionGater
   245  	ConnManager                       connmgr.ConnManager
   246  	GossipSubFactory                  p2p.GossipSubFactoryFunc
   247  	GossipSubConfig                   p2p.GossipSubAdapterConfigFunc
   248  	MetricsCfg                        *p2pbuilderconfig.MetricsConfig
   249  	ResourceManager                   network.ResourceManager
   250  	GossipSubRpcInspectorSuiteFactory p2p.GossipSubRpcInspectorSuiteFactoryFunc
   251  	FlowConfig                        *config.FlowConfig
   252  	UnicastRateLimiterDistributor     p2p.UnicastRateLimiterDistributor
   253  }
   254  
   255  func WithUnicastRateLimitDistributor(distributor p2p.UnicastRateLimiterDistributor) NodeFixtureParameterOption {
   256  	return func(p *NodeFixtureParameters) {
   257  		p.UnicastRateLimiterDistributor = distributor
   258  	}
   259  }
   260  
   261  func OverrideGossipSubRpcInspectorSuiteFactory(factory p2p.GossipSubRpcInspectorSuiteFactoryFunc) NodeFixtureParameterOption {
   262  	return func(p *NodeFixtureParameters) {
   263  		p.GossipSubRpcInspectorSuiteFactory = factory
   264  	}
   265  }
   266  
   267  func OverrideFlowConfig(cfg *config.FlowConfig) NodeFixtureParameterOption {
   268  	return func(p *NodeFixtureParameters) {
   269  		p.FlowConfig = cfg
   270  	}
   271  }
   272  
   273  // EnablePeerScoringWithOverride enables peer scoring for the GossipSub pubsub system with the given override.
   274  // Any existing peer scoring config attribute that is set in the override will override the default peer scoring config.
   275  // Anything that is left to nil or zero value in the override will be ignored and the default value will be used.
   276  // Note: it is not recommended to override the default peer scoring config in production unless you know what you are doing.
   277  // Default Use Tip: use p2p.PeerScoringConfigNoOverride as the argument to this function to enable peer scoring without any override.
   278  // Args:
   279  //   - PeerScoringConfigOverride: override for the peer scoring config- Recommended to use p2p.PeerScoringConfigNoOverride for production or when
   280  //     you don't want to override the default peer scoring config.
   281  //
   282  // Returns:
   283  // - NodeFixtureParameterOption: a function that can be passed to the NodeFixture function to enable peer scoring.
   284  func EnablePeerScoringWithOverride(override *p2p.PeerScoringConfigOverride) NodeFixtureParameterOption {
   285  	return func(p *NodeFixtureParameters) {
   286  		p.PeerScoringEnabled = true
   287  		p.PeerScoringConfigOverride = override
   288  	}
   289  }
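
        // A minimal usage sketch of the override mechanics described above (illustrative only, not part of
        // the fixture API); the "no override" form relies on p2p.PeerScoringConfigNoOverride, which the doc
        // comment above recommends for the common case:
        //
        //	node, _ := NodeFixture(t, sporkID, "example-dht", idProvider,
        //		EnablePeerScoringWithOverride(p2p.PeerScoringConfigNoOverride))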
   290  
   291  func WithDefaultStreamHandler(handler network.StreamHandler) NodeFixtureParameterOption {
   292  	return func(p *NodeFixtureParameters) {
   293  		p.HandlerFunc = handler
   294  	}
   295  }
   296  
   297  func WithPeerManagerEnabled(cfg *p2pbuilderconfig.PeerManagerConfig, peerProvider p2p.PeersProvider) NodeFixtureParameterOption {
   298  	return func(p *NodeFixtureParameters) {
   299  		p.PeerManagerConfig = cfg
   300  		p.PeerProvider = peerProvider
   301  	}
   302  }
   303  
   304  func WithPreferredUnicasts(unicasts []protocols.ProtocolName) NodeFixtureParameterOption {
   305  	return func(p *NodeFixtureParameters) {
   306  		p.Unicasts = unicasts
   307  	}
   308  }
   309  
   310  func WithNetworkingPrivateKey(key crypto.PrivateKey) NodeFixtureParameterOption {
   311  	return func(p *NodeFixtureParameters) {
   312  		p.Key = key
   313  	}
   314  }
   315  
   316  func WithNetworkingAddress(address string) NodeFixtureParameterOption {
   317  	return func(p *NodeFixtureParameters) {
   318  		p.Address = address
   319  	}
   320  }
   321  
   322  func WithDHTOptions(opts ...dht.Option) NodeFixtureParameterOption {
   323  	return func(p *NodeFixtureParameters) {
   324  		p.DhtOptions = opts
   325  	}
   326  }
   327  
   328  func WithConnectionGater(connGater p2p.ConnectionGater) NodeFixtureParameterOption {
   329  	return func(p *NodeFixtureParameters) {
   330  		p.ConnGater = connGater
   331  	}
   332  }
   333  
   334  func WithConnectionManager(connManager connmgr.ConnManager) NodeFixtureParameterOption {
   335  	return func(p *NodeFixtureParameters) {
   336  		p.ConnManager = connManager
   337  	}
   338  }
   339  
   340  func WithRole(role flow.Role) NodeFixtureParameterOption {
   341  	return func(p *NodeFixtureParameters) {
   342  		p.Role = role
   343  	}
   344  }
   345  
   346  func WithPeerScoreParamsOption(cfg *p2p.PeerScoringConfigOverride) NodeFixtureParameterOption {
   347  	return func(p *NodeFixtureParameters) {
   348  		p.PeerScoringConfigOverride = cfg
   349  	}
   350  }
   351  
   352  func WithLogger(logger zerolog.Logger) NodeFixtureParameterOption {
   353  	return func(p *NodeFixtureParameters) {
   354  		p.Logger = logger
   355  	}
   356  }
   357  
   358  func WithMetricsCollector(metrics module.NetworkMetrics) NodeFixtureParameterOption {
   359  	return func(p *NodeFixtureParameters) {
   360  		p.MetricsCfg.Metrics = metrics
   361  	}
   362  }
   363  
   364  // WithDefaultResourceManager sets the resource manager to nil, which causes the node to use the default resource manager.
   365  // If this option is not used, the node uses the resource manager set by the fixture (the infinite resource manager).
   366  func WithDefaultResourceManager() NodeFixtureParameterOption {
   367  	return func(p *NodeFixtureParameters) {
   368  		p.ResourceManager = nil
   369  	}
   370  }
   371  
   372  // WithResourceManager sets the resource manager to the provided resource manager.
   373  // If this option is not used, the node uses the resource manager set by the fixture (the infinite resource manager).
   374  func WithResourceManager(resourceManager network.ResourceManager) NodeFixtureParameterOption {
   375  	return func(p *NodeFixtureParameters) {
   376  		p.ResourceManager = resourceManager
   377  	}
   378  }
   379  
   380  func WithUnicastHandlerFunc(handler network.StreamHandler) NodeFixtureParameterOption {
   381  	return func(p *NodeFixtureParameters) {
   382  		p.HandlerFunc = handler
   383  	}
   384  }
   385  
   386  // PeerManagerConfigFixture is a test fixture that sets the default config for the peer manager.
   387  func PeerManagerConfigFixture(opts ...func(*p2pbuilderconfig.PeerManagerConfig)) *p2pbuilderconfig.PeerManagerConfig {
   388  	cfg := &p2pbuilderconfig.PeerManagerConfig{
   389  		ConnectionPruning: true,
   390  		UpdateInterval:    1 * time.Second,
   391  		ConnectorFactory:  connection.DefaultLibp2pBackoffConnectorFactory(),
   392  	}
   393  	for _, opt := range opts {
   394  		opt(cfg)
   395  	}
   396  	return cfg
   397  }
   398  
   399  // WithZeroJitterAndZeroBackoff is an option for PeerManagerConfigFixture that overrides the connector factory.
   400  // It uses a backoff connector with zero jitter and zero backoff.
   401  func WithZeroJitterAndZeroBackoff(t *testing.T) func(*p2pbuilderconfig.PeerManagerConfig) {
   402  	return func(cfg *p2pbuilderconfig.PeerManagerConfig) {
   403  		cfg.ConnectorFactory = func(host host.Host) (p2p.Connector, error) {
   404  			cacheSize := 100
   405  			dialTimeout := time.Minute * 2
   406  			backoff := discoveryBackoff.NewExponentialBackoff(1*time.Second, 1*time.Hour, func(_, _, _ time.Duration, _ *crand.Rand) time.Duration {
   407  				return 0 // no jitter
   408  			}, time.Second, 1, 0, crand.NewSource(crand.Int63()))
   409  			backoffConnector, err := discoveryBackoff.NewBackoffConnector(host, cacheSize, dialTimeout, backoff)
   410  			require.NoError(t, err)
   411  			return backoffConnector, nil
   412  		}
   413  	}
   414  }
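
        // An illustrative sketch (not part of the fixture API) of combining PeerManagerConfigFixture with
        // WithZeroJitterAndZeroBackoff and enabling the peer manager on a fixture node. The peers provider
        // here returns an empty slice purely for brevity; tests normally return the peer IDs of the other
        // fixture nodes (see RegisterPeerProviders):
        //
        //	cfg := PeerManagerConfigFixture(WithZeroJitterAndZeroBackoff(t))
        //	node, _ := NodeFixture(t, sporkID, "example-dht", idProvider,
        //		WithPeerManagerEnabled(cfg, func() peer.IDSlice { return peer.IDSlice{} }))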
   415  
   416  // NodesFixture is a test fixture that creates the given number of libp2p nodes with the given options.
   417  // It returns the nodes and their identities.
   418  func NodesFixture(t *testing.T,
   419  	sporkID flow.Identifier,
   420  	dhtPrefix string,
   421  	count int,
   422  	idProvider module.IdentityProvider,
   423  	opts ...NodeFixtureParameterOption) ([]p2p.LibP2PNode, flow.IdentityList) {
   424  	var nodes []p2p.LibP2PNode
   425  
   426  	// creating nodes
   427  	var identities flow.IdentityList
   428  	for i := 0; i < count; i++ {
   429  		// create a node on localhost with a random port assigned by the OS
   430  		node, identity := NodeFixture(t, sporkID, dhtPrefix, idProvider, opts...)
   431  		nodes = append(nodes, node)
   432  		identities = append(identities, &identity)
   433  	}
   434  
   435  	return nodes, identities
   436  }
   437  
   438  // StartNodes starts all nodes in the input slice using the provided context, timing out if nodes are
   439  // not all Ready() within libp2pNodeStartupTimeout.
   440  func StartNodes(t *testing.T, ctx irrecoverable.SignalerContext, nodes []p2p.LibP2PNode) {
   441  	rdas := make([]module.ReadyDoneAware, 0, len(nodes))
   442  	for _, node := range nodes {
   443  		node.Start(ctx)
   444  		rdas = append(rdas, node)
   445  
   446  		if peerManager := node.PeerManagerComponent(); peerManager != nil {
   447  			// we need to start the peer manager post the node startup (if such component exists).
   448  			peerManager.Start(ctx)
   449  			rdas = append(rdas, peerManager)
   450  		}
   451  	}
   452  	for _, r := range rdas {
   453  		// Any failure to start a node within this timeout is likely to be caused by a bug in the code.
   454  		unittest.RequireComponentsReadyBefore(t, libp2pNodeStartupTimeout, r)
   455  	}
   456  }
   457  
   458  // StartNode starts a single node using the provided context, timing out if the node is not Ready()
   459  // within libp2pNodeStartupTimeout (i.e., 10 seconds).
   460  // Args:
   461  // - t: testing.T- the test object.
   462  // - ctx: context to use.
   463  // - node: node to start.
   464  func StartNode(t *testing.T, ctx irrecoverable.SignalerContext, node p2p.LibP2PNode) {
   465  	node.Start(ctx)
   466  	// Any failure to start a node within this timeout is likely to be caused by a bug in the code.
   467  	unittest.RequireComponentsReadyBefore(t, libp2pNodeStartupTimeout, node)
   468  }
   469  
   470  // StopNodes stops all nodes in the input slice using the provided cancel func, timing out if nodes are
   471  // not all Done() within libp2pNodeShutdownTimeout (i.e., 10 seconds).
   472  // Args:
   473  // - t: testing.T- the test object.
   474  // - nodes: nodes to stop.
   475  // - cancel: cancel func, the function first cancels the context and then waits for the nodes to be done.
   476  func StopNodes(t *testing.T, nodes []p2p.LibP2PNode, cancel context.CancelFunc) {
   477  	cancel()
   478  	for _, node := range nodes {
   479  		// Any failure to stop a node within this timeout is likely to be caused by a bug in the code.
   480  		unittest.RequireComponentsDoneBefore(t, libp2pNodeShutdownTimeout, node)
   481  	}
   482  }
   483  
   484  // StopNode stops a single node using the provided cancel func, timing out if the node is not Done()
   485  // within libp2pNodeShutdownTimeout (i.e., 10 seconds).
   486  // Args:
   487  // - t: testing.T- the test object.
   488  // - node: node to stop.
   489  // - cancel: cancel func, the function first cancels the context and then waits for the nodes to be done.
   490  func StopNode(t *testing.T, node p2p.LibP2PNode, cancel context.CancelFunc) {
   491  	cancel()
   492  	// Any failure to stop a node within this timeout is likely to be caused by a bug in the code.
   493  	unittest.RequireComponentsDoneBefore(t, libp2pNodeShutdownTimeout, node)
   494  }
   495  
   496  // StreamHandlerFixture returns a stream handler that writes the received message to the given channel.
   497  // StreamHandlerFixture returns a stream handler that writes the received message to the returned channel.
   498  	ch := make(chan string, 1) // channel to receive messages
   499  
   500  	return func(s network.Stream) {
   501  		rw := bufio.NewReadWriter(bufio.NewReader(s), bufio.NewWriter(s))
   502  		str, err := rw.ReadString('\n')
   503  		require.NoError(t, err)
   504  		ch <- str
   505  	}, ch
   506  }
   507  
   508  // LetNodesDiscoverEachOther connects all nodes to each other on the pubsub mesh.
   509  func LetNodesDiscoverEachOther(t *testing.T, ctx context.Context, nodes []p2p.LibP2PNode, ids flow.IdentityList) {
   510  	for _, node := range nodes {
   511  		for i, other := range nodes {
   512  			if node == other {
   513  				continue
   514  			}
   515  			otherPInfo, err := utils.PeerAddressInfo(*ids[i])
   516  			require.NoError(t, err)
   517  			require.NoError(t, node.ConnectToPeer(ctx, otherPInfo))
   518  		}
   519  	}
   520  }
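
        // exampleMultiNodeLifecycle is an illustrative sketch (not part of the fixture API) of the typical
        // multi-node flow: build the fixtures, start them under a signaler context, let them discover each
        // other, and tear them down. The identity provider is assumed to be supplied by the test, and
        // irrecoverable.NewMockSignalerContext is assumed to be the helper used to obtain a SignalerContext.
        func exampleMultiNodeLifecycle(t *testing.T, idProvider module.IdentityProvider) {
        	sporkID := unittest.IdentifierFixture()
        	nodes, ids := NodesFixture(t, sporkID, "example-dht", 3, idProvider)

        	ctx, cancel := context.WithCancel(context.Background())
        	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)

        	StartNodes(t, signalerCtx, nodes)
        	defer StopNodes(t, nodes, cancel)

        	// connect every node to every other node so that they can exchange messages.
        	LetNodesDiscoverEachOther(t, ctx, nodes, ids)
        }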
   521  
   522  // TryConnectionAndEnsureConnected tries connecting nodes to each other and ensures that the given nodes are connected to each other.
   523  // It fails the test if any of the nodes is not connected to any other node.
   524  func TryConnectionAndEnsureConnected(t *testing.T, ctx context.Context, nodes []p2p.LibP2PNode) {
   525  	for _, node := range nodes {
   526  		for _, other := range nodes {
   527  			if node == other {
   528  				continue
   529  			}
   530  			require.NoError(t, node.Host().Connect(ctx, other.Host().Peerstore().PeerInfo(other.ID())))
   531  			// the other node should be connected to this node
   532  			require.Equal(t, node.Host().Network().Connectedness(other.ID()), network.Connected)
   533  			// at least one connection should be established
   534  			require.True(t, len(node.Host().Network().ConnsToPeer(other.ID())) > 0)
   535  		}
   536  	}
   537  }
   538  
   539  // RequireConnectedEventually ensures that the given nodes are eventually connected to each other.
   540  // It fails the test if any pair of nodes is not connected before the timeout expires.
   541  // Args:
   542  // - nodes: the nodes to check
   543  // - tick: the tick duration
   544  // - timeout: the timeout duration
   545  func RequireConnectedEventually(t *testing.T, nodes []p2p.LibP2PNode, tick time.Duration, timeout time.Duration) {
   546  	require.Eventually(t, func() bool {
   547  		for _, node := range nodes {
   548  			for _, other := range nodes {
   549  				if node == other {
   550  					continue
   551  				}
   552  				if node.Host().Network().Connectedness(other.ID()) != network.Connected {
   553  					return false
   554  				}
   555  				if len(node.Host().Network().ConnsToPeer(other.ID())) == 0 {
   556  					return false
   557  				}
   558  			}
   559  		}
   560  		return true
   561  	}, timeout, tick)
   562  }
   563  
   564  // RequireEventuallyNotConnected ensures eventually that the given groups of nodes are not connected to each other.
   565  // It fails the test if any of the nodes from groupA is connected to any of the nodes from groupB.
   566  // Args:
   567  // - groupA: the first group of nodes
   568  // - groupB: the second group of nodes
   569  // - tick: the tick duration
   570  // - timeout: the timeout duration
   571  func RequireEventuallyNotConnected(t *testing.T, groupA []p2p.LibP2PNode, groupB []p2p.LibP2PNode, tick time.Duration, timeout time.Duration) {
   572  	require.Eventually(t, func() bool {
   573  		for _, node := range groupA {
   574  			for _, other := range groupB {
   575  				if node.Host().Network().Connectedness(other.ID()) == network.Connected {
   576  					return false
   577  				}
   578  				if len(node.Host().Network().ConnsToPeer(other.ID())) > 0 {
   579  					return false
   580  				}
   581  			}
   582  		}
   583  		return true
   584  	}, timeout, tick)
   585  }
   586  
   587  // EnsureStreamCreationInBothDirections ensures that between each pair of nodes in the given list, a stream is created in both directions.
   588  func EnsureStreamCreationInBothDirections(t *testing.T, ctx context.Context, nodes []p2p.LibP2PNode) {
   589  	for _, this := range nodes {
   590  		for _, other := range nodes {
   591  			if this == other {
   592  				continue
   593  			}
   594  			// stream creation should pass without error
   595  			err := this.OpenAndWriteOnStream(ctx, other.ID(), t.Name(), func(stream network.Stream) error {
   596  				// do not write anything; only require that the stream is established
   597  				require.NotNil(t, stream)
   598  				return nil
   599  			})
   600  			require.NoError(t, err)
   601  
   602  		}
   603  	}
   604  }
   605  
   606  // EnsurePubsubMessageExchange ensures that the given connected nodes exchange the given message on the given channel through pubsub.
   607  // Args:
   608  //   - nodes: the nodes to exchange messages
   609  //   - ctx: the context- the test will fail if the context expires.
   610  //   - topic: the topic to exchange messages on
   611  //   - count: the number of messages to exchange from each node.
   612  //   - messageFactory: a function that creates a unique message to be published by the node.
   613  //     The function should return a different message each time it is called.
   614  //
   615  // Note-1: this function assumes a timeout of 5 seconds for each message to be received.
   616  // Note-2: TryConnectionAndEnsureConnected() must be called to connect all nodes before calling this function.
   617  func EnsurePubsubMessageExchange(t *testing.T, ctx context.Context, nodes []p2p.LibP2PNode, topic channels.Topic, count int, messageFactory func() interface{}) {
   618  	subs := make([]p2p.Subscription, len(nodes))
   619  	for i, node := range nodes {
   620  		ps, err := node.Subscribe(topic, validator.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter()))
   621  		require.NoError(t, err)
   622  		subs[i] = ps
   623  	}
   624  
   625  	// let subscriptions propagate
   626  	time.Sleep(1 * time.Second)
   627  
   628  	for _, node := range nodes {
   629  		for i := 0; i < count; i++ {
   630  			// creates a unique message to be published by the node
   631  			payload := messageFactory()
   632  			outgoingMessageScope, err := message.NewOutgoingScope(flow.IdentifierList{unittest.IdentifierFixture()},
   633  				topic,
   634  				payload,
   635  				unittest.NetworkCodec().Encode,
   636  				message.ProtocolTypePubSub)
   637  			require.NoError(t, err)
   638  			require.NoError(t, node.Publish(ctx, outgoingMessageScope))
   639  
   640  			// wait for the message to be received by all nodes
   641  			ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
   642  			expectedReceivedData, err := outgoingMessageScope.Proto().Marshal()
   643  			require.NoError(t, err)
   644  			p2pfixtures.SubsMustReceiveMessage(t, ctx, expectedReceivedData, subs)
   645  			cancel()
   646  		}
   647  	}
   648  }
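
        // A usage sketch for the exchange helper above (illustrative only): the topic is derived from a test
        // channel, and the message factory is assumed to produce a payload that the network codec can encode
        // (e.g. unittest.ProposalFixture()):
        //
        //	topic := channels.TopicFromChannel(channels.TestNetworkChannel, sporkID)
        //	TryConnectionAndEnsureConnected(t, ctx, nodes)
        //	EnsurePubsubMessageExchange(t, ctx, nodes, topic, 1, func() interface{} {
        //		return unittest.ProposalFixture()
        //	})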
   649  
   650  // EnsurePubsubMessageExchangeFromNode ensures that the given sender node exchanges the given messages on the given topic through pubsub with the receiver node.
   651  // Args:
   652  // - t: *testing.T instance
   653  // - ctx: the context- the test will fail if the context expires.
   654  // - sender: the node that sends the message to the other node.
   655  // - receiverNode: the node that receives the message from the other node.
   656  // - receiverIdentifier: the identifier of the receiver node.
   657  // - topic: the topic to exchange messages on.
   658  // - count: the number of messages to exchange from `sender` to `receiver`.
   659  // - messageFactory: a function that creates a unique message to be published by the sender.
   660  // Note: this function assumes a timeout of 5 seconds for each message to be received.
   661  func EnsurePubsubMessageExchangeFromNode(t *testing.T,
   662  	ctx context.Context,
   663  	sender p2p.LibP2PNode,
   664  	receiverNode p2p.LibP2PNode,
   665  	receiverIdentifier flow.Identifier,
   666  	topic channels.Topic,
   667  	count int,
   668  	messageFactory func() interface{}) {
   669  	_, err := sender.Subscribe(topic, validator.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter()))
   670  	require.NoError(t, err)
   671  
   672  	toSub, err := receiverNode.Subscribe(topic, validator.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter()))
   673  	require.NoError(t, err)
   674  
   675  	// let subscriptions propagate
   676  	time.Sleep(1 * time.Second)
   677  
   678  	for i := 0; i < count; i++ {
   679  		// creates a unique message to be published by the node
   680  		payload := messageFactory()
   681  		outgoingMessageScope, err := message.NewOutgoingScope(flow.IdentifierList{receiverIdentifier},
   682  			topic,
   683  			payload,
   684  			unittest.NetworkCodec().Encode,
   685  			message.ProtocolTypePubSub)
   686  		require.NoError(t, err)
   687  		require.NoError(t, sender.Publish(ctx, outgoingMessageScope))
   688  
   689  		// wait for the message to be received by all nodes
   690  		ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
   691  		expectedReceivedData, err := outgoingMessageScope.Proto().Marshal()
   692  		require.NoError(t, err)
   693  		p2pfixtures.SubsMustReceiveMessage(t, ctx, expectedReceivedData, []p2p.Subscription{toSub})
   694  		cancel()
   695  	}
   696  }
   697  
   698  // EnsureNotConnectedBetweenGroups ensures no connection exists between the given groups of nodes.
   699  func EnsureNotConnectedBetweenGroups(t *testing.T, ctx context.Context, groupA []p2p.LibP2PNode, groupB []p2p.LibP2PNode) {
   700  	// ensure no connection from group A to group B
   701  	p2pfixtures.EnsureNotConnected(t, ctx, groupA, groupB)
   702  	// ensure no connection from group B to group A
   703  	p2pfixtures.EnsureNotConnected(t, ctx, groupB, groupA)
   704  }
   705  
   706  // EnsureNoPubsubMessageExchange ensures that no pubsub message is exchanged "from" the given nodes "to" the given nodes.
   707  // Args:
   708  // - from: the nodes that send messages to the other group but whose messages must not be received by the other group.
   709  // - to: the nodes that are the target of the messages sent by the other group ("from") but must not receive any message from them.
   710  // - toIdentifiers: the Flow identifiers of the "to" nodes, used as the targets of the published messages.
   711  // - topic: the topic to exchange messages on.
   712  // - count: the number of messages to exchange from each node.
   713  // - messageFactory: a function that creates a unique message to be published by the node.
   714  func EnsureNoPubsubMessageExchange(t *testing.T,
   715  	ctx context.Context,
   716  	from []p2p.LibP2PNode,
   717  	to []p2p.LibP2PNode,
   718  	toIdentifiers flow.IdentifierList,
   719  	topic channels.Topic,
   720  	count int,
   721  	messageFactory func() interface{}) {
   722  	subs := make([]p2p.Subscription, len(to))
   723  	tv := validator.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter())
   724  	var err error
   725  	for _, node := range from {
   726  		_, err = node.Subscribe(topic, tv)
   727  		require.NoError(t, err)
   728  	}
   729  
   730  	for i, node := range to {
   731  		s, err := node.Subscribe(topic, tv)
   732  		require.NoError(t, err)
   733  		subs[i] = s
   734  	}
   735  
   736  	// let subscriptions propagate
   737  	time.Sleep(1 * time.Second)
   738  
   739  	wg := &sync.WaitGroup{}
   740  	for _, node := range from {
   741  		node := node // capture range variable
   742  		for i := 0; i < count; i++ {
   743  			wg.Add(1)
   744  			go func() {
   745  				// creates a unique message to be published by the node.
   746  
   747  				payload := messageFactory()
   748  				outgoingMessageScope, err := message.NewOutgoingScope(toIdentifiers, topic, payload, unittest.NetworkCodec().Encode, message.ProtocolTypePubSub)
   749  				require.NoError(t, err)
   750  				require.NoError(t, node.Publish(ctx, outgoingMessageScope))
   751  
   752  				ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
   753  				p2pfixtures.SubsMustNeverReceiveAnyMessage(t, ctx, subs)
   754  				cancel()
   755  				wg.Done()
   756  			}()
   757  		}
   758  	}
   759  
   760  	// we wait for 5 seconds at most for the messages to be exchanged, hence we wait for a total of 6 seconds here to ensure
   761  	// that the goroutines are done in a timely manner.
   762  	unittest.RequireReturnsBefore(t, wg.Wait, 6*time.Second, "timed out waiting for messages to be exchanged")
   763  }
   764  
   765  // EnsureNoPubsubExchangeBetweenGroups ensures that no pubsub message is exchanged between the given groups of nodes.
   766  // Args:
   767  // - t: *testing.T instance
   768  // - ctx: context.Context instance
   769  // - groupANodes: first group of nodes- no message should be exchanged from any node of this group to the other group.
   770  // - groupAIdentifiers: identifiers of the nodes in the first group.
   771  // - groupBNodes: second group of nodes- no message should be exchanged from any node of this group to the other group.
   772  // - groupBIdentifiers: identifiers of the nodes in the second group.
   773  // - topic: pubsub topic- no message should be exchanged on this topic.
   774  // - count: number of messages to be exchanged- no message should be exchanged.
   775  // - messageFactory: function to create a unique message to be published by the node.
   776  func EnsureNoPubsubExchangeBetweenGroups(t *testing.T,
   777  	ctx context.Context,
   778  	groupANodes []p2p.LibP2PNode,
   779  	groupAIdentifiers flow.IdentifierList,
   780  	groupBNodes []p2p.LibP2PNode,
   781  	groupBIdentifiers flow.IdentifierList,
   782  	topic channels.Topic,
   783  	count int,
   784  	messageFactory func() interface{}) {
   785  	// ensure no message exchange from group A to group B
   786  	EnsureNoPubsubMessageExchange(t, ctx, groupANodes, groupBNodes, groupBIdentifiers, topic, count, messageFactory)
   787  	// ensure no message exchange from group B to group A
   788  	EnsureNoPubsubMessageExchange(t, ctx, groupBNodes, groupANodes, groupAIdentifiers, topic, count, messageFactory)
   789  }
   790  
   791  // PeerIdSliceFixture returns a slice of random peer IDs for testing.
   792  // A peer ID is the identifier of a node on the libp2p network.
   793  // Args:
   794  // - t: *testing.T instance
   795  // - n: number of peer IDs to generate
   796  // Returns:
   797  // - peer.IDSlice: slice of peer IDs
   798  func PeerIdSliceFixture(t *testing.T, n int) peer.IDSlice {
   799  	ids := make([]peer.ID, n)
   800  	for i := 0; i < n; i++ {
   801  		ids[i] = unittest.PeerIdFixture(t)
   802  	}
   803  	return ids
   804  }
   805  
   806  // NewConnectionGater creates a new connection gater for testing with given allow listing filter.
   807  func NewConnectionGater(idProvider module.IdentityProvider, allowListFilter p2p.PeerFilter) p2p.ConnectionGater {
   808  	filters := []p2p.PeerFilter{allowListFilter}
   809  	return connection.NewConnGater(unittest.Logger(), idProvider, connection.WithOnInterceptPeerDialFilters(filters), connection.WithOnInterceptSecuredFilters(filters))
   810  }
   811  
   812  // MockInspectorNotificationDistributorReadyDoneAware mocks the Ready and Done methods of the distributor to return a channel that is already closed,
   813  // so that the distributor is considered ready and done when the test needs it.
   814  func MockInspectorNotificationDistributorReadyDoneAware(d *mockp2p.GossipSubInspectorNotificationDistributor) {
   815  	d.On("Start", mockery.Anything).Return().Maybe()
   816  	d.On("Ready").Return(func() <-chan struct{} {
   817  		ch := make(chan struct{})
   818  		close(ch)
   819  		return ch
   820  	}()).Maybe()
   821  	d.On("Done").Return(func() <-chan struct{} {
   822  		ch := make(chan struct{})
   823  		close(ch)
   824  		return ch
   825  	}()).Maybe()
   826  }
   827  
   828  // MockScoringRegistrySubscriptionValidatorReadyDoneAware mocks the Ready and Done methods of the subscription validator to return a channel that is already closed,
   829  // so that the subscription validator is considered ready and done when the test needs it.
   830  func MockScoringRegistrySubscriptionValidatorReadyDoneAware(s *mockp2p.SubscriptionValidator) {
   831  	s.On("Start", mockery.Anything).Return().Maybe()
   832  	s.On("Ready").Return(func() <-chan struct{} {
   833  		ch := make(chan struct{})
   834  		close(ch)
   835  		return ch
   836  	}()).Maybe()
   837  	s.On("Done").Return(func() <-chan struct{} {
   838  		ch := make(chan struct{})
   839  		close(ch)
   840  		return ch
   841  	}()).Maybe()
   842  }
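
        // A usage sketch for the two mock helpers above (illustrative only; it assumes the mockery-generated
        // constructors NewGossipSubInspectorNotificationDistributor and NewSubscriptionValidator exist in the
        // mockp2p package):
        //
        //	d := mockp2p.NewGossipSubInspectorNotificationDistributor(t)
        //	MockInspectorNotificationDistributorReadyDoneAware(d)
        //
        //	sv := mockp2p.NewSubscriptionValidator(t)
        //	MockScoringRegistrySubscriptionValidatorReadyDoneAware(sv)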
   843  
   844  // GossipSubRpcFixtures returns a slice of random GossipSub RPC messages for testing.
   845  // Args:
   846  // - t: *testing.T instance
   847  // - count: number of RPC messages to generate
   848  // Returns:
   849  // - []*pb.RPC: slice of RPC messages.
   850  // Note: every parameter that is not explicitly set is set to 10. This function suits applications that need to generate a large number of RPC messages with
   851  // filled random data. For finer control over the generated data, use GossipSubRpcFixture.
   852  func GossipSubRpcFixtures(t *testing.T, count int) []*pb.RPC {
   853  	c := 10
   854  	rpcs := make([]*pb.RPC, 0)
   855  	for i := 0; i < count; i++ {
   856  		rpcs = append(rpcs,
   857  			GossipSubRpcFixture(t,
   858  				c,
   859  				WithPrune(c, GossipSubTopicIdFixture()),
   860  				WithGraft(c, GossipSubTopicIdFixture()),
   861  				WithIHave(c, c, GossipSubTopicIdFixture()),
   862  				WithIWant(c, c)))
   863  	}
   864  	return rpcs
   865  }
   866  
   867  // GossipSubRpcFixture returns a random GossipSub RPC message. An RPC message is the GossipSub-level message that is exchanged between nodes.
   868  // It contains individual messages, subscriptions, and control messages.
   869  // Args:
   870  // - t: *testing.T instance
   871  // - msgCnt: number of messages to generate
   872  // - opts: options to customize control messages (not having an option means no control message).
   873  // Returns:
   874  // - *pb.RPC: a random GossipSub RPC message
   875  // Note: the message is not signed.
   876  func GossipSubRpcFixture(t *testing.T, msgCnt int, opts ...GossipSubCtrlOption) *pb.RPC {
   877  	rand.Seed(uint64(time.Now().UnixNano()))
   878  
   879  	// creates a fixed number of subscriptions, each with a random topic ID and a random subscribe flag
   880  	numSubscriptions := 10
   881  	topicIdSize := 10
   882  	subscriptions := make([]*pb.RPC_SubOpts, numSubscriptions)
   883  	for i := 0; i < numSubscriptions; i++ {
   884  		subscribe := rand.Intn(2) == 1
   885  		topicID := unittest.RandomStringFixture(t, topicIdSize)
   886  		subscriptions[i] = &pb.RPC_SubOpts{
   887  			Subscribe: &subscribe,
   888  			Topicid:   &topicID,
   889  		}
   890  	}
   891  
   892  	// generates random messages
   893  	messages := make([]*pb.Message, msgCnt)
   894  	for i := 0; i < msgCnt; i++ {
   895  		messages[i] = GossipSubMessageFixture(t)
   896  	}
   897  
   898  	// Create a Control Message
   899  	controlMessages := GossipSubCtrlFixture(opts...)
   900  
   901  	// Create the RPC
   902  	rpc := &pb.RPC{
   903  		Subscriptions: subscriptions,
   904  		Publish:       messages,
   905  		Control:       controlMessages,
   906  	}
   907  
   908  	return rpc
   909  }
   910  
   911  type GossipSubCtrlOption func(*pb.ControlMessage)
   912  
   913  // GossipSubCtrlFixture returns a ControlMessage with the given options.
   914  func GossipSubCtrlFixture(opts ...GossipSubCtrlOption) *pb.ControlMessage {
   915  	msg := &pb.ControlMessage{}
   916  	for _, opt := range opts {
   917  		opt(msg)
   918  	}
   919  	return msg
   920  }
   921  
   922  // WithIHave adds iHave control messages of the given size and number to the control message.
   923  func WithIHave(msgCount, msgIDCount int, topicId string) GossipSubCtrlOption {
   924  	return func(msg *pb.ControlMessage) {
   925  		iHaves := make([]*pb.ControlIHave, msgCount)
   926  		for i := 0; i < msgCount; i++ {
   927  			iHaves[i] = &pb.ControlIHave{
   928  				TopicID:    &topicId,
   929  				MessageIDs: GossipSubMessageIdsFixture(msgIDCount),
   930  			}
   931  		}
   932  		msg.Ihave = iHaves
   933  	}
   934  }
   935  
   936  // WithIHaveMessageIDs adds iHave control messages with the given message IDs to the control message.
   937  func WithIHaveMessageIDs(msgIDs []string, topicId string) GossipSubCtrlOption {
   938  	return func(msg *pb.ControlMessage) {
   939  		msg.Ihave = []*pb.ControlIHave{
   940  			{
   941  				TopicID:    &topicId,
   942  				MessageIDs: msgIDs,
   943  			},
   944  		}
   945  	}
   946  }
   947  
   948  // WithIWant adds iWant control messages of the given size and number to the control message.
   949  // The message IDs are generated randomly.
   950  // Args:
   951  //
   952  //	iWantCount: number of iWant messages to add.
   953  //	msgIdsPerIWant: number of message IDs to add to each iWant message.
   954  //
   955  // Returns:
   956  // A GossipSubCtrlOption that adds iWant messages to the control message.
   957  // Example: WithIWant(2, 3) will add 2 iWant messages, each with 3 message IDs.
   958  func WithIWant(iWantCount int, msgIdsPerIWant int) GossipSubCtrlOption {
   959  	return func(msg *pb.ControlMessage) {
   960  		iWants := make([]*pb.ControlIWant, iWantCount)
   961  		for i := 0; i < iWantCount; i++ {
   962  			iWants[i] = &pb.ControlIWant{
   963  				MessageIDs: GossipSubMessageIdsFixture(msgIdsPerIWant),
   964  			}
   965  		}
   966  		msg.Iwant = iWants
   967  	}
   968  }
   969  
   970  // WithGraft adds GRAFT control messages with given topicID to the control message.
   971  func WithGraft(msgCount int, topicId string) GossipSubCtrlOption {
   972  	return func(msg *pb.ControlMessage) {
   973  		grafts := make([]*pb.ControlGraft, msgCount)
   974  		for i := 0; i < msgCount; i++ {
   975  			grafts[i] = &pb.ControlGraft{
   976  				TopicID: &topicId,
   977  			}
   978  		}
   979  		msg.Graft = grafts
   980  	}
   981  }
   982  
   983  // WithPrune adds PRUNE control messages with given topicID to the control message.
   984  func WithPrune(msgCount int, topicId string) GossipSubCtrlOption {
   985  	return func(msg *pb.ControlMessage) {
   986  		prunes := make([]*pb.ControlPrune, msgCount)
   987  		for i := 0; i < msgCount; i++ {
   988  			prunes[i] = &pb.ControlPrune{
   989  				TopicID: &topicId,
   990  			}
   991  		}
   992  		msg.Prune = prunes
   993  	}
   994  }
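
        // An illustrative sketch (not part of the fixture API) of composing the control-message options above
        // into a single RPC: 5 publish messages plus one GRAFT, one PRUNE, one iHave with 10 message IDs, and
        // one iWant with 10 message IDs:
        //
        //	rpc := GossipSubRpcFixture(t, 5,
        //		WithGraft(1, GossipSubTopicIdFixture()),
        //		WithPrune(1, GossipSubTopicIdFixture()),
        //		WithIHave(1, 10, GossipSubTopicIdFixture()),
        //		WithIWant(1, 10))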
   995  
   996  // gossipSubMessageIdFixture returns a random gossipSub message ID.
   997  func gossipSubMessageIdFixture() string {
   998  	// TODO: messageID length should be a parameter.
   999  	return unittest.GenerateRandomStringWithLen(messageIDFixtureLen)
  1000  }
  1001  
  1002  // GossipSubTopicIdFixture returns a random gossipSub topic ID.
  1003  func GossipSubTopicIdFixture() string {
  1004  	// TODO: topicID length should be a parameter.
  1005  	return unittest.GenerateRandomStringWithLen(topicIDFixtureLen)
  1006  }
  1007  
  1008  // GossipSubMessageIdsFixture returns a slice of random gossipSub message IDs of the given size.
  1009  func GossipSubMessageIdsFixture(count int) []string {
  1010  	msgIds := make([]string, count)
  1011  	for i := 0; i < count; i++ {
  1012  		msgIds[i] = gossipSubMessageIdFixture()
  1013  	}
  1014  	return msgIds
  1015  }
  1016  
  1017  // GossipSubMessageFixture returns a random gossipSub message; this contains a single pubsub message that is exchanged between nodes.
  1018  // The message is generated randomly.
  1019  // Args:
  1020  // - t: *testing.T instance
  1021  // Returns:
  1022  // - *pb.Message: a random gossipSub message
  1023  // Note: the message is not signed.
  1024  func GossipSubMessageFixture(t *testing.T) *pb.Message {
  1025  	byteSize := 100
  1026  	topic := unittest.RandomStringFixture(t, byteSize)
  1027  	return &pb.Message{
  1028  		From:      unittest.RandomBytes(byteSize),
  1029  		Data:      unittest.RandomBytes(byteSize),
  1030  		Seqno:     unittest.RandomBytes(byteSize),
  1031  		Topic:     &topic,
  1032  		Signature: unittest.RandomBytes(byteSize),
  1033  		Key:       unittest.RandomBytes(byteSize),
  1034  	}
  1035  }