github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/network/p2p/test/fixtures.go (about)

     1  package p2ptest
     2  
     3  import (
     4  	"bufio"
     5  	"context"
     6  	crand "math/rand"
     7  	"sync"
     8  	"testing"
     9  	"time"
    10  
    11  	dht "github.com/libp2p/go-libp2p-kad-dht"
    12  	pb "github.com/libp2p/go-libp2p-pubsub/pb"
    13  	"github.com/libp2p/go-libp2p/core/connmgr"
    14  	"github.com/libp2p/go-libp2p/core/host"
    15  	"github.com/libp2p/go-libp2p/core/network"
    16  	"github.com/libp2p/go-libp2p/core/peer"
    17  	"github.com/libp2p/go-libp2p/core/protocol"
    18  	"github.com/libp2p/go-libp2p/core/routing"
    19  	discoveryBackoff "github.com/libp2p/go-libp2p/p2p/discovery/backoff"
    20  	"github.com/onflow/crypto"
    21  	"github.com/rs/zerolog"
    22  	"github.com/stretchr/testify/require"
    23  	"golang.org/x/exp/rand"
    24  
    25  	"github.com/onflow/flow-go/config"
    26  	"github.com/onflow/flow-go/model/flow"
    27  	"github.com/onflow/flow-go/module"
    28  	"github.com/onflow/flow-go/module/irrecoverable"
    29  	"github.com/onflow/flow-go/module/metrics"
    30  	flownet "github.com/onflow/flow-go/network"
    31  	"github.com/onflow/flow-go/network/channels"
    32  	"github.com/onflow/flow-go/network/internal/p2pfixtures"
    33  	"github.com/onflow/flow-go/network/message"
    34  	"github.com/onflow/flow-go/network/p2p"
    35  	p2pbuilder "github.com/onflow/flow-go/network/p2p/builder"
    36  	p2pbuilderconfig "github.com/onflow/flow-go/network/p2p/builder/config"
    37  	"github.com/onflow/flow-go/network/p2p/connection"
    38  	p2pdht "github.com/onflow/flow-go/network/p2p/dht"
    39  	"github.com/onflow/flow-go/network/p2p/unicast/protocols"
    40  	"github.com/onflow/flow-go/network/p2p/utils"
    41  	validator "github.com/onflow/flow-go/network/validator/pubsub"
    42  	"github.com/onflow/flow-go/utils/logging"
    43  	"github.com/onflow/flow-go/utils/unittest"
    44  )
    45  
const (
	// libp2pNodeStartupTimeout is the timeout for starting a libp2p node in tests. Note that the
	// timeout has been selected to be large enough to allow for the node to start up on a CI even when
	// the test is run in parallel with other tests. Hence, no further increase of the timeout is
	// expected to be necessary. Any failure to start a node within this timeout is likely to be
	// caused by a bug in the code.
	libp2pNodeStartupTimeout = 10 * time.Second
	// libp2pNodeShutdownTimeout is the timeout for stopping a libp2p node in tests. Note that the
	// timeout has been selected to be large enough to allow for the node to shut down on a CI even when
	// the test is run in parallel with other tests. Hence, no further increase of the timeout is
	// expected to be necessary. Any failure to stop a node within this timeout is likely to be
	// caused by a bug in the code.
	libp2pNodeShutdownTimeout = 10 * time.Second

	// topicIDFixtureLen is the length of the topic ID fixture for testing.
	topicIDFixtureLen = 10
	// messageIDFixtureLen is the length of the message ID fixture for testing.
	messageIDFixtureLen = 10
)
    65  
    66  // NetworkingKeyFixtures is a test helper that generates a ECDSA flow key pair.
    67  func NetworkingKeyFixtures(t *testing.T) crypto.PrivateKey {
    68  	seed := unittest.SeedFixture(48)
    69  	key, err := crypto.GeneratePrivateKey(crypto.ECDSASecp256k1, seed)
    70  	require.NoError(t, err)
    71  	return key
    72  }
    73  
// NodeFixture is a test fixture that creates a single libp2p node with the given key, spork id, and options.
// It returns the node and its identity.
//
// Args:
//   - t: the test object; used for assertions on fixture construction.
//   - sporkID: spork identifier, mixed into the DHT protocol ID.
//   - dhtPrefix: suffix appended to the DHT protocol ID (only used for access/execution roles).
//   - idProvider: identity provider backing the connection gater and builder; must be non-nil.
//   - opts: option functions applied on top of the defaults below.
//
// Returns the built (not yet started) node and the identity derived from its networking key.
func NodeFixture(t *testing.T,
	sporkID flow.Identifier,
	dhtPrefix string,
	idProvider module.IdentityProvider,
	opts ...NodeFixtureParameterOption) (p2p.LibP2PNode, flow.Identity) {

	defaultFlowConfig, err := config.DefaultConfig()
	require.NoError(t, err)
	require.NotNil(t, idProvider)
	// default gater permits every peer; tests can override via WithConnectionGater.
	connectionGater := NewConnectionGater(idProvider, func(p peer.ID) error {
		return nil
	})
	require.NotNil(t, connectionGater)

	// defaults; each field may be overridden by the opts applied below.
	parameters := &NodeFixtureParameters{
		NetworkingType: flownet.PrivateNetwork,
		HandlerFunc:    func(network.Stream) {},
		Unicasts:       nil,
		Key:            NetworkingKeyFixtures(t),
		Address:        unittest.DefaultAddress,
		Logger:         unittest.Logger().Level(zerolog.WarnLevel),
		Role:           flow.RoleCollection,
		IdProvider:     idProvider,
		MetricsCfg: &p2pbuilderconfig.MetricsConfig{
			HeroCacheFactory: metrics.NewNoopHeroCacheMetricsFactory(),
			Metrics:          metrics.NewNoopCollector(),
		},
		ResourceManager:   &network.NullResourceManager{},
		ConnGater:         connectionGater,
		PeerManagerConfig: PeerManagerConfigFixture(), // disabled by default
		FlowConfig:        defaultFlowConfig,
	}

	// options are applied after defaults, so later options win over earlier ones.
	for _, opt := range opts {
		opt(parameters)
	}

	// identity is derived from the (possibly overridden) networking key, address, and role.
	identity := unittest.IdentityFixture(unittest.WithNetworkingKey(parameters.Key.PublicKey()),
		unittest.WithAddress(parameters.Address),
		unittest.WithRole(parameters.Role))

	logger := parameters.Logger.With().Hex("node_id", logging.ID(identity.NodeID)).Logger()

	connManager, err := connection.NewConnManager(logger, parameters.MetricsCfg.Metrics, &parameters.FlowConfig.NetworkConfig.ConnectionManager)
	require.NoError(t, err)

	builder := p2pbuilder.NewNodeBuilder(
		logger,
		&parameters.FlowConfig.NetworkConfig.GossipSub,
		parameters.MetricsCfg,
		parameters.NetworkingType,
		parameters.Address,
		parameters.Key,
		sporkID,
		parameters.IdProvider,
		&parameters.FlowConfig.NetworkConfig.ResourceManager,
		parameters.PeerManagerConfig,
		&p2p.DisallowListCacheConfig{
			MaxSize: uint32(1000),
			Metrics: metrics.NewNoopCollector(),
		},
		&p2pbuilderconfig.UnicastConfig{
			Unicast:                parameters.FlowConfig.NetworkConfig.Unicast,
			RateLimiterDistributor: parameters.UnicastRateLimiterDistributor,
		}).
		SetConnectionManager(connManager).
		SetResourceManager(parameters.ResourceManager)

	// guard against misuse: DHT options only make sense for roles that get a routing system below.
	if parameters.DhtOptions != nil && (parameters.Role != flow.RoleAccess && parameters.Role != flow.RoleExecution) {
		require.Fail(t, "DHT should not be enabled for non-access and non-execution nodes")
	}

	if parameters.Role == flow.RoleAccess || parameters.Role == flow.RoleExecution {
		// Only access and execution nodes need to run DHT;
		// Access nodes and execution nodes need DHT to run a blob service.
		// Moreover, access nodes run a DHT to let un-staked (public) access nodes find each other on the public network.
		builder.SetRoutingSystem(func(ctx context.Context, host host.Host) (routing.Routing, error) {
			return p2pdht.NewDHT(ctx,
				host,
				protocol.ID(protocols.FlowDHTProtocolIDPrefix+sporkID.String()+"/"+dhtPrefix),
				logger,
				parameters.MetricsCfg.Metrics,
				parameters.DhtOptions...)
		})
	}

	if parameters.GossipSubRpcInspectorFactory != nil {
		builder.OverrideDefaultRpcInspectorFactory(parameters.GossipSubRpcInspectorFactory)
	}

	// NOTE(review): SetResourceManager was already called on the builder chain above;
	// this re-set is redundant when ResourceManager is non-nil — confirm intent.
	if parameters.ResourceManager != nil {
		builder.SetResourceManager(parameters.ResourceManager)
	}

	if parameters.ConnGater != nil {
		builder.SetConnectionGater(parameters.ConnGater)
	}

	if parameters.PeerScoringEnabled {
		builder.OverrideGossipSubScoringConfig(parameters.PeerScoringConfigOverride)
	}

	if parameters.GossipSubFactory != nil && parameters.GossipSubConfig != nil {
		builder.OverrideGossipSubFactory(parameters.GossipSubFactory, parameters.GossipSubConfig)
	}

	if parameters.ConnManager != nil {
		builder.SetConnectionManager(parameters.ConnManager)
	}

	n, err := builder.Build()
	require.NoError(t, err)

	if parameters.HandlerFunc != nil {
		err = n.WithDefaultUnicastProtocol(parameters.HandlerFunc, parameters.Unicasts)
		require.NoError(t, err)
	}

	// get the actual IP and port that have been assigned by the subsystem
	ip, port, err := n.GetIPPort()
	require.NoError(t, err)
	identity.Address = ip + ":" + port

	if parameters.PeerProvider != nil {
		n.WithPeersProvider(parameters.PeerProvider)
	}

	return n, *identity
}
   205  
   206  // RegisterPeerProviders registers the peer provider for all the nodes in the input slice.
   207  // All node ids are registered as the peers provider for all the nodes.
   208  // This means that every node will be connected to every other node by the peer manager.
   209  // This is useful for suppressing the "peer provider not set" verbose warning logs in tests scenarios where
   210  // it is desirable to have all nodes connected to each other.
   211  // Args:
   212  // - t: testing.T- the test object; not used, but included in the signature to defensively prevent misuse of the test utility in production.
   213  // - nodes: nodes to register the peer provider for, each node will be connected to all other nodes.
   214  func RegisterPeerProviders(_ *testing.T, nodes []p2p.LibP2PNode) {
   215  	ids := peer.IDSlice{}
   216  	for _, node := range nodes {
   217  		ids = append(ids, node.ID())
   218  	}
   219  	for _, node := range nodes {
   220  		node.WithPeersProvider(func() peer.IDSlice {
   221  			return ids
   222  		})
   223  	}
   224  }
   225  
// NodeFixtureParameterOption mutates NodeFixtureParameters; options are applied by NodeFixture
// on top of its defaults.
type NodeFixtureParameterOption func(*NodeFixtureParameters)

// NodeFixtureParameters holds every knob NodeFixture exposes to tests.
// Zero/nil fields fall back to the defaults set inside NodeFixture.
type NodeFixtureParameters struct {
	HandlerFunc                   network.StreamHandler // default unicast stream handler
	NetworkingType                flownet.NetworkingType
	Unicasts                      []protocols.ProtocolName // preferred unicast protocols
	Key                           crypto.PrivateKey        // networking key; identity is derived from its public key
	Address                       string
	DhtOptions                    []dht.Option // only valid for access/execution roles (asserted in NodeFixture)
	Role                          flow.Role
	Logger                        zerolog.Logger
	PeerScoringEnabled            bool
	IdProvider                    module.IdentityProvider
	PeerScoringConfigOverride     *p2p.PeerScoringConfigOverride // only used when PeerScoringEnabled
	PeerManagerConfig             *p2pbuilderconfig.PeerManagerConfig
	PeerProvider                  p2p.PeersProvider // peer manager parameter
	ConnGater                     p2p.ConnectionGater
	ConnManager                   connmgr.ConnManager
	GossipSubFactory              p2p.GossipSubFactoryFunc
	GossipSubConfig               p2p.GossipSubAdapterConfigFunc // must be set together with GossipSubFactory to take effect
	MetricsCfg                    *p2pbuilderconfig.MetricsConfig
	ResourceManager               network.ResourceManager
	GossipSubRpcInspectorFactory  p2p.GossipSubRpcInspectorFactoryFunc
	FlowConfig                    *config.FlowConfig
	UnicastRateLimiterDistributor p2p.UnicastRateLimiterDistributor
}
   252  
   253  func WithUnicastRateLimitDistributor(distributor p2p.UnicastRateLimiterDistributor) NodeFixtureParameterOption {
   254  	return func(p *NodeFixtureParameters) {
   255  		p.UnicastRateLimiterDistributor = distributor
   256  	}
   257  }
   258  
   259  func OverrideGossipSubRpcInspectorFactory(factory p2p.GossipSubRpcInspectorFactoryFunc) NodeFixtureParameterOption {
   260  	return func(p *NodeFixtureParameters) {
   261  		p.GossipSubRpcInspectorFactory = factory
   262  	}
   263  }
   264  
   265  func OverrideFlowConfig(cfg *config.FlowConfig) NodeFixtureParameterOption {
   266  	return func(p *NodeFixtureParameters) {
   267  		p.FlowConfig = cfg
   268  	}
   269  }
   270  
   271  // EnablePeerScoringWithOverride enables peer scoring for the GossipSub pubsub system with the given override.
   272  // Any existing peer scoring config attribute that is set in the override will override the default peer scoring config.
   273  // Anything that is left to nil or zero value in the override will be ignored and the default value will be used.
   274  // Note: it is not recommended to override the default peer scoring config in production unless you know what you are doing.
   275  // Default Use Tip: use p2p.PeerScoringConfigNoOverride as the argument to this function to enable peer scoring without any override.
   276  // Args:
   277  //   - PeerScoringConfigOverride: override for the peer scoring config- Recommended to use p2p.PeerScoringConfigNoOverride for production or when
   278  //     you don't want to override the default peer scoring config.
   279  //
   280  // Returns:
   281  // - NodeFixtureParameterOption: a function that can be passed to the NodeFixture function to enable peer scoring.
   282  func EnablePeerScoringWithOverride(override *p2p.PeerScoringConfigOverride) NodeFixtureParameterOption {
   283  	return func(p *NodeFixtureParameters) {
   284  		p.PeerScoringEnabled = true
   285  		p.PeerScoringConfigOverride = override
   286  	}
   287  }
   288  
   289  func WithDefaultStreamHandler(handler network.StreamHandler) NodeFixtureParameterOption {
   290  	return func(p *NodeFixtureParameters) {
   291  		p.HandlerFunc = handler
   292  	}
   293  }
   294  
   295  func WithPeerManagerEnabled(cfg *p2pbuilderconfig.PeerManagerConfig, peerProvider p2p.PeersProvider) NodeFixtureParameterOption {
   296  	return func(p *NodeFixtureParameters) {
   297  		p.PeerManagerConfig = cfg
   298  		p.PeerProvider = peerProvider
   299  	}
   300  }
   301  
   302  func WithPreferredUnicasts(unicasts []protocols.ProtocolName) NodeFixtureParameterOption {
   303  	return func(p *NodeFixtureParameters) {
   304  		p.Unicasts = unicasts
   305  	}
   306  }
   307  
   308  func WithNetworkingPrivateKey(key crypto.PrivateKey) NodeFixtureParameterOption {
   309  	return func(p *NodeFixtureParameters) {
   310  		p.Key = key
   311  	}
   312  }
   313  
   314  func WithNetworkingAddress(address string) NodeFixtureParameterOption {
   315  	return func(p *NodeFixtureParameters) {
   316  		p.Address = address
   317  	}
   318  }
   319  
   320  func WithDHTOptions(opts ...dht.Option) NodeFixtureParameterOption {
   321  	return func(p *NodeFixtureParameters) {
   322  		p.DhtOptions = opts
   323  	}
   324  }
   325  
   326  func WithConnectionGater(connGater p2p.ConnectionGater) NodeFixtureParameterOption {
   327  	return func(p *NodeFixtureParameters) {
   328  		p.ConnGater = connGater
   329  	}
   330  }
   331  
   332  func WithConnectionManager(connManager connmgr.ConnManager) NodeFixtureParameterOption {
   333  	return func(p *NodeFixtureParameters) {
   334  		p.ConnManager = connManager
   335  	}
   336  }
   337  
   338  func WithRole(role flow.Role) NodeFixtureParameterOption {
   339  	return func(p *NodeFixtureParameters) {
   340  		p.Role = role
   341  	}
   342  }
   343  
   344  func WithPeerScoreParamsOption(cfg *p2p.PeerScoringConfigOverride) NodeFixtureParameterOption {
   345  	return func(p *NodeFixtureParameters) {
   346  		p.PeerScoringConfigOverride = cfg
   347  	}
   348  }
   349  
   350  func WithLogger(logger zerolog.Logger) NodeFixtureParameterOption {
   351  	return func(p *NodeFixtureParameters) {
   352  		p.Logger = logger
   353  	}
   354  }
   355  
   356  func WithMetricsCollector(metrics module.NetworkMetrics) NodeFixtureParameterOption {
   357  	return func(p *NodeFixtureParameters) {
   358  		p.MetricsCfg.Metrics = metrics
   359  	}
   360  }
   361  
   362  // WithDefaultResourceManager sets the resource manager to nil, which will cause the node to use the default resource manager.
   363  // Otherwise, it uses the resource manager provided by the test (the infinite resource manager).
   364  func WithDefaultResourceManager() NodeFixtureParameterOption {
   365  	return func(p *NodeFixtureParameters) {
   366  		p.ResourceManager = nil
   367  	}
   368  }
   369  
   370  // WithResourceManager sets the resource manager to the provided resource manager.
   371  // Otherwise, it uses the resource manager provided by the test (the infinite resource manager).
   372  func WithResourceManager(resourceManager network.ResourceManager) NodeFixtureParameterOption {
   373  	return func(p *NodeFixtureParameters) {
   374  		p.ResourceManager = resourceManager
   375  	}
   376  }
   377  
   378  func WithUnicastHandlerFunc(handler network.StreamHandler) NodeFixtureParameterOption {
   379  	return func(p *NodeFixtureParameters) {
   380  		p.HandlerFunc = handler
   381  	}
   382  }
   383  
   384  // PeerManagerConfigFixture is a test fixture that sets the default config for the peer manager.
   385  func PeerManagerConfigFixture(opts ...func(*p2pbuilderconfig.PeerManagerConfig)) *p2pbuilderconfig.PeerManagerConfig {
   386  	cfg := &p2pbuilderconfig.PeerManagerConfig{
   387  		ConnectionPruning: true,
   388  		UpdateInterval:    1 * time.Second,
   389  		ConnectorFactory:  connection.DefaultLibp2pBackoffConnectorFactory(),
   390  	}
   391  	for _, opt := range opts {
   392  		opt(cfg)
   393  	}
   394  	return cfg
   395  }
   396  
// WithZeroJitterAndZeroBackoff is a test fixture that sets the default config for the peer manager.
// It uses a backoff connector with zero jitter and zero backoff.
func WithZeroJitterAndZeroBackoff(t *testing.T) func(*p2pbuilderconfig.PeerManagerConfig) {
	return func(cfg *p2pbuilderconfig.PeerManagerConfig) {
		cfg.ConnectorFactory = func(host host.Host) (p2p.Connector, error) {
			cacheSize := 100
			dialTimeout := time.Minute * 2
			// custom jitter function always returns 0, i.e., no jitter is applied to the backoff.
			// NOTE(review): the remaining positional args (time.Second, 1, 0) are presumably the
			// time unit, exponent base, and offset of the exponential backoff — confirm against
			// the go-libp2p discovery/backoff API before changing them.
			backoff := discoveryBackoff.NewExponentialBackoff(1*time.Second, 1*time.Hour, func(_, _, _ time.Duration, _ *crand.Rand) time.Duration {
				return 0 // no jitter
			}, time.Second, 1, 0, crand.NewSource(crand.Int63()))
			backoffConnector, err := discoveryBackoff.NewBackoffConnector(host, cacheSize, dialTimeout, backoff)
			require.NoError(t, err)
			return backoffConnector, nil
		}
	}
}
   413  
   414  // NodesFixture is a test fixture that creates a number of libp2p nodes with the given callback function for stream handling.
   415  // It returns the nodes and their identities.
   416  func NodesFixture(t *testing.T,
   417  	sporkID flow.Identifier,
   418  	dhtPrefix string,
   419  	count int,
   420  	idProvider module.IdentityProvider,
   421  	opts ...NodeFixtureParameterOption) ([]p2p.LibP2PNode, flow.IdentityList) {
   422  	var nodes []p2p.LibP2PNode
   423  
   424  	// creating nodes
   425  	var identities flow.IdentityList
   426  	for i := 0; i < count; i++ {
   427  		// create a node on localhost with a random port assigned by the OS
   428  		node, identity := NodeFixture(t, sporkID, dhtPrefix, idProvider, opts...)
   429  		nodes = append(nodes, node)
   430  		identities = append(identities, &identity)
   431  	}
   432  
   433  	return nodes, identities
   434  }
   435  
// StartNodes start all nodes in the input slice using the provided context, timing out if nodes are
// not all Ready() before duration expires
func StartNodes(t *testing.T, ctx irrecoverable.SignalerContext, nodes []p2p.LibP2PNode) {
	// collect everything that must become Ready: each node plus its optional peer manager.
	rdas := make([]module.ReadyDoneAware, 0, len(nodes))
	for _, node := range nodes {
		node.Start(ctx)
		rdas = append(rdas, node)

		if peerManager := node.PeerManagerComponent(); peerManager != nil {
			// we need to start the peer manager post the node startup (if such component exists).
			peerManager.Start(ctx)
			rdas = append(rdas, peerManager)
		}
	}
	// wait for readiness only after all components have been started, so startups overlap.
	for _, r := range rdas {
		// Any failure to start a node within this timeout is likely to be caused by a bug in the code.
		unittest.RequireComponentsReadyBefore(t, libp2pNodeStartupTimeout, r)
	}
}
   455  
// StartNode start a single node using the provided context, timing out if the node is not Ready()
// before libp2pNodeStartupTimeout (10 seconds) expires.
// Args:
// - t: testing.T- the test object.
// - ctx: context to use.
// - node: node to start.
func StartNode(t *testing.T, ctx irrecoverable.SignalerContext, node p2p.LibP2PNode) {
	node.Start(ctx)
	// Any failure to start a node within this timeout is likely to be caused by a bug in the code.
	unittest.RequireComponentsReadyBefore(t, libp2pNodeStartupTimeout, node)
}
   467  
// StopNodes stops all nodes in the input slice using the provided cancel func, timing out if nodes are
// not all Done() before libp2pNodeShutdownTimeout (10 seconds) expires.
// Args:
// - t: testing.T- the test object.
// - nodes: nodes to stop.
// - cancel: cancel func, the function first cancels the context and then waits for the nodes to be done.
func StopNodes(t *testing.T, nodes []p2p.LibP2PNode, cancel context.CancelFunc) {
	cancel()
	for _, node := range nodes {
		// Any failure to stop a node within this timeout is likely to be caused by a bug in the code.
		unittest.RequireComponentsDoneBefore(t, libp2pNodeShutdownTimeout, node)
	}
}
   481  
// StopNode stops a single node using the provided cancel func, timing out if the node is not Done()
// before libp2pNodeShutdownTimeout (10 seconds) expires.
// Args:
// - t: testing.T- the test object.
// - node: node to stop.
// - cancel: cancel func, the function first cancels the context and then waits for the nodes to be done.
func StopNode(t *testing.T, node p2p.LibP2PNode, cancel context.CancelFunc) {
	cancel()
	// Any failure to stop a node within this timeout is likely to be caused by a bug in the code.
	unittest.RequireComponentsDoneBefore(t, libp2pNodeShutdownTimeout, node)
}
   493  
   494  // StreamHandlerFixture returns a stream handler that writes the received message to the given channel.
   495  func StreamHandlerFixture(t *testing.T) (func(s network.Stream), chan string) {
   496  	ch := make(chan string, 1) // channel to receive messages
   497  
   498  	return func(s network.Stream) {
   499  		rw := bufio.NewReadWriter(bufio.NewReader(s), bufio.NewWriter(s))
   500  		str, err := rw.ReadString('\n')
   501  		require.NoError(t, err)
   502  		ch <- str
   503  	}, ch
   504  }
   505  
   506  // LetNodesDiscoverEachOther connects all nodes to each other on the pubsub mesh.
   507  func LetNodesDiscoverEachOther(t *testing.T, ctx context.Context, nodes []p2p.LibP2PNode, ids flow.IdentityList) {
   508  	for _, node := range nodes {
   509  		for i, other := range nodes {
   510  			if node == other {
   511  				continue
   512  			}
   513  			otherPInfo, err := utils.PeerAddressInfo(ids[i].IdentitySkeleton)
   514  			require.NoError(t, err)
   515  			require.NoError(t, node.ConnectToPeer(ctx, otherPInfo))
   516  		}
   517  	}
   518  }
   519  
   520  // TryConnectionAndEnsureConnected tries connecting nodes to each other and ensures that the given nodes are connected to each other.
   521  // It fails the test if any of the nodes is not connected to any other node.
   522  func TryConnectionAndEnsureConnected(t *testing.T, ctx context.Context, nodes []p2p.LibP2PNode) {
   523  	for _, node := range nodes {
   524  		for _, other := range nodes {
   525  			if node == other {
   526  				continue
   527  			}
   528  			require.NoError(t, node.Host().Connect(ctx, other.Host().Peerstore().PeerInfo(other.ID())))
   529  			// the other node should be connected to this node
   530  			require.Equal(t, node.Host().Network().Connectedness(other.ID()), network.Connected)
   531  			// at least one connection should be established
   532  			require.True(t, len(node.Host().Network().ConnsToPeer(other.ID())) > 0)
   533  		}
   534  	}
   535  }
   536  
   537  // RequireConnectedEventually ensures eventually that the given nodes are already connected to each other.
   538  // It fails the test if any of the nodes is not connected to any other node.
   539  // Args:
   540  // - nodes: the nodes to check
   541  // - tick: the tick duration
   542  // - timeout: the timeout duration
   543  func RequireConnectedEventually(t *testing.T, nodes []p2p.LibP2PNode, tick time.Duration, timeout time.Duration) {
   544  	require.Eventually(t, func() bool {
   545  		for _, node := range nodes {
   546  			for _, other := range nodes {
   547  				if node == other {
   548  					continue
   549  				}
   550  				if node.Host().Network().Connectedness(other.ID()) != network.Connected {
   551  					return false
   552  				}
   553  				if len(node.Host().Network().ConnsToPeer(other.ID())) == 0 {
   554  					return false
   555  				}
   556  			}
   557  		}
   558  		return true
   559  	}, timeout, tick)
   560  }
   561  
   562  // RequireEventuallyNotConnected ensures eventually that the given groups of nodes are not connected to each other.
   563  // It fails the test if any of the nodes from groupA is connected to any of the nodes from groupB.
   564  // Args:
   565  // - groupA: the first group of nodes
   566  // - groupB: the second group of nodes
   567  // - tick: the tick duration
   568  // - timeout: the timeout duration
   569  func RequireEventuallyNotConnected(t *testing.T, groupA []p2p.LibP2PNode, groupB []p2p.LibP2PNode, tick time.Duration, timeout time.Duration) {
   570  	require.Eventually(t, func() bool {
   571  		for _, node := range groupA {
   572  			for _, other := range groupB {
   573  				if node.Host().Network().Connectedness(other.ID()) == network.Connected {
   574  					return false
   575  				}
   576  				if len(node.Host().Network().ConnsToPeer(other.ID())) > 0 {
   577  					return false
   578  				}
   579  			}
   580  		}
   581  		return true
   582  	}, timeout, tick)
   583  }
   584  
   585  // EnsureStreamCreationInBothDirections ensure that between each pair of nodes in the given list, a stream is created in both directions.
   586  func EnsureStreamCreationInBothDirections(t *testing.T, ctx context.Context, nodes []p2p.LibP2PNode) {
   587  	for _, this := range nodes {
   588  		for _, other := range nodes {
   589  			if this == other {
   590  				continue
   591  			}
   592  			// stream creation should pass without error
   593  			err := this.OpenAndWriteOnStream(ctx, other.ID(), t.Name(), func(stream network.Stream) error {
   594  				// do nothing
   595  				require.NotNil(t, stream)
   596  				return nil
   597  			})
   598  			require.NoError(t, err)
   599  
   600  		}
   601  	}
   602  }
   603  
// EnsurePubsubMessageExchange ensures that the given connected nodes exchange the given message on the given channel through pubsub.
// Args:
//   - nodes: the nodes to exchange messages
//   - ctx: the context- the test will fail if the context expires.
//   - topic: the topic to exchange messages on
//   - count: the number of messages to exchange from each node.
//   - messageFactory: a function that creates a unique message to be published by the node.
//     The function should return a different message each time it is called.
//
// Note-1: this function assumes a timeout of 5 seconds for each message to be received.
// Note-2: TryConnectionAndEnsureConnected() must be called to connect all nodes before calling this function.
func EnsurePubsubMessageExchange(t *testing.T, ctx context.Context, nodes []p2p.LibP2PNode, topic channels.Topic, count int, messageFactory func() interface{}) {
	// subscribe every node to the topic with an allow-all validator.
	subs := make([]p2p.Subscription, len(nodes))
	for i, node := range nodes {
		ps, err := node.Subscribe(topic, validator.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter()))
		require.NoError(t, err)
		subs[i] = ps
	}

	// let subscriptions propagate
	time.Sleep(1 * time.Second)

	for _, node := range nodes {
		for i := 0; i < count; i++ {
			// creates a unique message to be published by the node
			payload := messageFactory()
			outgoingMessageScope, err := message.NewOutgoingScope(flow.IdentifierList{unittest.IdentifierFixture()},
				topic,
				payload,
				unittest.NetworkCodec().Encode,
				message.ProtocolTypePubSub)
			require.NoError(t, err)
			require.NoError(t, node.Publish(ctx, outgoingMessageScope))

			// wait for the message to be received by all nodes
			// note: `:=` shadows the outer ctx only within this iteration, so every message
			// gets a fresh 5s timeout derived from the caller's context.
			ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
			expectedReceivedData, err := outgoingMessageScope.Proto().Marshal()
			require.NoError(t, err)
			p2pfixtures.SubsMustReceiveMessage(t, ctx, expectedReceivedData, subs)
			cancel()
		}
	}
}
   647  
// EnsurePubsubMessageExchangeFromNode ensures that the given node exchanges the given message on the given channel through pubsub with the other nodes.
// Args:
//   - node: the node to exchange messages
//
// - ctx: the context- the test will fail if the context expires.
// - sender: the node that sends the message to the other node.
// - receiverNode: the node that receives the message from the other node.
// - receiverIdentifier: the identifier of the receiver node.
// - topic: the topic to exchange messages on.
// - count: the number of messages to exchange from `sender` to `receiver`.
// - messageFactory: a function that creates a unique message to be published by the node.
func EnsurePubsubMessageExchangeFromNode(t *testing.T,
	ctx context.Context,
	sender p2p.LibP2PNode,
	receiverNode p2p.LibP2PNode,
	receiverIdentifier flow.Identifier,
	topic channels.Topic,
	count int,
	messageFactory func() interface{}) {
	// sender also subscribes so that it participates in the topic mesh; its subscription is unused.
	_, err := sender.Subscribe(topic, validator.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter()))
	require.NoError(t, err)

	toSub, err := receiverNode.Subscribe(topic, validator.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter()))
	require.NoError(t, err)

	// let subscriptions propagate
	time.Sleep(1 * time.Second)

	for i := 0; i < count; i++ {
		// creates a unique message to be published by the node
		payload := messageFactory()
		outgoingMessageScope, err := message.NewOutgoingScope(flow.IdentifierList{receiverIdentifier},
			topic,
			payload,
			unittest.NetworkCodec().Encode,
			message.ProtocolTypePubSub)
		require.NoError(t, err)
		require.NoError(t, sender.Publish(ctx, outgoingMessageScope))

		// wait for the message to be received by all nodes
		// note: `:=` shadows the outer ctx only within this iteration, so every message
		// gets a fresh 5s timeout derived from the caller's context.
		ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
		expectedReceivedData, err := outgoingMessageScope.Proto().Marshal()
		require.NoError(t, err)
		p2pfixtures.SubsMustReceiveMessage(t, ctx, expectedReceivedData, []p2p.Subscription{toSub})
		cancel()
	}
}
   695  
   696  // EnsureNotConnectedBetweenGroups ensures no connection exists between the given groups of nodes.
   697  func EnsureNotConnectedBetweenGroups(t *testing.T, ctx context.Context, groupA []p2p.LibP2PNode, groupB []p2p.LibP2PNode) {
   698  	// ensure no connection from group A to group B
   699  	p2pfixtures.EnsureNotConnected(t, ctx, groupA, groupB)
   700  	// ensure no connection from group B to group A
   701  	p2pfixtures.EnsureNotConnected(t, ctx, groupB, groupA)
   702  }
   703  
   704  // EnsureNoPubsubMessageExchange ensures that the no pubsub message is exchanged "from" the given nodes "to" the given nodes.
   705  // Args:
   706  //   - from: the nodes that send messages to the other group but their message must not be received by the other group.
   707  //
   708  // - to: the nodes that are the target of the messages sent by the other group ("from") but must not receive any message from them.
   709  // - topic: the topic to exchange messages on.
   710  // - count: the number of messages to exchange from each node.
   711  // - messageFactory: a function that creates a unique message to be published by the node.
   712  func EnsureNoPubsubMessageExchange(t *testing.T,
   713  	ctx context.Context,
   714  	from []p2p.LibP2PNode,
   715  	to []p2p.LibP2PNode,
   716  	toIdentifiers flow.IdentifierList,
   717  	topic channels.Topic,
   718  	count int,
   719  	messageFactory func() interface{}) {
   720  	subs := make([]p2p.Subscription, len(to))
   721  	tv := validator.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter())
   722  	var err error
   723  	for _, node := range from {
   724  		_, err = node.Subscribe(topic, tv)
   725  		require.NoError(t, err)
   726  	}
   727  
   728  	for i, node := range to {
   729  		s, err := node.Subscribe(topic, tv)
   730  		require.NoError(t, err)
   731  		subs[i] = s
   732  	}
   733  
   734  	// let subscriptions propagate
   735  	time.Sleep(1 * time.Second)
   736  
   737  	wg := &sync.WaitGroup{}
   738  	for _, node := range from {
   739  		node := node // capture range variable
   740  		for i := 0; i < count; i++ {
   741  			wg.Add(1)
   742  			go func() {
   743  				// creates a unique message to be published by the node.
   744  
   745  				payload := messageFactory()
   746  				outgoingMessageScope, err := message.NewOutgoingScope(toIdentifiers, topic, payload, unittest.NetworkCodec().Encode, message.ProtocolTypePubSub)
   747  				require.NoError(t, err)
   748  				require.NoError(t, node.Publish(ctx, outgoingMessageScope))
   749  
   750  				ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
   751  				p2pfixtures.SubsMustNeverReceiveAnyMessage(t, ctx, subs)
   752  				cancel()
   753  				wg.Done()
   754  			}()
   755  		}
   756  	}
   757  
   758  	// we wait for 5 seconds at most for the messages to be exchanged, hence we wait for a total of 6 seconds here to ensure
   759  	// that the goroutines are done in a timely manner.
   760  	unittest.RequireReturnsBefore(t, wg.Wait, 6*time.Second, "timed out waiting for messages to be exchanged")
   761  }
   762  
   763  // EnsureNoPubsubExchangeBetweenGroups ensures that no pubsub message is exchanged between the given groups of nodes.
   764  // Args:
   765  // - t: *testing.T instance
   766  // - ctx: context.Context instance
   767  // - groupANodes: first group of nodes- no message should be exchanged from any node of this group to the other group.
   768  // - groupAIdentifiers: identifiers of the nodes in the first group.
   769  // - groupBNodes: second group of nodes- no message should be exchanged from any node of this group to the other group.
   770  // - groupBIdentifiers: identifiers of the nodes in the second group.
   771  // - topic: pubsub topic- no message should be exchanged on this topic.
   772  // - count: number of messages to be exchanged- no message should be exchanged.
   773  // - messageFactory: function to create a unique message to be published by the node.
   774  func EnsureNoPubsubExchangeBetweenGroups(t *testing.T,
   775  	ctx context.Context,
   776  	groupANodes []p2p.LibP2PNode,
   777  	groupAIdentifiers flow.IdentifierList,
   778  	groupBNodes []p2p.LibP2PNode,
   779  	groupBIdentifiers flow.IdentifierList,
   780  	topic channels.Topic,
   781  	count int,
   782  	messageFactory func() interface{}) {
   783  	// ensure no message exchange from group A to group B
   784  	EnsureNoPubsubMessageExchange(t, ctx, groupANodes, groupBNodes, groupBIdentifiers, topic, count, messageFactory)
   785  	// ensure no message exchange from group B to group A
   786  	EnsureNoPubsubMessageExchange(t, ctx, groupBNodes, groupANodes, groupAIdentifiers, topic, count, messageFactory)
   787  }
   788  
   789  // PeerIdSliceFixture returns a slice of random peer IDs for testing.
   790  // peer ID is the identifier of a node on the libp2p network.
   791  // Args:
   792  // - t: *testing.T instance
   793  // - n: number of peer IDs to generate
   794  // Returns:
   795  // - peer.IDSlice: slice of peer IDs
   796  func PeerIdSliceFixture(t *testing.T, n int) peer.IDSlice {
   797  	ids := make([]peer.ID, n)
   798  	for i := 0; i < n; i++ {
   799  		ids[i] = unittest.PeerIdFixture(t)
   800  	}
   801  	return ids
   802  }
   803  
   804  // NewConnectionGater creates a new connection gater for testing with given allow listing filter.
   805  func NewConnectionGater(idProvider module.IdentityProvider, allowListFilter p2p.PeerFilter) p2p.ConnectionGater {
   806  	filters := []p2p.PeerFilter{allowListFilter}
   807  	return connection.NewConnGater(unittest.Logger(), idProvider, connection.WithOnInterceptPeerDialFilters(filters), connection.WithOnInterceptSecuredFilters(filters))
   808  }
   809  
   810  // GossipSubRpcFixtures returns a slice of random message IDs for testing.
   811  // Args:
   812  // - t: *testing.T instance
   813  // - count: number of message IDs to generate
   814  // Returns:
   815  // - []string: slice of message IDs.
   816  // Note: evey other parameters that are not explicitly set are set to 10. This function suites applications that need to generate a large number of RPC messages with
   817  // filled random data. For a better control over the generated data, use GossipSubRpcFixture.
   818  func GossipSubRpcFixtures(t *testing.T, count int) []*pb.RPC {
   819  	c := 10
   820  	rpcs := make([]*pb.RPC, 0)
   821  	for i := 0; i < count; i++ {
   822  		rpcs = append(rpcs,
   823  			GossipSubRpcFixture(t,
   824  				c,
   825  				WithPrune(c, GossipSubTopicIdFixture()),
   826  				WithGraft(c, GossipSubTopicIdFixture()),
   827  				WithIHave(c, c, GossipSubTopicIdFixture()),
   828  				WithIWant(c, c)))
   829  	}
   830  	return rpcs
   831  }
   832  
   833  // GossipSubRpcFixture returns a random GossipSub RPC message. An RPC message is the GossipSub-level message that is exchanged between nodes.
   834  // It contains individual messages, subscriptions, and control messages.
   835  // Args:
   836  // - t: *testing.T instance
   837  // - msgCnt: number of messages to generate
   838  // - opts: options to customize control messages (not having an option means no control message).
   839  // Returns:
   840  // - *pb.RPC: a random GossipSub RPC message
   841  // Note: the message is not signed.
   842  func GossipSubRpcFixture(t *testing.T, msgCnt int, opts ...GossipSubCtrlOption) *pb.RPC {
   843  	rand.Seed(uint64(time.Now().UnixNano()))
   844  
   845  	// creates a random number of Subscriptions
   846  	numSubscriptions := 10
   847  	topicIdSize := 10
   848  	subscriptions := make([]*pb.RPC_SubOpts, numSubscriptions)
   849  	for i := 0; i < numSubscriptions; i++ {
   850  		subscribe := rand.Intn(2) == 1
   851  		topicID := unittest.RandomStringFixture(t, topicIdSize)
   852  		subscriptions[i] = &pb.RPC_SubOpts{
   853  			Subscribe: &subscribe,
   854  			Topicid:   &topicID,
   855  		}
   856  	}
   857  
   858  	// generates random messages
   859  	messages := make([]*pb.Message, msgCnt)
   860  	for i := 0; i < msgCnt; i++ {
   861  		messages[i] = GossipSubMessageFixture(t)
   862  	}
   863  
   864  	// Create a Control Message
   865  	controlMessages := GossipSubCtrlFixture(opts...)
   866  
   867  	// Create the RPC
   868  	rpc := &pb.RPC{
   869  		Subscriptions: subscriptions,
   870  		Publish:       messages,
   871  		Control:       controlMessages,
   872  	}
   873  
   874  	return rpc
   875  }
   876  
   877  type GossipSubCtrlOption func(*pb.ControlMessage)
   878  
   879  // GossipSubCtrlFixture returns a ControlMessage with the given options.
   880  func GossipSubCtrlFixture(opts ...GossipSubCtrlOption) *pb.ControlMessage {
   881  	msg := &pb.ControlMessage{}
   882  	for _, opt := range opts {
   883  		opt(msg)
   884  	}
   885  	return msg
   886  }
   887  
   888  // WithIHave adds iHave control messages of the given size and number to the control message.
   889  func WithIHave(msgCount, msgIDCount int, topicId string) GossipSubCtrlOption {
   890  	return func(msg *pb.ControlMessage) {
   891  		iHaves := make([]*pb.ControlIHave, msgCount)
   892  		for i := 0; i < msgCount; i++ {
   893  			iHaves[i] = &pb.ControlIHave{
   894  				TopicID:    &topicId,
   895  				MessageIDs: GossipSubMessageIdsFixture(msgIDCount),
   896  			}
   897  		}
   898  		msg.Ihave = iHaves
   899  	}
   900  }
   901  
   902  // WithIHaveMessageIDs adds iHave control messages with the given message IDs to the control message.
   903  func WithIHaveMessageIDs(msgIDs []string, topicId string) GossipSubCtrlOption {
   904  	return func(msg *pb.ControlMessage) {
   905  		msg.Ihave = []*pb.ControlIHave{
   906  			{
   907  				TopicID:    &topicId,
   908  				MessageIDs: msgIDs,
   909  			},
   910  		}
   911  	}
   912  }
   913  
   914  // WithIWant adds iWant control messages of the given size and number to the control message.
   915  // The message IDs are generated randomly.
   916  // Args:
   917  //
   918  //	msgCount: number of iWant messages to add.
   919  //	msgIdsPerIWant: number of message IDs to add to each iWant message.
   920  //
   921  // Returns:
   922  // A GossipSubCtrlOption that adds iWant messages to the control message.
   923  // Example: WithIWant(2, 3) will add 2 iWant messages, each with 3 message IDs.
   924  func WithIWant(iWantCount int, msgIdsPerIWant int) GossipSubCtrlOption {
   925  	return func(msg *pb.ControlMessage) {
   926  		iWants := make([]*pb.ControlIWant, iWantCount)
   927  		for i := 0; i < iWantCount; i++ {
   928  			iWants[i] = &pb.ControlIWant{
   929  				MessageIDs: GossipSubMessageIdsFixture(msgIdsPerIWant),
   930  			}
   931  		}
   932  		msg.Iwant = iWants
   933  	}
   934  }
   935  
   936  // WithGraft adds GRAFT control messages with given topicID to the control message.
   937  func WithGraft(msgCount int, topicId string) GossipSubCtrlOption {
   938  	return func(msg *pb.ControlMessage) {
   939  		grafts := make([]*pb.ControlGraft, msgCount)
   940  		for i := 0; i < msgCount; i++ {
   941  			grafts[i] = &pb.ControlGraft{
   942  				TopicID: &topicId,
   943  			}
   944  		}
   945  		msg.Graft = grafts
   946  	}
   947  }
   948  
   949  // WithGrafts adds a GRAFT control message with each given topicID to the control message.
   950  func WithGrafts(topicIds ...string) GossipSubCtrlOption {
   951  	return func(msg *pb.ControlMessage) {
   952  		grafts := make([]*pb.ControlGraft, len(topicIds))
   953  		for i, topic := range topicIds {
   954  			grafts[i] = &pb.ControlGraft{
   955  				TopicID: &topic,
   956  			}
   957  		}
   958  		msg.Graft = grafts
   959  	}
   960  }
   961  
   962  // WithPrune adds PRUNE control messages with given topicID to the control message.
   963  func WithPrune(msgCount int, topicId string) GossipSubCtrlOption {
   964  	return func(msg *pb.ControlMessage) {
   965  		prunes := make([]*pb.ControlPrune, msgCount)
   966  		for i := 0; i < msgCount; i++ {
   967  			prunes[i] = &pb.ControlPrune{
   968  				TopicID: &topicId,
   969  			}
   970  		}
   971  		msg.Prune = prunes
   972  	}
   973  }
   974  
   975  // WithPrunes adds a PRUNE control message with each given topicID to the control message.
   976  func WithPrunes(topicIds ...string) GossipSubCtrlOption {
   977  	return func(msg *pb.ControlMessage) {
   978  		prunes := make([]*pb.ControlPrune, len(topicIds))
   979  		for i, topic := range topicIds {
   980  			prunes[i] = &pb.ControlPrune{
   981  				TopicID: &topic,
   982  			}
   983  		}
   984  		msg.Prune = prunes
   985  	}
   986  }
   987  
// gossipSubMessageIdFixture returns a random gossipSub message ID of fixed length
// messageIDFixtureLen (a package-level constant declared elsewhere in this file).
func gossipSubMessageIdFixture() string {
	// TODO: messageID length should be a parameter.
	return unittest.GenerateRandomStringWithLen(messageIDFixtureLen)
}
   993  
// GossipSubTopicIdFixture returns a random gossipSub topic ID of fixed length
// topicIDFixtureLen (a package-level constant declared elsewhere in this file).
func GossipSubTopicIdFixture() string {
	// TODO: topicID length should be a parameter.
	return unittest.GenerateRandomStringWithLen(topicIDFixtureLen)
}
   999  
  1000  // GossipSubMessageIdsFixture returns a slice of random gossipSub message IDs of the given size.
  1001  func GossipSubMessageIdsFixture(count int) []string {
  1002  	msgIds := make([]string, count)
  1003  	for i := 0; i < count; i++ {
  1004  		msgIds[i] = gossipSubMessageIdFixture()
  1005  	}
  1006  	return msgIds
  1007  }
  1008  
  1009  // GossipSubMessageFixture returns a random gossipSub message; this contains a single pubsub message that is exchanged between nodes.
  1010  // The message is generated randomly.
  1011  // Args:
  1012  // - t: *testing.T instance
  1013  // Returns:
  1014  // - *pb.Message: a random gossipSub message
  1015  // Note: the message is not signed.
  1016  func GossipSubMessageFixture(t *testing.T) *pb.Message {
  1017  	byteSize := 100
  1018  	topic := unittest.RandomStringFixture(t, byteSize)
  1019  	return &pb.Message{
  1020  		From:      unittest.RandomBytes(byteSize),
  1021  		Data:      unittest.RandomBytes(byteSize),
  1022  		Seqno:     unittest.RandomBytes(byteSize),
  1023  		Topic:     &topic,
  1024  		Signature: unittest.RandomBytes(byteSize),
  1025  		Key:       unittest.RandomBytes(byteSize),
  1026  	}
  1027  }
  1028  
// UpdatableTopicProviderFixture is a mock implementation of the TopicProvider interface.
// Its topics and per-topic subscriptions can be replaced at any time via UpdateTopics
// and UpdateSubscriptions.
type UpdatableTopicProviderFixture struct {
	topics        []string             // topics currently reported by GetTopics
	subscriptions map[string][]peer.ID // per-topic peers reported by ListPeers
}
  1034  
  1035  func NewUpdatableTopicProviderFixture() *UpdatableTopicProviderFixture {
  1036  	return &UpdatableTopicProviderFixture{
  1037  		topics:        []string{},
  1038  		subscriptions: map[string][]peer.ID{},
  1039  	}
  1040  }
  1041  
// GetTopics returns the topics currently configured on the fixture.
func (m *UpdatableTopicProviderFixture) GetTopics() []string {
	return m.topics
}
  1045  
// ListPeers returns the peers currently subscribed to the given topic
// (nil if the topic is unknown).
func (m *UpdatableTopicProviderFixture) ListPeers(topic string) []peer.ID {
	return m.subscriptions[topic]
}
  1049  
// UpdateTopics replaces the fixture's topic list with the given topics.
// Note: the slice is stored as-is (not copied); callers should not mutate it afterwards.
func (m *UpdatableTopicProviderFixture) UpdateTopics(topics []string) {
	m.topics = topics
}
  1053  
// UpdateSubscriptions replaces the set of peers subscribed to the given topic.
// Note: the slice is stored as-is (not copied); callers should not mutate it afterwards.
func (m *UpdatableTopicProviderFixture) UpdateSubscriptions(topic string, peers []peer.ID) {
	m.subscriptions[topic] = peers
}