github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/network/internal/testutils/testUtil.go

package testutils

import (
	"fmt"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/rs/zerolog"
	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/config"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/model/flow/filter"
	libp2pmessage "github.com/onflow/flow-go/model/libp2p/message"
	"github.com/onflow/flow-go/module"
	"github.com/onflow/flow-go/module/irrecoverable"
	"github.com/onflow/flow-go/module/metrics"
	"github.com/onflow/flow-go/module/mock"
	"github.com/onflow/flow-go/module/observable"
	"github.com/onflow/flow-go/network"
	alspmgr "github.com/onflow/flow-go/network/alsp/manager"
	netcache "github.com/onflow/flow-go/network/cache"
	"github.com/onflow/flow-go/network/codec/cbor"
	"github.com/onflow/flow-go/network/mocknetwork"
	"github.com/onflow/flow-go/network/netconf"
	"github.com/onflow/flow-go/network/p2p"
	"github.com/onflow/flow-go/network/p2p/conduit"
	"github.com/onflow/flow-go/network/p2p/connection"
	p2ptest "github.com/onflow/flow-go/network/p2p/test"
	"github.com/onflow/flow-go/network/p2p/translator"
	"github.com/onflow/flow-go/network/underlay"
	"github.com/onflow/flow-go/utils/unittest"
)

// RateLimitConsumer is a p2p.RateLimiterConsumer fixture that invokes a callback when a rate limit event is consumed.
type RateLimitConsumer struct {
	callback func(pid peer.ID, role, msgType, topic, reason string) // callback func that will be invoked on a rate limit event
}

func (r *RateLimitConsumer) OnRateLimitedPeer(pid peer.ID, role, msgType, topic, reason string) {
	r.callback(pid, role, msgType, topic, reason)
}

type PeerTag struct {
	Peer peer.ID
	Tag  string
}

// TagWatchingConnManager wraps connection.ConnManager and manages connections with tags. It
// also maintains a set of observers that it notifies when a tag is added to or removed from a peer.
type TagWatchingConnManager struct {
	*connection.ConnManager
	observers map[observable.Observer]struct{}
	obsLock   sync.RWMutex
}

// Subscribe registers an observer to receive notifications when a tag is added to or removed from a peer.
func (tw *TagWatchingConnManager) Subscribe(observer observable.Observer) {
	tw.obsLock.Lock()
	defer tw.obsLock.Unlock()
	var void struct{}
	tw.observers[observer] = void
}

// Unsubscribe removes an observer so that it no longer receives notifications.
func (tw *TagWatchingConnManager) Unsubscribe(observer observable.Observer) {
	tw.obsLock.Lock()
	defer tw.obsLock.Unlock()
	delete(tw.observers, observer)
}

// Protect adds a tag to a peer. It also notifies all observers that a tag has been added to a peer.
func (tw *TagWatchingConnManager) Protect(id peer.ID, tag string) {
	tw.obsLock.RLock()
	defer tw.obsLock.RUnlock()
	tw.ConnManager.Protect(id, tag)
	for obs := range tw.observers {
		go obs.OnNext(PeerTag{Peer: id, Tag: tag})
	}
}

// Unprotect removes a tag from a peer. It also notifies all observers that a tag has been removed from a peer.
func (tw *TagWatchingConnManager) Unprotect(id peer.ID, tag string) bool {
	tw.obsLock.RLock()
	defer tw.obsLock.RUnlock()
	res := tw.ConnManager.Unprotect(id, tag)
	for obs := range tw.observers {
		go obs.OnNext(PeerTag{Peer: id, Tag: tag})
	}
	return res
}

// NewTagWatchingConnManager creates a new TagWatchingConnManager with the given config. It returns an error if the config is invalid.
func NewTagWatchingConnManager(log zerolog.Logger, metrics module.LibP2PConnectionMetrics, config *netconf.ConnectionManager) (*TagWatchingConnManager, error) {
	cm, err := connection.NewConnManager(log, metrics, config)
	if err != nil {
		return nil, fmt.Errorf("could not create connection manager: %w", err)
	}

	return &TagWatchingConnManager{
		ConnManager: cm,
		observers:   make(map[observable.Observer]struct{}),
		obsLock:     sync.RWMutex{},
	}, nil
}
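
// Illustrative usage sketch (added for documentation; not part of the original fixture set). It shows how
// a test could subscribe an observer to a TagWatchingConnManager and exercise Protect/Unprotect so that
// PeerTag events are emitted. The connection manager config, the observer implementation, and the peer id
// are supplied by the caller and are assumptions of the caller's test setup.
func exampleWatchPeerTags(t *testing.T, cfg *netconf.ConnectionManager, obs observable.Observer, pid peer.ID) {
	tw, err := NewTagWatchingConnManager(unittest.Logger(), metrics.NewNoopCollector(), cfg)
	require.NoError(t, err)

	// every Protect/Unprotect call is forwarded to subscribed observers as a PeerTag event
	tw.Subscribe(obs)
	tw.Protect(pid, "example-tag")
	_ = tw.Unprotect(pid, "example-tag")
	tw.Unsubscribe(obs)
}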

// LibP2PNodeForNetworkFixture is a test helper that generates flow identities with valid ports and libp2p nodes.
// Note that the LibP2PNode created by this fixture is meant to be used with a network component.
// If you want to create a standalone LibP2PNode without a network component, please use p2ptest.NodeFixture.
// Args:
//
//	t: *testing.T - the test object
//	sporkId: flow.Identifier - the spork id to use for the nodes
//	n: int - number of nodes to create
//	opts: []p2ptest.NodeFixtureParameterOption - options to configure the nodes
//
// Returns:
//
//	flow.IdentityList - list of identities created for the nodes, one for each node.
//	[]p2p.LibP2PNode - list of libp2p nodes created.
//
// TODO: several test cases only need a single node, consider encapsulating this function in a single node fixture.
func LibP2PNodeForNetworkFixture(t *testing.T, sporkId flow.Identifier, n int, opts ...p2ptest.NodeFixtureParameterOption) (flow.IdentityList, []p2p.LibP2PNode) {
	libP2PNodes := make([]p2p.LibP2PNode, 0)
	identities := make(flow.IdentityList, 0)
	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
	opts = append(opts, p2ptest.WithUnicastHandlerFunc(nil))

	for i := 0; i < n; i++ {
		node, nodeId := p2ptest.NodeFixture(t,
			sporkId,
			t.Name(),
			idProvider,
			opts...)
		libP2PNodes = append(libP2PNodes, node)
		identities = append(identities, &nodeId)
	}
	idProvider.SetIdentities(identities)
	return identities, libP2PNodes
}
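
// Illustrative sketch (not part of the original fixture set): creating a handful of libp2p nodes with
// LibP2PNodeForNetworkFixture. The spork id comes from unittest.IdentifierFixture(); any further
// p2ptest.NodeFixtureParameterOption values would be passed through to p2ptest.NodeFixture.
func exampleLibP2PNodes(t *testing.T) (flow.IdentityList, []p2p.LibP2PNode) {
	sporkId := unittest.IdentifierFixture()

	// three identities and three libp2p nodes, one identity per node
	ids, nodes := LibP2PNodeForNetworkFixture(t, sporkId, 3)
	require.Len(t, ids, 3)
	require.Len(t, nodes, 3)
	return ids, nodes
}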

// NetworksFixture generates a network instance for each of the given libp2p nodes, along with the
// per-node updatable identity provider backing it.
func NetworksFixture(t *testing.T,
	sporkId flow.Identifier,
	ids flow.IdentityList,
	libp2pNodes []p2p.LibP2PNode,
	configOpts ...func(*underlay.NetworkConfig)) ([]*underlay.Network, []*unittest.UpdatableIDProvider) {

	count := len(ids)
	nets := make([]*underlay.Network, 0)
	idProviders := make([]*unittest.UpdatableIDProvider, 0)

	for i := 0; i < count; i++ {
		idProvider := unittest.NewUpdatableIDProvider(ids)
		params := NetworkConfigFixture(t, *ids[i], idProvider, sporkId, libp2pNodes[i])

		for _, opt := range configOpts {
			opt(params)
		}

		net, err := underlay.NewNetwork(params)
		require.NoError(t, err)

		nets = append(nets, net)
		idProviders = append(idProviders, idProvider)
	}

	return nets, idProviders
}
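
// Illustrative sketch (not part of the original fixture set): building networks on top of the nodes
// created by LibP2PNodeForNetworkFixture, and overriding a field of the underlying NetworkConfig via a
// configOpts closure. The overridden timeout value is an arbitrary example.
func exampleNetworks(t *testing.T) ([]*underlay.Network, []p2p.LibP2PNode) {
	sporkId := unittest.IdentifierFixture()
	ids, nodes := LibP2PNodeForNetworkFixture(t, sporkId, 2)

	nets, _ := NetworksFixture(t, sporkId, ids, nodes, func(cfg *underlay.NetworkConfig) {
		// example override applied to every node's network config
		cfg.UnicastMessageTimeout = 5 * time.Second
	})
	return nets, nodes
}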

// NetworkConfigFixture returns a default underlay.NetworkConfig for the given identity and libp2p node,
// with noop metrics, a mocked local module, and a mocked slashing violations consumer. Any provided
// options are applied to the config before it is returned.
func NetworkConfigFixture(
	t *testing.T,
	myId flow.Identity,
	idProvider module.IdentityProvider,
	sporkId flow.Identifier,
	libp2pNode p2p.LibP2PNode,
	opts ...underlay.NetworkConfigOption) *underlay.NetworkConfig {

	me := mock.NewLocal(t)
	me.On("NodeID").Return(myId.NodeID).Maybe()
	me.On("NotMeFilter").Return(filter.Not(filter.HasNodeID[flow.Identity](me.NodeID()))).Maybe()
	me.On("Address").Return(myId.Address).Maybe()

	defaultFlowConfig, err := config.DefaultConfig()
	require.NoError(t, err)

	receiveCache := netcache.NewHeroReceiveCache(
		defaultFlowConfig.NetworkConfig.NetworkReceivedMessageCacheSize,
		unittest.Logger(),
		metrics.NewNoopCollector())
	params := &underlay.NetworkConfig{
		Logger:                unittest.Logger(),
		Codec:                 unittest.NetworkCodec(),
		Libp2pNode:            libp2pNode,
		Me:                    me,
		BitSwapMetrics:        metrics.NewNoopCollector(),
		Topology:              unittest.NetworkTopology(),
		Metrics:               metrics.NewNoopCollector(),
		IdentityProvider:      idProvider,
		ReceiveCache:          receiveCache,
		ConduitFactory:        conduit.NewDefaultConduitFactory(),
		SporkId:               sporkId,
		UnicastMessageTimeout: underlay.DefaultUnicastTimeout,
		IdentityTranslator:    translator.NewIdentityProviderIDTranslator(idProvider),
		AlspCfg: &alspmgr.MisbehaviorReportManagerConfig{
			Logger:                  unittest.Logger(),
			SpamRecordCacheSize:     defaultFlowConfig.NetworkConfig.AlspConfig.SpamRecordCacheSize,
			SpamReportQueueSize:     defaultFlowConfig.NetworkConfig.AlspConfig.SpamReportQueueSize,
			HeartBeatInterval:       defaultFlowConfig.NetworkConfig.AlspConfig.HearBeatInterval,
			AlspMetrics:             metrics.NewNoopCollector(),
			HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(),
		},
		SlashingViolationConsumerFactory: func(_ network.ConduitAdapter) network.ViolationsConsumer {
			return mocknetwork.NewViolationsConsumer(t)
		},
	}

	for _, opt := range opts {
		opt(params)
	}

	return params
}
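
// Illustrative sketch (not part of the original fixture set): constructing a single network directly from
// NetworkConfigFixture and mutating the returned config before handing it to underlay.NewNetwork. This is
// equivalent to passing an option, but avoids assumptions about which predefined options exist.
func exampleSingleNetwork(t *testing.T, id flow.Identity, node p2p.LibP2PNode) *underlay.Network {
	sporkId := unittest.IdentifierFixture()
	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{&id})

	params := NetworkConfigFixture(t, id, idProvider, sporkId, node)
	// the returned config is plain data; fields can be adjusted before the network is built
	params.UnicastMessageTimeout = 2 * time.Second

	net, err := underlay.NewNetwork(params)
	require.NoError(t, err)
	return net
}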

// StartNodesAndNetworks starts the provided networks and libp2p nodes using the provided irrecoverable
// context: the networks are started first, followed by the nodes and their peer managers.
// Arguments:
// - ctx: the irrecoverable context to use for starting the nodes and networks.
// - t: the test object.
// - nodes: the libp2p nodes to start.
// - nets: the networks to start.
//
// This function fails the test if the nodes or networks do not become ready within 5 seconds.
func StartNodesAndNetworks(ctx irrecoverable.SignalerContext, t *testing.T, nodes []p2p.LibP2PNode, nets []network.EngineRegistry) {
	StartNetworks(ctx, t, nets)

	// start up nodes and peer managers
	StartNodes(ctx, t, nodes)
}
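
// Illustrative end-to-end sketch (not part of the original fixture set): create nodes and networks, start
// them, and tear them down again. The signaler context and its cancel function are supplied by the caller
// (for example from a mock signaler context in a test); keeping them as parameters keeps this sketch
// self-contained and free of assumptions about how the caller builds its context.
func exampleStartAndStop(t *testing.T, ctx irrecoverable.SignalerContext, cancel func()) {
	sporkId := unittest.IdentifierFixture()
	ids, nodes := LibP2PNodeForNetworkFixture(t, sporkId, 2)
	nets, _ := NetworksFixture(t, sporkId, ids, nodes)

	// each network is used through the engine registry interface; start everything and wait for readiness
	registries := make([]network.EngineRegistry, 0, len(nets))
	for _, net := range nets {
		registries = append(registries, net)
	}
	StartNodesAndNetworks(ctx, t, nodes, registries)

	// cancelling the context signals shutdown; StopComponents then waits for completion
	cancel()
	StopComponents(t, registries, 3*time.Second)
	StopComponents(t, nodes, 3*time.Second)
}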

// StartNetworks starts the provided networks using the provided irrecoverable context.
// Arguments:
// - ctx: the irrecoverable context to use for starting the networks.
// - t: the test object.
// - nets: the networks to start.
//
// This function fails the test if a network does not become ready within 5 seconds.
func StartNetworks(ctx irrecoverable.SignalerContext, t *testing.T, nets []network.EngineRegistry) {
	for _, net := range nets {
		net.Start(ctx)
		unittest.RequireComponentsReadyBefore(t, 5*time.Second, net)
	}
}

// StartNodes starts the provided nodes and their peer managers using the provided irrecoverable context.
// It fails the test if a node or peer manager does not become ready within 5 seconds.
func StartNodes(ctx irrecoverable.SignalerContext, t *testing.T, nodes []p2p.LibP2PNode) {
	for _, node := range nodes {
		node.Start(ctx)
		unittest.RequireComponentsReadyBefore(t, 5*time.Second, node)

		pm := node.PeerManagerComponent()
		pm.Start(ctx)
		unittest.RequireComponentsReadyBefore(t, 5*time.Second, pm)
	}
}

// StopComponents stops ReadyDoneAware instances in parallel and fails the test if they could not be stopped
// within the given duration.
func StopComponents[R module.ReadyDoneAware](t *testing.T, rda []R, duration time.Duration) {
	comps := make([]module.ReadyDoneAware, 0, len(rda))
	for _, c := range rda {
		comps = append(comps, c)
	}

	unittest.RequireComponentsDoneBefore(t, duration, comps...)
}

// OptionalSleep sleeps for two seconds when the given send function is a Multicast or Publish, to allow
// nodes to heartbeat and discover each other (only needed when using PubSub).
func OptionalSleep(send ConduitSendWrapperFunc) {
	sendFuncName := runtime.FuncForPC(reflect.ValueOf(send).Pointer()).Name()
	if strings.Contains(sendFuncName, "Multicast") || strings.Contains(sendFuncName, "Publish") {
		time.Sleep(2 * time.Second)
	}
}

// NetworkPayloadFixture creates a byte payload sized so that the encoded message is approximately the
// given size (in bytes) and returns it. The primary purpose of this helper is to stress test the network
// layer by generating large messages to transmit.
func NetworkPayloadFixture(t *testing.T, size uint) []byte {
	// reserves 1000 bytes for the message headers, encoding overhead, and libp2p message overhead.
	overhead := 1000
	require.Greater(t, int(size), overhead, "could not generate message below size threshold")
	emptyEvent := &libp2pmessage.TestMessage{
		Text: "",
	}

	// encodes the message
	codec := cbor.NewCodec()
	empty, err := codec.Encode(emptyEvent)
	require.NoError(t, err)

	// max possible payload size
	payloadSize := int(size) - overhead - len(empty)
	payload := make([]byte, payloadSize)

	// populates payload with filler bytes
	for i := range payload {
		payload[i] = 'a' // 'a' is a single-byte UTF-8 character, so payload length equals byte length
	}

	event := emptyEvent
	event.Text = string(payload)
	// encode the event the way the network would encode it, just to check that the resulting
	// message size is within the expected bounds
	encodedEvent, err := codec.Encode(event)
	require.NoError(t, err)

	require.InDelta(t, len(encodedEvent), int(size), float64(overhead))

	return payload
}
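
// Illustrative sketch (not part of the original fixture set): wrapping a NetworkPayloadFixture blob in a
// TestMessage, which is the shape in which tests typically publish it over a conduit. The 5 KiB target
// size is an arbitrary example value.
func examplePayloadMessage(t *testing.T) *libp2pmessage.TestMessage {
	payload := NetworkPayloadFixture(t, 5*1024)
	return &libp2pmessage.TestMessage{
		Text: string(payload),
	}
}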

// IsRateLimitedPeerFilter returns a p2p.PeerFilter that will return an error if the peer is rate limited.
func IsRateLimitedPeerFilter(rateLimiter p2p.RateLimiter) p2p.PeerFilter {
	return func(p peer.ID) error {
		if rateLimiter.IsRateLimited(p) {
			return fmt.Errorf("peer is rate limited")
		}
		return nil
	}
}

// NewRateLimiterConsumer returns a p2p.RateLimiterConsumer fixture that will invoke the callback provided.
func NewRateLimiterConsumer(callback func(pid peer.ID, role, msgType, topic, reason string)) p2p.RateLimiterConsumer {
	return &RateLimitConsumer{callback}
}
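
// Illustrative sketch (not part of the original fixture set): counting rate-limit notifications with
// NewRateLimiterConsumer and rejecting rate-limited peers with IsRateLimitedPeerFilter. The rate limiter
// is supplied by the caller, and the role/msgType/topic/reason strings below are arbitrary example values.
func exampleRateLimitObservation(t *testing.T, rateLimiter p2p.RateLimiter, pid peer.ID) {
	// count rate-limit events delivered to the consumer; in a real test the consumer would be registered
	// with the component that emits OnRateLimitedPeer notifications
	var mu sync.Mutex
	rateLimited := 0
	consumer := NewRateLimiterConsumer(func(pid peer.ID, role, msgType, topic, reason string) {
		mu.Lock()
		defer mu.Unlock()
		rateLimited++
	})
	consumer.OnRateLimitedPeer(pid, "access", "TestMessage", "test-topic", "exceeded message rate limit")
	mu.Lock()
	require.Equal(t, 1, rateLimited)
	mu.Unlock()

	// the peer filter rejects peers the rate limiter currently reports as rate limited
	filterFn := IsRateLimitedPeerFilter(rateLimiter)
	if rateLimiter.IsRateLimited(pid) {
		require.Error(t, filterFn(pid))
	} else {
		require.NoError(t, filterFn(pid))
	}
}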