github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/network/p2p/node/libp2pStream_test.go (about)

     1  package p2pnode_test
     2  
     3  import (
     4  	"bufio"
     5  	"context"
     6  	"errors"
     7  	"fmt"
     8  	"io"
     9  	"regexp"
    10  	"sync"
    11  	"testing"
    12  	"time"
    13  
    14  	"github.com/libp2p/go-libp2p/core"
    15  	"github.com/libp2p/go-libp2p/core/network"
    16  	"github.com/libp2p/go-libp2p/core/peerstore"
    17  	"github.com/libp2p/go-libp2p/p2p/net/swarm"
    18  	"github.com/stretchr/testify/assert"
    19  	"github.com/stretchr/testify/require"
    20  
    21  	"github.com/onflow/flow-go/config"
    22  	"github.com/onflow/flow-go/model/flow"
    23  	"github.com/onflow/flow-go/module/irrecoverable"
    24  	mockmodule "github.com/onflow/flow-go/module/mock"
    25  	"github.com/onflow/flow-go/network/internal/p2pfixtures"
    26  	"github.com/onflow/flow-go/network/internal/p2putils"
    27  	"github.com/onflow/flow-go/network/p2p"
    28  	p2ptest "github.com/onflow/flow-go/network/p2p/test"
    29  	"github.com/onflow/flow-go/network/p2p/unicast"
    30  	"github.com/onflow/flow-go/network/p2p/unicast/protocols"
    31  	"github.com/onflow/flow-go/network/p2p/utils"
    32  	"github.com/onflow/flow-go/utils/unittest"
    33  )
    34  
    35  // TestStreamClosing tests 1-1 communication with streams closed using libp2p2 handler.FullClose
    36  func TestStreamClosing(t *testing.T) {
    37  	count := 10
    38  	ctx, cancel := context.WithCancel(context.Background())
    39  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
    40  
    41  	var msgRegex = regexp.MustCompile("^hello[0-9]")
    42  
    43  	handler, streamCloseWG := mockStreamHandlerForMessages(t, ctx, count, msgRegex)
    44  	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
    45  	// Creates nodes
    46  	nodes, identities := p2ptest.NodesFixture(t, unittest.IdentifierFixture(), "test_stream_closing", 2, idProvider, p2ptest.WithDefaultStreamHandler(handler))
    47  	idProvider.SetIdentities(identities)
    48  
    49  	p2ptest.StartNodes(t, signalerCtx, nodes)
    50  	defer p2ptest.StopNodes(t, nodes, cancel)
    51  
    52  	nodeInfo1, err := utils.PeerAddressInfo(identities[1].IdentitySkeleton)
    53  	require.NoError(t, err)
    54  
    55  	senderWG := sync.WaitGroup{}
    56  	senderWG.Add(count)
    57  	for i := 0; i < count; i++ {
    58  		go func(i int) {
    59  			// Create stream from node 1 to node 2 (reuse if one already exists)
    60  			nodes[0].Host().Peerstore().AddAddrs(nodeInfo1.ID, nodeInfo1.Addrs, peerstore.AddressTTL)
    61  			err := nodes[0].OpenAndWriteOnStream(ctx, nodeInfo1.ID, t.Name(), func(s network.Stream) error {
    62  				w := bufio.NewWriter(s)
    63  
    64  				// Send message from node 1 to 2
    65  				msg := fmt.Sprintf("hello%d\n", i)
    66  				_, err = w.WriteString(msg)
    67  				assert.NoError(t, err)
    68  
    69  				// Flush the stream
    70  				require.NoError(t, w.Flush())
    71  
    72  				// returning will close the stream
    73  				return nil
    74  			})
    75  			require.NoError(t, err)
    76  
    77  			senderWG.Done()
    78  		}(i)
    79  	}
    80  
    81  	// wait for stream to be closed
    82  	unittest.RequireReturnsBefore(t, senderWG.Wait, 3*time.Second, "could not send messages on time")
    83  	unittest.RequireReturnsBefore(t, streamCloseWG.Wait, 3*time.Second, "could not close stream at receiver side")
    84  }
    85  
    86  // mockStreamHandlerForMessages creates a stream handler that expects receiving `msgCount` unique messages that match the input regexp.
    87  // The returned wait group will be unlocked when all messages are completely received and associated streams are closed.
    88  func mockStreamHandlerForMessages(t *testing.T, ctx context.Context, msgCount int, msgRegexp *regexp.Regexp) (network.StreamHandler, *sync.WaitGroup) {
    89  	streamCloseWG := &sync.WaitGroup{}
    90  	streamCloseWG.Add(msgCount)
    91  
    92  	h := func(s network.Stream) {
    93  		go func(s network.Stream) {
    94  			rw := bufio.NewReadWriter(bufio.NewReader(s), bufio.NewWriter(s))
    95  			for {
    96  				str, err := rw.ReadString('\n')
    97  				if err != nil {
    98  					if errors.Is(err, io.EOF) {
    99  						err := s.Close()
   100  						require.NoError(t, err)
   101  
   102  						streamCloseWG.Done()
   103  						return
   104  					}
   105  					require.Fail(t, fmt.Sprintf("received error %v", err))
   106  					err = s.Reset()
   107  					require.NoError(t, err)
   108  					return
   109  				}
   110  				select {
   111  				case <-ctx.Done():
   112  					return
   113  				default:
   114  					require.True(t, msgRegexp.MatchString(str), str)
   115  				}
   116  			}
   117  		}(s)
   118  
   119  	}
   120  	return h, streamCloseWG
   121  }
   122  
   123  // TestCreateStream_WithDefaultUnicast evaluates correctness of creating default (tcp) unicast streams between two libp2p nodes.
   124  func TestCreateStream_WithDefaultUnicast(t *testing.T) {
   125  	sporkId := unittest.IdentifierFixture()
   126  	testCreateStream(t,
   127  		sporkId,
   128  		nil, // sends nil as preferred unicast so that nodes run on default plain tcp streams.
   129  		protocols.FlowProtocolID(sporkId))
   130  }
   131  
   132  // TestCreateStream_WithPreferredGzipUnicast evaluates correctness of creating gzip-compressed tcp unicast streams between two libp2p nodes.
   133  func TestCreateStream_WithPreferredGzipUnicast(t *testing.T) {
   134  	sporkId := unittest.IdentifierFixture()
   135  	testCreateStream(t,
   136  		sporkId,
   137  		[]protocols.ProtocolName{protocols.GzipCompressionUnicast},
   138  		protocols.FlowGzipProtocolId(sporkId))
   139  }
   140  
   141  // testCreateStreams checks if a new streams of "preferred" type is created each time when CreateStream is called and an existing stream is not
   142  // reused. The "preferred" stream type is the one with the largest index in `unicasts` list.
   143  // To check that the streams are of "preferred" type, it evaluates the protocol id of established stream against the input `protocolID`.
   144  func testCreateStream(t *testing.T, sporkId flow.Identifier, unicasts []protocols.ProtocolName, protocolID core.ProtocolID) {
   145  	count := 2
   146  	ctx, cancel := context.WithCancel(context.Background())
   147  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
   148  	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
   149  	nodes, identities := p2ptest.NodesFixture(t, sporkId, "test_create_stream", count, idProvider, p2ptest.WithPreferredUnicasts(unicasts))
   150  	idProvider.SetIdentities(identities)
   151  	p2ptest.StartNodes(t, signalerCtx, nodes)
   152  
   153  	id2 := identities[1]
   154  
   155  	// Assert that there is no outbound stream to the target yet
   156  	require.Equal(t, 0, p2putils.CountStream(nodes[0].Host(), nodes[1].ID(), p2putils.Protocol(protocolID), p2putils.Direction(network.DirOutbound)))
   157  
   158  	// Now attempt to create another 100 outbound stream to the same destination by calling CreateStream
   159  	streamCount := 100
   160  	var streams []network.Stream
   161  	allStreamsClosedWg := sync.WaitGroup{}
   162  	for i := 0; i < streamCount; i++ {
   163  		allStreamsClosedWg.Add(1)
   164  		pInfo, err := utils.PeerAddressInfo(id2.IdentitySkeleton)
   165  		require.NoError(t, err)
   166  		nodes[0].Host().Peerstore().AddAddrs(pInfo.ID, pInfo.Addrs, peerstore.AddressTTL)
   167  		go func() {
   168  			err := nodes[0].OpenAndWriteOnStream(ctx, pInfo.ID, t.Name(), func(stream network.Stream) error {
   169  				require.NotNil(t, stream)
   170  				streams = append(streams, stream)
   171  				// if we return this function, the stream will be closed, but we need to keep it open for the test
   172  				// hence we wait for the context to be done
   173  				<-ctx.Done()
   174  				allStreamsClosedWg.Done()
   175  				return nil
   176  			})
   177  			if err != nil {
   178  				// we omit errors due to closing the stream. This is because we close the stream in the test.
   179  				require.Contains(t, err.Error(), "failed to close the stream")
   180  			}
   181  		}()
   182  	}
   183  
   184  	require.Eventually(t, func() bool {
   185  		return streamCount == p2putils.CountStream(nodes[0].Host(), nodes[1].ID(), p2putils.Protocol(protocolID), p2putils.Direction(network.DirOutbound))
   186  	}, 5*time.Second, 100*time.Millisecond, "could not create streams on time")
   187  
   188  	// checks that the number of connections is 1 despite the number of streams; i.e., all streams are created on the same connection
   189  	require.Len(t, nodes[0].Host().Network().Conns(), 1)
   190  
   191  	// we don't use defer as the moment we stop the nodes, the streams will be closed, and we want to assess the number of streams
   192  	p2ptest.StopNodes(t, nodes, cancel)
   193  
   194  	// wait for all streams to be closed
   195  	unittest.RequireReturnsBefore(t, allStreamsClosedWg.Wait, 1*time.Second, "could not close streams on time")
   196  }
   197  
   198  // TestCreateStream_FallBack checks two libp2p nodes with conflicting supported unicast protocols fall back
   199  // to default (tcp) unicast protocol during their negotiation.
   200  // To do this, a node with preferred gzip-compressed tcp unicast tries creating stream to another node that only
   201  // supports default plain tcp unicast. The test evaluates that the unicast stream established between two nodes
   202  // are of type default plain tcp.
   203  func TestCreateStream_FallBack(t *testing.T) {
   204  	ctx, cancel := context.WithCancel(context.Background())
   205  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
   206  
   207  	// Creates two nodes: one with preferred gzip, and other one with default protocol
   208  	sporkId := unittest.IdentifierFixture()
   209  	idProvider := mockmodule.NewIdentityProvider(t)
   210  	thisNode, thisID := p2ptest.NodeFixture(t,
   211  		sporkId,
   212  		t.Name(),
   213  		idProvider,
   214  		p2ptest.WithPreferredUnicasts([]protocols.ProtocolName{protocols.GzipCompressionUnicast}))
   215  	otherNode, otherId := p2ptest.NodeFixture(t,
   216  		sporkId,
   217  		t.Name(),
   218  		idProvider)
   219  	identities := []flow.Identity{thisID, otherId}
   220  	nodes := []p2p.LibP2PNode{thisNode, otherNode}
   221  	for i, node := range nodes {
   222  		idProvider.On("ByPeerID", node.ID()).Return(&identities[i], true).Maybe()
   223  
   224  	}
   225  	p2ptest.StartNodes(t, signalerCtx, nodes)
   226  
   227  	// Assert that there is no outbound stream to the target yet (neither default nor preferred)
   228  	defaultProtocolId := protocols.FlowProtocolID(sporkId)
   229  	preferredProtocolId := protocols.FlowGzipProtocolId(sporkId)
   230  	require.Equal(t, 0, p2putils.CountStream(thisNode.Host(), otherNode.ID(), p2putils.Protocol(defaultProtocolId), p2putils.Direction(network.DirOutbound)))
   231  	require.Equal(t, 0, p2putils.CountStream(thisNode.Host(), otherNode.ID(), p2putils.Protocol(preferredProtocolId), p2putils.Direction(network.DirOutbound)))
   232  
   233  	// Now attempt to create another 100 outbound stream to the same destination by calling CreateStream
   234  	streamCount := 10
   235  	var streams []network.Stream
   236  	allStreamsClosedWg := sync.WaitGroup{}
   237  	for i := 0; i < streamCount; i++ {
   238  		allStreamsClosedWg.Add(1)
   239  		pInfo, err := utils.PeerAddressInfo(otherId.IdentitySkeleton)
   240  		require.NoError(t, err)
   241  		thisNode.Host().Peerstore().AddAddrs(pInfo.ID, pInfo.Addrs, peerstore.AddressTTL)
   242  
   243  		// a new stream must be created
   244  		go func() {
   245  			err = thisNode.OpenAndWriteOnStream(ctx, pInfo.ID, t.Name(), func(stream network.Stream) error {
   246  				require.NotNil(t, stream)
   247  				streams = append(streams, stream)
   248  
   249  				// if we return this function, the stream will be closed, but we need to keep it open for the test
   250  				// hence we wait for the context to be done
   251  				<-ctx.Done()
   252  				allStreamsClosedWg.Done()
   253  				return nil
   254  			})
   255  		}()
   256  	}
   257  
   258  	// wait for the stream to be created on the default protocol id.
   259  	require.Eventually(t, func() bool {
   260  		return streamCount == p2putils.CountStream(nodes[0].Host(), nodes[1].ID(), p2putils.Protocol(defaultProtocolId), p2putils.Direction(network.DirOutbound))
   261  	}, 5*time.Second, 100*time.Millisecond, "could not create streams on time")
   262  
   263  	// no stream must be created on the preferred protocol id
   264  	require.Equal(t, 0, p2putils.CountStream(thisNode.Host(), otherNode.ID(), p2putils.Protocol(preferredProtocolId), p2putils.Direction(network.DirOutbound)))
   265  
   266  	// checks that the number of connections is 1 despite the number of streams; i.e., all streams are created on the same connection
   267  	require.Len(t, nodes[0].Host().Network().Conns(), 1)
   268  
   269  	// we don't use defer as the moment we stop the nodes, the streams will be closed, and we want to assess the number of streams
   270  	p2ptest.StopNodes(t, nodes, cancel)
   271  
   272  	// wait for all streams to be closed
   273  	unittest.RequireReturnsBefore(t, allStreamsClosedWg.Wait, 1*time.Second, "could not close streams on time")
   274  }
   275  
   276  // TestCreateStreamIsConcurrencySafe tests that the CreateStream is concurrency safe
   277  func TestCreateStreamIsConcurrencySafe(t *testing.T) {
   278  	ctx, cancel := context.WithCancel(context.Background())
   279  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
   280  	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
   281  	// create two nodes
   282  	nodes, identities := p2ptest.NodesFixture(t,
   283  		unittest.IdentifierFixture(),
   284  		t.Name(),
   285  		2,
   286  		idProvider)
   287  	require.Len(t, identities, 2)
   288  	idProvider.SetIdentities(flow.IdentityList{identities[0], identities[1]})
   289  	p2ptest.StartNodes(t, signalerCtx, nodes)
   290  	defer p2ptest.StopNodes(t, nodes, cancel)
   291  
   292  	nodeInfo1, err := utils.PeerAddressInfo(identities[1].IdentitySkeleton)
   293  	require.NoError(t, err)
   294  
   295  	wg := sync.WaitGroup{}
   296  
   297  	// create a gate which gates the call to CreateStream for all concurrent go routines
   298  	gate := make(chan struct{})
   299  
   300  	createStream := func() {
   301  		<-gate
   302  		nodes[0].Host().Peerstore().AddAddrs(nodeInfo1.ID, nodeInfo1.Addrs, peerstore.AddressTTL)
   303  		err := nodes[0].OpenAndWriteOnStream(ctx, nodeInfo1.ID, t.Name(), func(stream network.Stream) error {
   304  			// no-op stream writer, we just check that the stream was created
   305  			return nil
   306  		})
   307  		require.NoError(t, err) // assert that stream was successfully created
   308  		wg.Done()
   309  	}
   310  
   311  	// kick off 10 concurrent calls to CreateStream
   312  	for i := 0; i < 10; i++ {
   313  		wg.Add(1)
   314  		go createStream()
   315  	}
   316  	// open the gate by closing the channel
   317  	close(gate)
   318  
   319  	// no call should block
   320  	unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second)
   321  }
   322  
// TestNoBackoffWhenCreatingStream checks that backoff is not enabled between attempts to connect to a remote peer
// for one-to-one direct communication.
func TestNoBackoffWhenCreatingStream(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// setup per node contexts so they can be stopped independently
	ctx1, cancel1 := context.WithCancel(ctx)
	signalerCtx1 := irrecoverable.NewMockSignalerContext(t, ctx1)

	ctx2, cancel2 := context.WithCancel(ctx)
	signalerCtx2 := irrecoverable.NewMockSignalerContext(t, ctx2)
	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
	count := 2
	// Creates nodes
	nodes, identities := p2ptest.NodesFixture(t,
		unittest.IdentifierFixture(),
		t.Name(),
		count,
		idProvider)
	node1 := nodes[0]
	node2 := nodes[1]
	idProvider.SetIdentities(flow.IdentityList{identities[0], identities[1]})
	p2ptest.StartNode(t, signalerCtx1, node1)
	p2ptest.StartNode(t, signalerCtx2, node2)

	// stop node 2 immediately, so that every subsequent connection attempt from node 1 to node 2 is guaranteed to fail
	p2ptest.StopNode(t, node2, cancel2)
	defer p2ptest.StopNode(t, node1, cancel1)

	id2 := identities[1]
	pInfo, err := utils.PeerAddressInfo(id2.IdentitySkeleton)
	require.NoError(t, err)
	nodes[0].Host().Peerstore().AddAddrs(pInfo.ID, pInfo.Addrs, peerstore.AddressTTL)

	cfg, err := config.DefaultConfig()
	require.NoError(t, err)

	// upper bound for one create-stream call: max retry attempts times the maximum per-retry jitter
	maxTimeToWait := time.Duration(cfg.NetworkConfig.Unicast.UnicastManager.MaxStreamCreationRetryAttemptTimes) * unicast.MaxRetryJitter * time.Millisecond

	// need to add some buffer time so that RequireReturnsBefore waits slightly longer than maxTimeToWait to avoid
	// a race condition
	someGraceTime := 100 * time.Millisecond
	totalWaitTime := maxTimeToWait + someGraceTime

	// each CreateStream() call may try to connect up to MaxDialRetryAttemptTimes (3) times.

	// there are 2 scenarios that we need to account for:
	//
	// 1. machines where a timeout occurs on the first connection attempt - this can be due to local firewall rules or other processes running on the machine.
	//   In this case, we need to create a scenario where a backoff would have normally occurred. This is why we initiate a second connection attempt.
	//   Libp2p remembers the peer we are trying to connect to between CreateStream() calls and would have initiated a backoff if backoff wasn't turned off.
	//   The second CreateStream() call will make a second connection attempt MaxDialRetryAttemptTimes times and that should never result in a backoff error.
	//
	// 2. machines where a timeout does NOT occur on the first connection attempt - this is on CI machines and some local dev machines without a firewall / too many other processes.
	//   In this case, there will be MaxDialRetryAttemptTimes (3) connection attempts on the first CreateStream() call and MaxDialRetryAttemptTimes (3) attempts on the second CreateStream() call.

	// make two separate stream creation attempts and assert that no connection back off happened
	for i := 0; i < 2; i++ {

		// limit the maximum amount of time to wait for a connection to be established by using a context that times out
		ctx, cancel := context.WithTimeout(ctx, maxTimeToWait)

		// NOTE: the closure assigns the enclosing `err`; reading it below is safe because
		// RequireReturnsBefore only returns after the closure has completed
		unittest.RequireReturnsBefore(t, func() {
			err = node1.OpenAndWriteOnStream(ctx, pInfo.ID, t.Name(), func(stream network.Stream) error {
				// do nothing, this is a no-op stream writer, we just check that the stream was created
				return nil
			})
			require.Error(t, err)
		}, totalWaitTime, fmt.Sprintf("create stream did not error within %s", totalWaitTime.String()))
		// the failure must be a plain dial failure, never a backoff error
		require.NotContainsf(t, err.Error(), swarm.ErrDialBackoff.Error(), "swarm dialer unexpectedly did a back off for a one-to-one connection")
		cancel()
	}
}
   397  
// TestUnicastOverStream_WithPlainStream checks two nodes can send and receive unicast messages on libp2p plain streams
// (no preferred unicast protocols configured, so the default tcp stream type is used).
func TestUnicastOverStream_WithPlainStream(t *testing.T) {
	testUnicastOverStream(t)
}
   402  
// TestUnicastOverStream_WithGzipStreamCompression checks two nodes can send and receive unicast messages on gzip compressed streams
// when both nodes have gzip stream compression enabled (the option is applied to both fixtures inside testUnicastOverStream).
func TestUnicastOverStream_WithGzipStreamCompression(t *testing.T) {
	testUnicastOverStream(t, p2ptest.WithPreferredUnicasts([]protocols.ProtocolName{protocols.GzipCompressionUnicast}))
}
   408  
   409  // testUnicastOverStream sends a message from node 1 to node 2 and then from node 2 to node 1 over a unicast stream.
   410  func testUnicastOverStream(t *testing.T, opts ...p2ptest.NodeFixtureParameterOption) {
   411  	ctx, cancel := context.WithCancel(context.Background())
   412  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
   413  
   414  	// Creates nodes
   415  	sporkId := unittest.IdentifierFixture()
   416  	idProvider := mockmodule.NewIdentityProvider(t)
   417  	streamHandler1, inbound1 := p2ptest.StreamHandlerFixture(t)
   418  	node1, id1 := p2ptest.NodeFixture(t,
   419  		sporkId,
   420  		t.Name(),
   421  		idProvider,
   422  		append(opts, p2ptest.WithDefaultStreamHandler(streamHandler1))...)
   423  
   424  	streamHandler2, inbound2 := p2ptest.StreamHandlerFixture(t)
   425  	node2, id2 := p2ptest.NodeFixture(t,
   426  		sporkId,
   427  		t.Name(),
   428  		idProvider,
   429  		append(opts, p2ptest.WithDefaultStreamHandler(streamHandler2))...)
   430  	ids := flow.IdentityList{&id1, &id2}
   431  	nodes := []p2p.LibP2PNode{node1, node2}
   432  	for i, node := range nodes {
   433  		idProvider.On("ByPeerID", node.ID()).Return(ids[i], true).Maybe()
   434  
   435  	}
   436  	p2ptest.StartNodes(t, signalerCtx, nodes)
   437  	defer p2ptest.StopNodes(t, nodes, cancel)
   438  
   439  	p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids)
   440  
   441  	p2pfixtures.EnsureMessageExchangeOverUnicast(t,
   442  		ctx,
   443  		nodes,
   444  		[]chan string{inbound1, inbound2},
   445  		p2pfixtures.LongStringMessageFactoryFixture(t))
   446  }
   447  
   448  // TestUnicastOverStream_Fallback checks two nodes with asymmetric sets of preferred unicast protocols can create streams and
   449  // send and receive unicasts. Despite the asymmetry, the nodes must fall back to the libp2p plain stream during negotiation.
   450  func TestUnicastOverStream_Fallback(t *testing.T) {
   451  	ctx, cancel := context.WithCancel(context.Background())
   452  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
   453  
   454  	// Creates nodes
   455  	// node1: supports only plain unicast protocol
   456  	// node2: supports plain and gzip
   457  	sporkId := unittest.IdentifierFixture()
   458  	idProvider := mockmodule.NewIdentityProvider(t)
   459  	streamHandler1, inbound1 := p2ptest.StreamHandlerFixture(t)
   460  	node1, id1 := p2ptest.NodeFixture(t,
   461  		sporkId,
   462  		t.Name(),
   463  		idProvider,
   464  		p2ptest.WithDefaultStreamHandler(streamHandler1))
   465  
   466  	streamHandler2, inbound2 := p2ptest.StreamHandlerFixture(t)
   467  	node2, id2 := p2ptest.NodeFixture(t,
   468  		sporkId,
   469  		t.Name(),
   470  		idProvider,
   471  		p2ptest.WithDefaultStreamHandler(streamHandler2),
   472  		p2ptest.WithPreferredUnicasts([]protocols.ProtocolName{protocols.GzipCompressionUnicast}))
   473  
   474  	ids := flow.IdentityList{&id1, &id2}
   475  	nodes := []p2p.LibP2PNode{node1, node2}
   476  	for i, node := range nodes {
   477  		idProvider.On("ByPeerID", node.ID()).Return(ids[i], true).Maybe()
   478  
   479  	}
   480  	p2ptest.StartNodes(t, signalerCtx, nodes)
   481  	defer p2ptest.StopNodes(t, nodes, cancel)
   482  
   483  	p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids)
   484  	p2pfixtures.EnsureMessageExchangeOverUnicast(
   485  		t,
   486  		ctx,
   487  		nodes,
   488  		[]chan string{inbound1, inbound2}, p2pfixtures.LongStringMessageFactoryFixture(t))
   489  }
   490  
   491  // TestCreateStreamTimeoutWithUnresponsiveNode tests that the CreateStream call does not block longer than the
   492  // timeout interval
   493  func TestCreateStreamTimeoutWithUnresponsiveNode(t *testing.T) {
   494  	ctx, cancel := context.WithCancel(context.Background())
   495  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
   496  	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
   497  	// creates a regular node
   498  	nodes, identities := p2ptest.NodesFixture(t,
   499  		unittest.IdentifierFixture(),
   500  		t.Name(),
   501  		1,
   502  		idProvider)
   503  	require.Len(t, identities, 1)
   504  	idProvider.SetIdentities(identities)
   505  	p2ptest.StartNodes(t, signalerCtx, nodes)
   506  	defer p2ptest.StopNodes(t, nodes, cancel)
   507  
   508  	// create a silent node which never replies
   509  	listener, silentNodeId := p2pfixtures.SilentNodeFixture(t)
   510  	defer func() {
   511  		require.NoError(t, listener.Close())
   512  	}()
   513  
   514  	silentNodeInfo, err := utils.PeerAddressInfo(silentNodeId.IdentitySkeleton)
   515  	require.NoError(t, err)
   516  
   517  	timeout := 1 * time.Second
   518  	tctx, tcancel := context.WithTimeout(ctx, timeout)
   519  	defer tcancel()
   520  
   521  	// attempt to create a stream from node 1 to node 2 and assert that it fails after timeout
   522  	grace := 100 * time.Millisecond
   523  	unittest.AssertReturnsBefore(t,
   524  		func() {
   525  			nodes[0].Host().Peerstore().AddAddrs(silentNodeInfo.ID, silentNodeInfo.Addrs, peerstore.AddressTTL)
   526  			err = nodes[0].OpenAndWriteOnStream(tctx, silentNodeInfo.ID, t.Name(), func(stream network.Stream) error {
   527  				// do nothing, this is a no-op stream writer, we just check that the stream was created
   528  				return nil
   529  			})
   530  			require.Error(t, err)
   531  		}, timeout+grace)
   532  }
   533  
   534  // TestCreateStreamIsConcurrent tests that CreateStream calls can be made concurrently such that one blocked call
   535  // does not block another concurrent call.
   536  func TestCreateStreamIsConcurrent(t *testing.T) {
   537  	ctx, cancel := context.WithCancel(context.Background())
   538  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
   539  	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
   540  	// create two regular node
   541  	goodNodes, goodNodeIds := p2ptest.NodesFixture(t,
   542  		unittest.IdentifierFixture(),
   543  		t.Name(),
   544  		2,
   545  		idProvider)
   546  	require.Len(t, goodNodeIds, 2)
   547  	idProvider.SetIdentities(goodNodeIds)
   548  	p2ptest.StartNodes(t, signalerCtx, goodNodes)
   549  	defer p2ptest.StopNodes(t, goodNodes, cancel)
   550  
   551  	goodNodeInfo1, err := utils.PeerAddressInfo(goodNodeIds[1].IdentitySkeleton)
   552  	require.NoError(t, err)
   553  
   554  	// create a silent node which never replies
   555  	listener, silentNodeId := p2pfixtures.SilentNodeFixture(t)
   556  	defer func() {
   557  		require.NoError(t, listener.Close())
   558  	}()
   559  	silentNodeInfo, err := utils.PeerAddressInfo(silentNodeId.IdentitySkeleton)
   560  	require.NoError(t, err)
   561  
   562  	// creates a stream to unresponsive node and makes sure that the stream creation is blocked
   563  	blockedCallCh := unittest.RequireNeverReturnBefore(t,
   564  		func() {
   565  			goodNodes[0].Host().Peerstore().AddAddrs(silentNodeInfo.ID, silentNodeInfo.Addrs, peerstore.AddressTTL)
   566  			// the subsequent call will be blocked
   567  			_ = goodNodes[0].OpenAndWriteOnStream(ctx, silentNodeInfo.ID, t.Name(), func(stream network.Stream) error {
   568  				// do nothing, the stream creation will be blocked so this should never be called
   569  				require.Fail(t, "this should never be called")
   570  				return nil
   571  			})
   572  		}, 1*time.Second, "CreateStream attempt to the unresponsive peer did not block")
   573  
   574  	// requires same peer can still connect to the other regular peer without being blocked
   575  	unittest.RequireReturnsBefore(t,
   576  		func() {
   577  			goodNodes[0].Host().Peerstore().AddAddrs(goodNodeInfo1.ID, goodNodeInfo1.Addrs, peerstore.AddressTTL)
   578  			err := goodNodes[0].OpenAndWriteOnStream(ctx, goodNodeInfo1.ID, t.Name(), func(stream network.Stream) error {
   579  				// do nothing, this is a no-op stream writer, we just check that the stream was created
   580  				return nil
   581  			})
   582  			require.NoError(t, err)
   583  		}, 1*time.Second, "creating stream to a responsive node failed while concurrently blocked on unresponsive node")
   584  
   585  	// requires the CreateStream call to the unresponsive node was blocked while we attempted the CreateStream to the
   586  	// good address
   587  	unittest.RequireNeverClosedWithin(t,
   588  		blockedCallCh,
   589  		1*time.Millisecond,
   590  		"CreateStream attempt to the unresponsive peer did not block after connecting to good node")
   591  }