github.com/koko1123/flow-go-1@v0.29.6/network/p2p/p2pnode/libp2pStream_test.go

     1  package p2pnode_test
     2  
     3  import (
     4  	"bufio"
     5  	"context"
     6  	"errors"
     7  	"fmt"
     8  	"io"
     9  	"regexp"
    10  	"sync"
    11  	"testing"
    12  	"time"
    13  
    14  	"github.com/koko1123/flow-go-1/network/p2p"
    15  	p2ptest "github.com/koko1123/flow-go-1/network/p2p/test"
    16  
    17  	"github.com/libp2p/go-libp2p/core"
    18  	"github.com/libp2p/go-libp2p/core/network"
    19  	"github.com/libp2p/go-libp2p/core/peerstore"
    20  	"github.com/libp2p/go-libp2p/p2p/net/swarm"
    21  	"github.com/stretchr/testify/assert"
    22  	"github.com/stretchr/testify/require"
    23  
    24  	"github.com/koko1123/flow-go-1/model/flow"
    25  	"github.com/koko1123/flow-go-1/module/irrecoverable"
    26  	"github.com/koko1123/flow-go-1/network/internal/p2pfixtures"
    27  	"github.com/koko1123/flow-go-1/network/internal/p2putils"
    28  	"github.com/koko1123/flow-go-1/network/p2p/p2pnode"
    29  	"github.com/koko1123/flow-go-1/network/p2p/unicast"
    30  	"github.com/koko1123/flow-go-1/network/p2p/utils"
    31  	"github.com/koko1123/flow-go-1/utils/unittest"
    32  )
    33  
     34  // TestStreamClosing tests 1-1 communication with streams closed using libp2p handler.FullClose
    35  func TestStreamClosing(t *testing.T) {
    36  	count := 10
    37  	ctx, cancel := context.WithCancel(context.Background())
    38  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
    39  
    40  	var msgRegex = regexp.MustCompile("^hello[0-9]")
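         	// each sender writes a single newline-terminated message of the form "hello<i>";
         	// the regexp checks that every line received by the handler carries that prefix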
    41  
    42  	handler, streamCloseWG := mockStreamHandlerForMessages(t, ctx, count, msgRegex)
    43  
    44  	// Creates nodes
    45  	nodes, identities := p2ptest.NodesFixture(t,
    46  		unittest.IdentifierFixture(),
    47  		"test_stream_closing",
    48  		2,
    49  		p2ptest.WithDefaultStreamHandler(handler))
    50  
    51  	p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond)
    52  	defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond)
    53  
    54  	nodeInfo1, err := utils.PeerAddressInfo(*identities[1])
    55  	require.NoError(t, err)
    56  
    57  	senderWG := sync.WaitGroup{}
    58  	senderWG.Add(count)
    59  	for i := 0; i < count; i++ {
    60  		go func(i int) {
    61  			// Create stream from node 1 to node 2 (reuse if one already exists)
    62  			nodes[0].Host().Peerstore().AddAddrs(nodeInfo1.ID, nodeInfo1.Addrs, peerstore.AddressTTL)
    63  			s, err := nodes[0].CreateStream(ctx, nodeInfo1.ID)
    64  			assert.NoError(t, err)
    65  			w := bufio.NewWriter(s)
    66  
    67  			// Send message from node 1 to 2
    68  			msg := fmt.Sprintf("hello%d\n", i)
    69  			_, err = w.WriteString(msg)
    70  			assert.NoError(t, err)
    71  
    72  			// Flush the stream
    73  			assert.NoError(t, w.Flush())
    74  
    75  			// close the stream
    76  			err = s.Close()
    77  			require.NoError(t, err)
    78  
    79  			senderWG.Done()
    80  		}(i)
    81  	}
    82  
    83  	// wait for stream to be closed
    84  	unittest.RequireReturnsBefore(t, senderWG.Wait, 1*time.Second, "could not send messages on time")
    85  	unittest.RequireReturnsBefore(t, streamCloseWG.Wait, 1*time.Second, "could not close stream at receiver side")
    86  }
    87  
     88  // mockStreamHandlerForMessages creates a stream handler that expects to receive `msgCount` unique messages matching the input regexp.
     89  // The returned wait group is released once all messages have been fully received and their associated streams are closed.
    90  func mockStreamHandlerForMessages(t *testing.T, ctx context.Context, msgCount int, msgRegexp *regexp.Regexp) (network.StreamHandler, *sync.WaitGroup) {
    91  	streamCloseWG := &sync.WaitGroup{}
    92  	streamCloseWG.Add(msgCount)
    93  
    94  	h := func(s network.Stream) {
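         		// handle each inbound stream in its own goroutine: read newline-delimited messages
         		// until EOF, then close the stream and release one unit of the wait group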
    95  		go func(s network.Stream) {
    96  			rw := bufio.NewReadWriter(bufio.NewReader(s), bufio.NewWriter(s))
    97  			for {
    98  				str, err := rw.ReadString('\n')
    99  				if err != nil {
   100  					if errors.Is(err, io.EOF) {
   101  						err := s.Close()
   102  						require.NoError(t, err)
   103  
   104  						streamCloseWG.Done()
   105  						return
   106  					}
   107  					require.Fail(t, fmt.Sprintf("received error %v", err))
   108  					err = s.Reset()
   109  					require.NoError(t, err)
   110  					return
   111  				}
   112  				select {
   113  				case <-ctx.Done():
   114  					return
   115  				default:
   116  					require.True(t, msgRegexp.MatchString(str), str)
   117  				}
   118  			}
   119  		}(s)
   120  
   121  	}
   122  	return h, streamCloseWG
   123  }
   124  
   125  // TestCreateStream_WithDefaultUnicast evaluates correctness of creating default (tcp) unicast streams between two libp2p nodes.
   126  func TestCreateStream_WithDefaultUnicast(t *testing.T) {
   127  	sporkId := unittest.IdentifierFixture()
   128  	testCreateStream(t,
   129  		sporkId,
   130  		nil, // sends nil as preferred unicast so that nodes run on default plain tcp streams.
   131  		unicast.FlowProtocolID(sporkId))
   132  }
   133  
   134  // TestCreateStream_WithPreferredGzipUnicast evaluates correctness of creating gzip-compressed tcp unicast streams between two libp2p nodes.
   135  func TestCreateStream_WithPreferredGzipUnicast(t *testing.T) {
   136  	sporkId := unittest.IdentifierFixture()
   137  	testCreateStream(t,
   138  		sporkId,
   139  		[]unicast.ProtocolName{unicast.GzipCompressionUnicast},
   140  		unicast.FlowGzipProtocolId(sporkId))
   141  }
   142  
    143  // testCreateStream checks that a new stream of the "preferred" type is created each time CreateStream is called and that an existing stream is not
    144  // reused. The "preferred" stream type is the one with the largest index in the `unicasts` list.
    145  // To check that the streams are of the "preferred" type, it compares the protocol id of each established stream against the input `protocolID`.
   146  func testCreateStream(t *testing.T, sporkId flow.Identifier, unicasts []unicast.ProtocolName, protocolID core.ProtocolID) {
   147  	count := 2
   148  	ctx, cancel := context.WithCancel(context.Background())
   149  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
   150  
   151  	nodes, identities := p2ptest.NodesFixture(t,
   152  		sporkId,
   153  		"test_create_stream",
   154  		count,
   155  		p2ptest.WithPreferredUnicasts(unicasts))
   156  
   157  	p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond)
   158  	defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond)
   159  
   160  	id2 := identities[1]
   161  
   162  	// Assert that there is no outbound stream to the target yet
   163  	require.Equal(t, 0, p2putils.CountStream(nodes[0].Host(), nodes[1].Host().ID(), protocolID, network.DirOutbound))
   164  
    165  	// Now attempt to create 100 outbound streams to the same destination by calling CreateStream repeatedly
   166  	streamCount := 100
   167  	var streams []network.Stream
   168  	for i := 0; i < streamCount; i++ {
   169  		pInfo, err := utils.PeerAddressInfo(*id2)
   170  		require.NoError(t, err)
   171  		nodes[0].Host().Peerstore().AddAddrs(pInfo.ID, pInfo.Addrs, peerstore.AddressTTL)
   172  		anotherStream, err := nodes[0].CreateStream(ctx, pInfo.ID)
   173  		// Assert that a stream was returned without error
   174  		require.NoError(t, err)
   175  		require.NotNil(t, anotherStream)
   176  		// assert that the stream count within libp2p incremented (a new stream was created)
   177  		require.Equal(t, i+1, p2putils.CountStream(nodes[0].Host(), nodes[1].Host().ID(), protocolID, network.DirOutbound))
   178  		// assert that the same connection is reused
   179  		require.Len(t, nodes[0].Host().Network().Conns(), 1)
   180  		streams = append(streams, anotherStream)
   181  	}
   182  
   183  	// reverse loop to close all the streams
   184  	for i := streamCount - 1; i >= 0; i-- {
   185  		s := streams[i]
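         		// close each stream in a short-lived goroutine so the test can bound how long Close may block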
   186  		wg := sync.WaitGroup{}
   187  		wg.Add(1)
   188  		go func() {
   189  			err := s.Close()
   190  			assert.NoError(t, err)
   191  			wg.Done()
   192  		}()
   193  		unittest.RequireReturnsBefore(t, wg.Wait, 1*time.Second, "could not close streams on time")
   194  		// assert that the stream count within libp2p decremented
   195  		require.Equal(t, i, p2putils.CountStream(nodes[0].Host(), nodes[1].Host().ID(), protocolID, network.DirOutbound))
   196  	}
   197  }
   198  
    199  // TestCreateStream_FallBack checks that two libp2p nodes with conflicting supported unicast protocols fall back
    200  // to the default (tcp) unicast protocol during negotiation.
    201  // To do this, a node with preferred gzip-compressed tcp unicast tries creating a stream to another node that only
    202  // supports the default plain tcp unicast. The test verifies that the unicast streams established between the two
    203  // nodes are of the default plain tcp type.
   204  func TestCreateStream_FallBack(t *testing.T) {
   205  	ctx, cancel := context.WithCancel(context.Background())
   206  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
   207  
    208  	// Creates two nodes: one with preferred gzip-compressed unicast, and the other with only the default protocol
   209  	sporkId := unittest.IdentifierFixture()
   210  	thisNode, _ := p2ptest.NodeFixture(t,
   211  		sporkId,
   212  		"test_create_stream_fallback",
   213  		p2ptest.WithPreferredUnicasts([]unicast.ProtocolName{unicast.GzipCompressionUnicast}))
   214  	otherNode, otherId := p2ptest.NodeFixture(t, sporkId, "test_create_stream_fallback")
   215  
   216  	nodes := []p2p.LibP2PNode{thisNode, otherNode}
   217  	p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond)
   218  	defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond)
   219  
   220  	// Assert that there is no outbound stream to the target yet (neither default nor preferred)
   221  	defaultProtocolId := unicast.FlowProtocolID(sporkId)
   222  	preferredProtocolId := unicast.FlowGzipProtocolId(sporkId)
   223  	require.Equal(t, 0, p2putils.CountStream(thisNode.Host(), otherNode.Host().ID(), defaultProtocolId, network.DirOutbound))
   224  	require.Equal(t, 0, p2putils.CountStream(thisNode.Host(), otherNode.Host().ID(), preferredProtocolId, network.DirOutbound))
   225  
    226  	// Now attempt to create 100 outbound streams to the same destination by calling CreateStream repeatedly
   227  	streamCount := 100
   228  	var streams []network.Stream
   229  	for i := 0; i < streamCount; i++ {
   230  		pInfo, err := utils.PeerAddressInfo(otherId)
   231  		require.NoError(t, err)
   232  		thisNode.Host().Peerstore().AddAddrs(pInfo.ID, pInfo.Addrs, peerstore.AddressTTL)
   233  
   234  		// a new stream must be created
   235  		anotherStream, err := thisNode.CreateStream(ctx, pInfo.ID)
   236  		require.NoError(t, err)
   237  		require.NotNil(t, anotherStream)
   238  
   239  		// number of default-protocol streams must be incremented, while preferred ones must be zero, since the other node
   240  		// only supports default ones.
   241  		require.Equal(t, i+1, p2putils.CountStream(thisNode.Host(), otherNode.Host().ID(), defaultProtocolId, network.DirOutbound))
   242  		require.Equal(t, 0, p2putils.CountStream(thisNode.Host(), otherNode.Host().ID(), preferredProtocolId, network.DirOutbound))
   243  
   244  		// assert that the same connection is reused
   245  		require.Len(t, thisNode.Host().Network().Conns(), 1)
   246  		streams = append(streams, anotherStream)
   247  	}
   248  
   249  	// reverse loop to close all the streams
   250  	for i := streamCount - 1; i >= 0; i-- {
   251  		s := streams[i]
   252  		wg := sync.WaitGroup{}
   253  		wg.Add(1)
   254  		go func() {
   255  			err := s.Close()
   256  			assert.NoError(t, err)
   257  			wg.Done()
   258  		}()
   259  		unittest.RequireReturnsBefore(t, wg.Wait, 1*time.Second, "could not close streams on time")
   260  
   261  		// number of default-protocol streams must be decremented, while preferred ones must be zero, since the other node
   262  		// only supports default ones.
   263  		require.Equal(t, i, p2putils.CountStream(thisNode.Host(), otherNode.Host().ID(), defaultProtocolId, network.DirOutbound))
   264  		require.Equal(t, 0, p2putils.CountStream(thisNode.Host(), otherNode.Host().ID(), preferredProtocolId, network.DirOutbound))
   265  	}
   266  }
   267  
    268  // TestCreateStreamIsConcurrencySafe tests that CreateStream is concurrency safe
   269  func TestCreateStreamIsConcurrencySafe(t *testing.T) {
   270  	ctx, cancel := context.WithCancel(context.Background())
   271  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
   272  
   273  	// create two nodes
   274  	nodes, identities := p2ptest.NodesFixture(t, unittest.IdentifierFixture(), "test_create_stream_is_concurrency_safe", 2)
   275  	require.Len(t, identities, 2)
   276  
   277  	p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond)
   278  	defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond)
   279  
   280  	nodeInfo1, err := utils.PeerAddressInfo(*identities[1])
   281  	require.NoError(t, err)
   282  
   283  	wg := sync.WaitGroup{}
   284  
    285  	// create a gate that holds back the CreateStream calls of all concurrent goroutines
   286  	gate := make(chan struct{})
   287  
   288  	createStream := func() {
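         		// block until the gate is opened so that all goroutines invoke CreateStream at (roughly) the same time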
   289  		<-gate
   290  		nodes[0].Host().Peerstore().AddAddrs(nodeInfo1.ID, nodeInfo1.Addrs, peerstore.AddressTTL)
   291  		_, err := nodes[0].CreateStream(ctx, nodeInfo1.ID)
   292  		assert.NoError(t, err) // assert that stream was successfully created
   293  		wg.Done()
   294  	}
   295  
   296  	// kick off 10 concurrent calls to CreateStream
   297  	for i := 0; i < 10; i++ {
   298  		wg.Add(1)
   299  		go createStream()
   300  	}
   301  	// open the gate by closing the channel
   302  	close(gate)
   303  
   304  	// no call should block
   305  	unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second)
   306  }
   307  
   308  // TestNoBackoffWhenCreatingStream checks that backoff is not enabled between attempts to connect to a remote peer
   309  // for one-to-one direct communication.
   310  func TestNoBackoffWhenCreatingStream(t *testing.T) {
   311  	ctx, cancel := context.WithCancel(context.Background())
   312  	defer cancel()
   313  
   314  	// setup per node contexts so they can be stopped independently
   315  	ctx1, cancel1 := context.WithCancel(ctx)
   316  	signalerCtx1 := irrecoverable.NewMockSignalerContext(t, ctx1)
   317  
   318  	ctx2, cancel2 := context.WithCancel(ctx)
   319  	signalerCtx2 := irrecoverable.NewMockSignalerContext(t, ctx2)
   320  
   321  	count := 2
   322  	// Creates nodes
   323  	nodes, identities := p2ptest.NodesFixture(t,
   324  		unittest.IdentifierFixture(),
   325  		"test_no_backoff_when_create_stream",
   326  		count,
   327  	)
   328  	node1 := nodes[0]
   329  	node2 := nodes[1]
   330  
   331  	p2ptest.StartNode(t, signalerCtx1, node1, 100*time.Millisecond)
   332  	p2ptest.StartNode(t, signalerCtx2, node2, 100*time.Millisecond)
   333  
   334  	// stop node 2 immediately
   335  	p2ptest.StopNode(t, node2, cancel2, 100*time.Millisecond)
   336  	defer p2ptest.StopNode(t, node1, cancel1, 100*time.Millisecond)
   337  
   338  	id2 := identities[1]
   339  	pInfo, err := utils.PeerAddressInfo(*id2)
   340  	require.NoError(t, err)
   341  	nodes[0].Host().Peerstore().AddAddrs(pInfo.ID, pInfo.Addrs, peerstore.AddressTTL)
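         	// upper bound on how long a single CreateStream call may spend dialing: at most MaxConnectAttempt
         	// attempts, each presumably sleeping up to MaxConnectAttemptSleepDuration milliseconds between retries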
   342  	maxTimeToWait := p2pnode.MaxConnectAttempt * unicast.MaxConnectAttemptSleepDuration * time.Millisecond
   343  
   344  	// need to add some buffer time so that RequireReturnsBefore waits slightly longer than maxTimeToWait to avoid
   345  	// a race condition
   346  	someGraceTime := 100 * time.Millisecond
   347  	totalWaitTime := maxTimeToWait + someGraceTime
   348  
    349  	// each CreateStream() call may try to connect up to MaxConnectAttempt (3) times.
    350  
    351  	// there are 2 scenarios that we need to account for:
    352  	//
    353  	// 1. machines where a timeout occurs on the first connection attempt - this can be due to local firewall rules or other processes running on the machine.
    354  	//    In this case, we need to create a scenario where a backoff would normally have occurred. This is why we initiate a second connection attempt.
    355  	//    Libp2p remembers the peer we are trying to connect to between CreateStream() calls and would have initiated a backoff if backoff wasn't turned off.
    356  	//    The second CreateStream() call will again attempt to connect up to MaxConnectAttempt times, and that should never result in a backoff error.
    357  	//
    358  	// 2. machines where a timeout does NOT occur on the first connection attempt - this is the case on CI machines and some local dev machines without a firewall / too many other processes.
    359  	//    In this case, there will be MaxConnectAttempt (3) connection attempts on the first CreateStream() call and MaxConnectAttempt (3) attempts on the second CreateStream() call.
    360  
    361  	// make two separate stream creation attempts and assert that no connection backoff happened
   362  	for i := 0; i < 2; i++ {
   363  
   364  		// limit the maximum amount of time to wait for a connection to be established by using a context that times out
   365  		ctx, cancel := context.WithTimeout(ctx, maxTimeToWait)
   366  
   367  		unittest.RequireReturnsBefore(t, func() {
   368  			_, err = node1.CreateStream(ctx, pInfo.ID)
   369  		}, totalWaitTime, fmt.Sprintf("create stream did not error within %s", totalWaitTime.String()))
   370  		require.Error(t, err)
   371  		require.NotContainsf(t, err.Error(), swarm.ErrDialBackoff.Error(), "swarm dialer unexpectedly did a back off for a one-to-one connection")
   372  		cancel()
   373  	}
   374  }
   375  
   376  // TestUnicastOverStream_WithPlainStream checks two nodes can send and receive unicast messages on libp2p plain streams.
   377  func TestUnicastOverStream_WithPlainStream(t *testing.T) {
   378  	testUnicastOverStream(t)
   379  }
   380  
   381  // TestUnicastOverStream_WithGzipStreamCompression checks two nodes can send and receive unicast messages on gzip compressed streams
   382  // when both nodes have gzip stream compression enabled.
   383  func TestUnicastOverStream_WithGzipStreamCompression(t *testing.T) {
   384  	testUnicastOverStream(t, p2ptest.WithPreferredUnicasts([]unicast.ProtocolName{unicast.GzipCompressionUnicast}))
   385  }
   386  
   387  // testUnicastOverStream sends a message from node 1 to node 2 and then from node 2 to node 1 over a unicast stream.
   388  func testUnicastOverStream(t *testing.T, opts ...p2ptest.NodeFixtureParameterOption) {
   389  	ctx, cancel := context.WithCancel(context.Background())
   390  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
   391  
   392  	// Creates nodes
   393  	sporkId := unittest.IdentifierFixture()
   394  
   395  	streamHandler1, inbound1 := p2ptest.StreamHandlerFixture(t)
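         	// the fixture's handler is expected to forward every unicast payload it receives onto the returned inbound channel,
         	// which the message-exchange check at the end of this test consumes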
   396  	node1, id1 := p2ptest.NodeFixture(
   397  		t,
   398  		sporkId,
   399  		t.Name(),
   400  		append(opts, p2ptest.WithDefaultStreamHandler(streamHandler1))...)
   401  
   402  	streamHandler2, inbound2 := p2ptest.StreamHandlerFixture(t)
   403  	node2, id2 := p2ptest.NodeFixture(
   404  		t,
   405  		sporkId,
   406  		t.Name(),
   407  		append(opts, p2ptest.WithDefaultStreamHandler(streamHandler2))...)
   408  
   409  	nodes := []p2p.LibP2PNode{node1, node2}
   410  	ids := flow.IdentityList{&id1, &id2}
   411  	p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond)
   412  	defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond)
   413  
   414  	p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids)
   415  
   416  	p2pfixtures.EnsureMessageExchangeOverUnicast(
   417  		t,
   418  		ctx,
   419  		nodes,
   420  		[]chan string{inbound1, inbound2},
   421  		p2pfixtures.LongStringMessageFactoryFixture(t))
   422  }
   423  
   424  // TestUnicastOverStream_Fallback checks two nodes with asymmetric sets of preferred unicast protocols can create streams and
   425  // send and receive unicasts. Despite the asymmetry, the nodes must fall back to the libp2p plain stream during negotiation.
   426  func TestUnicastOverStream_Fallback(t *testing.T) {
   427  	ctx, cancel := context.WithCancel(context.Background())
   428  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
   429  
   430  	// Creates nodes
   431  	// node1: supports only plain unicast protocol
   432  	// node2: supports plain and gzip
   433  	sporkId := unittest.IdentifierFixture()
   434  
   435  	streamHandler1, inbound1 := p2ptest.StreamHandlerFixture(t)
   436  	node1, id1 := p2ptest.NodeFixture(
   437  		t,
   438  		sporkId,
   439  		t.Name(),
   440  		p2ptest.WithDefaultStreamHandler(streamHandler1),
   441  	)
   442  
   443  	streamHandler2, inbound2 := p2ptest.StreamHandlerFixture(t)
   444  	node2, id2 := p2ptest.NodeFixture(
   445  		t,
   446  		sporkId,
   447  		t.Name(),
   448  		p2ptest.WithDefaultStreamHandler(streamHandler2),
   449  		p2ptest.WithPreferredUnicasts([]unicast.ProtocolName{unicast.GzipCompressionUnicast}),
   450  	)
   451  
   452  	nodes := []p2p.LibP2PNode{node1, node2}
   453  	ids := flow.IdentityList{&id1, &id2}
   454  	p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond)
   455  	defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond)
   456  
   457  	p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids)
   458  	p2pfixtures.EnsureMessageExchangeOverUnicast(t, ctx, nodes, []chan string{inbound1, inbound2}, p2pfixtures.LongStringMessageFactoryFixture(t))
   459  }
   460  
   461  // TestCreateStreamTimeoutWithUnresponsiveNode tests that the CreateStream call does not block longer than the
   462  // timeout interval
   463  func TestCreateStreamTimeoutWithUnresponsiveNode(t *testing.T) {
   464  	ctx, cancel := context.WithCancel(context.Background())
   465  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
   466  
   467  	// creates a regular node
   468  	nodes, identities := p2ptest.NodesFixture(t,
   469  		unittest.IdentifierFixture(),
   470  		"test_create_stream_timeout_with_unresponsive_node",
   471  		1,
   472  	)
   473  	require.Len(t, identities, 1)
   474  
   475  	p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond)
   476  	defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond)
   477  
   478  	// create a silent node which never replies
   479  	listener, silentNodeId := p2pfixtures.SilentNodeFixture(t)
   480  	defer func() {
   481  		require.NoError(t, listener.Close())
   482  	}()
   483  
   484  	silentNodeInfo, err := utils.PeerAddressInfo(silentNodeId)
   485  	require.NoError(t, err)
   486  
   487  	timeout := 1 * time.Second
   488  	tctx, tcancel := context.WithTimeout(ctx, timeout)
   489  	defer tcancel()
   490  
    491  	// attempt to create a stream from the regular node to the silent node and assert that it fails after the timeout
   492  	grace := 100 * time.Millisecond
   493  	unittest.AssertReturnsBefore(t,
   494  		func() {
   495  			nodes[0].Host().Peerstore().AddAddrs(silentNodeInfo.ID, silentNodeInfo.Addrs, peerstore.AddressTTL)
   496  			_, err = nodes[0].CreateStream(tctx, silentNodeInfo.ID)
   497  		},
   498  		timeout+grace)
   499  	assert.Error(t, err)
   500  }
   501  
   502  // TestCreateStreamIsConcurrent tests that CreateStream calls can be made concurrently such that one blocked call
   503  // does not block another concurrent call.
   504  func TestCreateStreamIsConcurrent(t *testing.T) {
   505  	ctx, cancel := context.WithCancel(context.Background())
   506  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
   507  
    508  	// create two regular nodes
   509  	goodNodes, goodNodeIds := p2ptest.NodesFixture(t,
   510  		unittest.IdentifierFixture(),
   511  		"test_create_stream_is_concurrent",
   512  		2,
   513  	)
   514  	require.Len(t, goodNodeIds, 2)
   515  
   516  	p2ptest.StartNodes(t, signalerCtx, goodNodes, 100*time.Millisecond)
   517  	defer p2ptest.StopNodes(t, goodNodes, cancel, 100*time.Millisecond)
   518  
   519  	goodNodeInfo1, err := utils.PeerAddressInfo(*goodNodeIds[1])
   520  	require.NoError(t, err)
   521  
   522  	// create a silent node which never replies
   523  	listener, silentNodeId := p2pfixtures.SilentNodeFixture(t)
   524  	defer func() {
   525  		require.NoError(t, listener.Close())
   526  	}()
   527  	silentNodeInfo, err := utils.PeerAddressInfo(silentNodeId)
   528  	require.NoError(t, err)
   529  
    530  	// attempts to create a stream to the unresponsive node and makes sure that the stream creation is blocked
   531  	blockedCallCh := unittest.RequireNeverReturnBefore(t,
   532  		func() {
   533  			goodNodes[0].Host().Peerstore().AddAddrs(silentNodeInfo.ID, silentNodeInfo.Addrs, peerstore.AddressTTL)
   534  			_, _ = goodNodes[0].CreateStream(ctx, silentNodeInfo.ID) // this call will block
   535  		},
   536  		1*time.Second,
   537  		"CreateStream attempt to the unresponsive peer did not block")
   538  
    539  	// requires that the same node can still create a stream to the other regular node without being blocked
   540  	unittest.RequireReturnsBefore(t,
   541  		func() {
   542  			goodNodes[0].Host().Peerstore().AddAddrs(goodNodeInfo1.ID, goodNodeInfo1.Addrs, peerstore.AddressTTL)
   543  			_, err := goodNodes[0].CreateStream(ctx, goodNodeInfo1.ID)
   544  			require.NoError(t, err)
   545  		},
   546  		1*time.Second, "creating stream to a responsive node failed while concurrently blocked on unresponsive node")
   547  
    548  	// requires that the CreateStream call to the unresponsive node was still blocked while we attempted the CreateStream to the
    549  	// good node
   550  	unittest.RequireNeverClosedWithin(t, blockedCallCh, 1*time.Millisecond,
   551  		"CreateStream attempt to the unresponsive peer did not block after connecting to good node")
   552  
   553  }