github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/network/p2p/connection/connManager_test.go (about)

     1  package connection_test
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"os"
     7  	"testing"
     8  	"time"
     9  
    10  	"github.com/libp2p/go-libp2p/core/peer"
    11  	"github.com/rs/zerolog"
    12  	"github.com/stretchr/testify/require"
    13  
    14  	"github.com/onflow/flow-go/config"
    15  	"github.com/onflow/flow-go/model/flow"
    16  	"github.com/onflow/flow-go/module/irrecoverable"
    17  	"github.com/onflow/flow-go/module/metrics"
    18  	"github.com/onflow/flow-go/network/internal/p2pfixtures"
    19  	"github.com/onflow/flow-go/network/netconf"
    20  	"github.com/onflow/flow-go/network/p2p/connection"
    21  	p2ptest "github.com/onflow/flow-go/network/p2p/test"
    22  	"github.com/onflow/flow-go/network/p2p/utils"
    23  	"github.com/onflow/flow-go/utils/unittest"
    24  )
    25  
    26  const (
    27  	protectF     = "protect"
    28  	unprotectF   = "unprotect"
    29  	isProtectedF = "isprotected"
    30  )
    31  
    32  type fun struct {
    33  	funName     string
    34  	expectation bool
    35  }
    36  
    37  var protect = fun{
    38  	protectF,
    39  	false,
    40  }
    41  var unprotect = fun{
    42  	unprotectF,
    43  	false,
    44  }
    45  var isProtected = fun{
    46  	isProtectedF,
    47  	true,
    48  }
    49  var isNotProtected = fun{
    50  	isProtectedF,
    51  	false,
    52  }
    53  
    54  // TestConnectionManagerProtection tests that multiple protected and unprotected calls result in the correct IsProtected
    55  // status for a peer ID
    56  func TestConnectionManagerProtection(t *testing.T) {
    57  
    58  	log := zerolog.New(os.Stderr).Level(zerolog.ErrorLevel)
    59  	flowConfig, err := config.DefaultConfig()
    60  	require.NoError(t, err)
    61  	noopMetrics := metrics.NewNoopCollector()
    62  	connManager, err := connection.NewConnManager(log, noopMetrics, &flowConfig.NetworkConfig.ConnectionManager)
    63  	require.NoError(t, err)
    64  
    65  	testCases := [][]fun{
    66  		// single stream created on a connection
    67  		{protect, isProtected, unprotect, isNotProtected},
    68  		// two streams created on a connection at the same time
    69  		{protect, protect, unprotect, isNotProtected, unprotect, isNotProtected},
    70  		// two streams created on a connection one after another
    71  		{protect, unprotect, isNotProtected, protect, unprotect, isNotProtected},
    72  	}
    73  
    74  	for _, testCase := range testCases {
    75  		testSequence(t, testCase, connManager)
    76  	}
    77  }
    78  
    79  func testSequence(t *testing.T, sequence []fun, connMgr *connection.ConnManager) {
    80  	pID := generatePeerInfo(t)
    81  	for _, s := range sequence {
    82  		switch s.funName {
    83  		case protectF:
    84  			connMgr.Protect(pID, "global")
    85  		case unprotectF:
    86  			connMgr.Unprotect(pID, "global")
    87  		case isProtectedF:
    88  			require.Equal(t, connMgr.IsProtected(pID, ""), s.expectation, fmt.Sprintf("failed sequence: %v", sequence))
    89  		}
    90  	}
    91  }
    92  
    93  func generatePeerInfo(t *testing.T) peer.ID {
    94  	key := p2pfixtures.NetworkingKeyFixtures(t)
    95  	identity := unittest.IdentityFixture(unittest.WithNetworkingKey(key.PublicKey()), unittest.WithAddress("1.1.1.1:0"))
    96  	pInfo, err := utils.PeerAddressInfo(identity.IdentitySkeleton)
    97  	require.NoError(t, err)
    98  	return pInfo.ID
    99  }
   100  
// TestConnectionManager_Watermarking tests that the connection manager prunes connections when the number of connections
// exceeds the high watermark and that it does not prune connections when the number of connections is below the low watermark.
func TestConnectionManager_Watermarking(t *testing.T) {
	sporkId := unittest.IdentifierFixture()
	ctx, cancel := context.WithCancel(context.Background())
	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
	defer cancel()

	cfg := &netconf.ConnectionManager{
		HighWatermark: 4,                      // whenever the number of connections exceeds 4, connection manager prunes connections.
		LowWatermark:  2,                      // connection manager prunes connections until the number of connections is 2.
		GracePeriod:   500 * time.Millisecond, // connections younger than 500ms are exempt from pruning (shortened just for testing).
		SilencePeriod: time.Second,            // connection manager prune checking kicks in every second (shortened just for testing).
	}
	thisConnMgr, err := connection.NewConnManager(
		unittest.Logger(),
		metrics.NewNoopCollector(),
		cfg)
	require.NoError(t, err)
	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
	thisNode, identity := p2ptest.NodeFixture(
		t,
		sporkId,
		t.Name(),
		idProvider,
		p2ptest.WithConnectionManager(thisConnMgr))
	idProvider.SetIdentities(flow.IdentityList{&identity})

	// 5 other nodes: one more than the high watermark, so connecting to all of
	// them is guaranteed to trigger pruning.
	otherNodes, _ := p2ptest.NodesFixture(t, sporkId, t.Name(), 5, idProvider)

	nodes := append(otherNodes, thisNode)

	p2ptest.StartNodes(t, signalerCtx, nodes)
	defer p2ptest.StopNodes(t, nodes, cancel)

	// connect this node to all other nodes.
	for _, otherNode := range otherNodes {
		require.NoError(t, thisNode.Host().Connect(ctx, otherNode.Host().Peerstore().PeerInfo(otherNode.ID())))
	}

	// ensures this node is connected to all other nodes (based on the number of connections).
	require.Eventuallyf(t, func() bool {
		return len(thisNode.Host().Network().Conns()) == len(otherNodes)
	}, 1*time.Second, 100*time.Millisecond, "expected %d connections, got %d", len(otherNodes), len(thisNode.Host().Network().Conns()))

	// wait for grace period to expire and connection manager kick in as the number of connections is beyond high watermark.
	time.Sleep(time.Second)

	// ensures that eventually connection manager closes connections till the low watermark is reached.
	require.Eventuallyf(t, func() bool {
		return len(thisNode.Host().Network().Conns()) == cfg.LowWatermark
	}, 1*time.Second, 100*time.Millisecond, "expected %d connections, got %d", cfg.LowWatermark, len(thisNode.Host().Network().Conns()))

	// connects this node to one of the other nodes that is pruned by connection manager.
	for _, otherNode := range otherNodes {
		if len(thisNode.Host().Network().ConnsToPeer(otherNode.ID())) == 0 {
			require.NoError(t, thisNode.Host().Connect(ctx, otherNode.Host().Peerstore().PeerInfo(otherNode.ID())))
			break // we only need to connect to one node.
		}
	}

	// wait for another grace period to expire and connection manager kick in.
	time.Sleep(time.Second)

	// ensures that connection manager does not close any connections as the number of connections is below low watermark.
	// (LowWatermark+1 = 3 connections is still at or below the high watermark, so no pruning should occur.)
	require.Eventuallyf(t, func() bool {
		return len(thisNode.Host().Network().Conns()) == cfg.LowWatermark+1
	}, 1*time.Second, 100*time.Millisecond, "expected %d connections, got %d", cfg.LowWatermark+1, len(thisNode.Host().Network().Conns()))
}