github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/network/p2p/tracer/internal/rpc_sent_tracker_test.go (about)

     1  package internal
     2  
     3  import (
     4  	"context"
     5  	"os"
     6  	"testing"
     7  	"time"
     8  
     9  	pubsub "github.com/libp2p/go-libp2p-pubsub"
    10  	pb "github.com/libp2p/go-libp2p-pubsub/pb"
    11  	"github.com/rs/zerolog"
    12  	"github.com/stretchr/testify/require"
    13  	"go.uber.org/atomic"
    14  
    15  	"github.com/onflow/flow-go/config"
    16  	"github.com/onflow/flow-go/module/irrecoverable"
    17  	"github.com/onflow/flow-go/module/metrics"
    18  	"github.com/onflow/flow-go/utils/unittest"
    19  )
    20  
    21  // TestNewRPCSentTracker ensures *RPCSenTracker is created as expected.
    22  func TestNewRPCSentTracker(t *testing.T) {
    23  	tracker := mockTracker(t, time.Minute)
    24  	require.NotNil(t, tracker)
    25  }
    26  
    27  // TestRPCSentTracker_IHave ensures *RPCSentTracker tracks sent iHave control messages as expected.
    28  func TestRPCSentTracker_IHave(t *testing.T) {
    29  	ctx, cancel := context.WithCancel(context.Background())
    30  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
    31  
    32  	tracker := mockTracker(t, time.Minute)
    33  	require.NotNil(t, tracker)
    34  
    35  	tracker.Start(signalerCtx)
    36  	defer func() {
    37  		cancel()
    38  		unittest.RequireComponentsDoneBefore(t, time.Second, tracker)
    39  	}()
    40  
    41  	t.Run("WasIHaveRPCSent should return false for iHave message Id that has not been tracked", func(t *testing.T) {
    42  		require.False(t, tracker.WasIHaveRPCSent("message_id"))
    43  	})
    44  
    45  	t.Run("WasIHaveRPCSent should return true for iHave message after it is tracked with iHaveRPCSent", func(t *testing.T) {
    46  		numOfMsgIds := 100
    47  		testCases := []struct {
    48  			messageIDS []string
    49  		}{
    50  			{unittest.IdentifierListFixture(numOfMsgIds).Strings()},
    51  			{unittest.IdentifierListFixture(numOfMsgIds).Strings()},
    52  			{unittest.IdentifierListFixture(numOfMsgIds).Strings()},
    53  			{unittest.IdentifierListFixture(numOfMsgIds).Strings()},
    54  		}
    55  		iHaves := make([]*pb.ControlIHave, len(testCases))
    56  		for i, testCase := range testCases {
    57  			testCase := testCase
    58  			iHaves[i] = &pb.ControlIHave{
    59  				MessageIDs: testCase.messageIDS,
    60  			}
    61  		}
    62  		rpc := rpcFixture(withIhaves(iHaves))
    63  		require.NoError(t, tracker.Track(rpc))
    64  
    65  		// eventually we should have tracked numOfMsgIds per single topic
    66  		require.Eventually(t, func() bool {
    67  			return tracker.cache.size() == uint(len(testCases)*numOfMsgIds)
    68  		}, time.Second, 100*time.Millisecond)
    69  
    70  		for _, testCase := range testCases {
    71  			for _, messageID := range testCase.messageIDS {
    72  				require.True(t, tracker.WasIHaveRPCSent(messageID))
    73  			}
    74  		}
    75  	})
    76  
    77  }
    78  
    79  // TestRPCSentTracker_DuplicateMessageID ensures the worker pool of the RPC tracker processes req with the same message ID but different nonce.
    80  func TestRPCSentTracker_DuplicateMessageID(t *testing.T) {
    81  	ctx, cancel := context.WithCancel(context.Background())
    82  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
    83  
    84  	processedWorkLogs := atomic.NewInt64(0)
    85  	hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) {
    86  		if level == zerolog.DebugLevel {
    87  			if message == iHaveRPCTrackedLog {
    88  				processedWorkLogs.Inc()
    89  			}
    90  		}
    91  	})
    92  	logger := zerolog.New(os.Stdout).Level(zerolog.DebugLevel).Hook(hook)
    93  
    94  	tracker := mockTracker(t, time.Minute)
    95  	require.NotNil(t, tracker)
    96  	tracker.logger = logger
    97  	tracker.Start(signalerCtx)
    98  	defer func() {
    99  		cancel()
   100  		unittest.RequireComponentsDoneBefore(t, time.Second, tracker)
   101  	}()
   102  
   103  	messageID := unittest.IdentifierFixture().String()
   104  	rpc := rpcFixture(withIhaves([]*pb.ControlIHave{{
   105  		MessageIDs: []string{messageID},
   106  	}}))
   107  	// track duplicate RPC's each will be processed by a worker
   108  	require.NoError(t, tracker.Track(rpc))
   109  	require.NoError(t, tracker.Track(rpc))
   110  
   111  	// eventually we should have processed both RPCs
   112  	require.Eventually(t, func() bool {
   113  		return processedWorkLogs.Load() == 2
   114  	}, time.Second, 100*time.Millisecond)
   115  }
   116  
   117  // TestRPCSentTracker_ConcurrentTracking ensures that all message IDs in RPC's are tracked as expected when tracked concurrently.
   118  func TestRPCSentTracker_ConcurrentTracking(t *testing.T) {
   119  	ctx, cancel := context.WithCancel(context.Background())
   120  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
   121  
   122  	tracker := mockTracker(t, time.Minute)
   123  	require.NotNil(t, tracker)
   124  
   125  	tracker.Start(signalerCtx)
   126  	defer func() {
   127  		cancel()
   128  		unittest.RequireComponentsDoneBefore(t, time.Second, tracker)
   129  	}()
   130  
   131  	numOfMsgIds := 100
   132  	numOfRPCs := 100
   133  	rpcs := make([]*pubsub.RPC, numOfRPCs)
   134  	for i := 0; i < numOfRPCs; i++ {
   135  		i := i
   136  		go func() {
   137  			rpc := rpcFixture(withIhaves([]*pb.ControlIHave{{MessageIDs: unittest.IdentifierListFixture(numOfMsgIds).Strings()}}))
   138  			require.NoError(t, tracker.Track(rpc))
   139  			rpcs[i] = rpc
   140  		}()
   141  	}
   142  
   143  	// eventually we should have tracked numOfMsgIds per single topic
   144  	require.Eventually(t, func() bool {
   145  		return tracker.cache.size() == uint(numOfRPCs*numOfMsgIds)
   146  	}, time.Second, 100*time.Millisecond)
   147  
   148  	for _, rpc := range rpcs {
   149  		ihaves := rpc.GetControl().GetIhave()
   150  		for _, messageID := range ihaves[0].GetMessageIDs() {
   151  			require.True(t, tracker.WasIHaveRPCSent(messageID))
   152  		}
   153  	}
   154  }
   155  
   156  // TestRPCSentTracker_IHave ensures *RPCSentTracker tracks the last largest iHave size as expected.
   157  func TestRPCSentTracker_LastHighestIHaveRPCSize(t *testing.T) {
   158  	ctx, cancel := context.WithCancel(context.Background())
   159  	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
   160  
   161  	tracker := mockTracker(t, 3*time.Second)
   162  	require.NotNil(t, tracker)
   163  
   164  	tracker.Start(signalerCtx)
   165  	defer func() {
   166  		cancel()
   167  		unittest.RequireComponentsDoneBefore(t, time.Second, tracker)
   168  	}()
   169  
   170  	expectedLastHighestSize := 1000
   171  	// adding a single message ID to the iHave enables us to track the expected cache size by the amount of iHaves.
   172  	numOfMessageIds := 1
   173  	testCases := []struct {
   174  		rpcFixture  *pubsub.RPC
   175  		numOfIhaves int
   176  	}{
   177  		{rpcFixture(withIhaves(mockIHaveFixture(10, numOfMessageIds))), 10},
   178  		{rpcFixture(withIhaves(mockIHaveFixture(100, numOfMessageIds))), 100},
   179  		{rpcFixture(withIhaves(mockIHaveFixture(expectedLastHighestSize, numOfMessageIds))), expectedLastHighestSize},
   180  		{rpcFixture(withIhaves(mockIHaveFixture(999, numOfMessageIds))), 999},
   181  		{rpcFixture(withIhaves(mockIHaveFixture(23, numOfMessageIds))), 23},
   182  	}
   183  
   184  	expectedCacheSize := 0
   185  	for _, testCase := range testCases {
   186  		require.NoError(t, tracker.Track(testCase.rpcFixture))
   187  		expectedCacheSize += testCase.numOfIhaves
   188  	}
   189  
   190  	// eventually we should have tracked numOfMsgIds per single topic
   191  	require.Eventually(t, func() bool {
   192  		return tracker.cache.size() == uint(expectedCacheSize)
   193  	}, time.Second, 100*time.Millisecond)
   194  
   195  	require.Equal(t, int64(expectedLastHighestSize), tracker.LastHighestIHaveRPCSize())
   196  
   197  	// after setting sending large RPC lastHighestIHaveRPCSize should reset to 0 after lastHighestIHaveRPCSize reset loop tick
   198  	largeIhave := 50000
   199  	require.NoError(t, tracker.Track(rpcFixture(withIhaves(mockIHaveFixture(largeIhave, numOfMessageIds)))))
   200  	require.Eventually(t, func() bool {
   201  		return tracker.LastHighestIHaveRPCSize() == int64(largeIhave)
   202  	}, 1*time.Second, 100*time.Millisecond)
   203  
   204  	// we expect lastHighestIHaveRPCSize to be set to the current rpc size being tracked if it hasn't been updated since the configured lastHighestIHaveRPCSizeResetInterval
   205  	expectedEventualLastHighest := 8
   206  	require.Eventually(t, func() bool {
   207  		require.NoError(t, tracker.Track(rpcFixture(withIhaves(mockIHaveFixture(expectedEventualLastHighest, numOfMessageIds)))))
   208  		return tracker.LastHighestIHaveRPCSize() == int64(expectedEventualLastHighest)
   209  	}, 4*time.Second, 100*time.Millisecond)
   210  }
   211  
   212  // mockIHaveFixture generate list of iHaves of size n. Each iHave will be created with m number of random message ids.
   213  func mockIHaveFixture(n, m int) []*pb.ControlIHave {
   214  	iHaves := make([]*pb.ControlIHave, n)
   215  	for i := 0; i < n; i++ {
   216  		// topic does not have to be a valid flow topic, for teting purposes we can use a random string
   217  		topic := unittest.IdentifierFixture().String()
   218  		iHaves[i] = &pb.ControlIHave{
   219  			TopicID:    &topic,
   220  			MessageIDs: unittest.IdentifierListFixture(m).Strings(),
   221  		}
   222  	}
   223  	return iHaves
   224  }
   225  
   226  func mockTracker(t *testing.T, lastHighestIhavesSentResetInterval time.Duration) *RPCSentTracker {
   227  	cfg, err := config.DefaultConfig()
   228  	require.NoError(t, err)
   229  	tracker := NewRPCSentTracker(&RPCSentTrackerConfig{
   230  		Logger:                             zerolog.Nop(),
   231  		RPCSentCacheSize:                   cfg.NetworkConfig.GossipSub.RpcTracer.RPCSentTrackerCacheSize,
   232  		RPCSentCacheCollector:              metrics.NewNoopCollector(),
   233  		WorkerQueueCacheCollector:          metrics.NewNoopCollector(),
   234  		WorkerQueueCacheSize:               cfg.NetworkConfig.GossipSub.RpcTracer.RPCSentTrackerQueueCacheSize,
   235  		NumOfWorkers:                       1,
   236  		LastHighestIhavesSentResetInterval: lastHighestIhavesSentResetInterval,
   237  	})
   238  	return tracker
   239  }
   240  
   241  type rpcFixtureOpt func(*pubsub.RPC)
   242  
   243  func withIhaves(iHave []*pb.ControlIHave) rpcFixtureOpt {
   244  	return func(rpc *pubsub.RPC) {
   245  		rpc.Control.Ihave = iHave
   246  	}
   247  }
   248  
   249  func rpcFixture(opts ...rpcFixtureOpt) *pubsub.RPC {
   250  	rpc := &pubsub.RPC{
   251  		RPC: pb.RPC{
   252  			Control: &pb.ControlMessage{},
   253  		},
   254  	}
   255  	for _, opt := range opts {
   256  		opt(rpc)
   257  	}
   258  	return rpc
   259  }