github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/network/p2p/utils/ratelimiter/internal/rate_limiter_map_test.go

package internal_test

import (
	"context"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/stretchr/testify/require"
	"golang.org/x/time/rate"

	"github.com/onflow/flow-go/module/irrecoverable"
	"github.com/onflow/flow-go/network/p2p/utils/ratelimiter/internal"
)

// TestLimiterMap_get checks that true is returned for stored items and false for missing items.
func TestLimiterMap_get(t *testing.T) {
	m := internal.NewLimiterMap(time.Second, time.Second)
	peerID := peer.ID("id")
	// the limiter value is a placeholder; these tests only exercise map storage
	m.Store(peerID, rate.NewLimiter(0, 0))

	_, ok := m.Get(peerID)
	require.True(t, ok)
	_, ok = m.Get("fake")
	require.False(t, ok)
}

// TestLimiterMap_remove checks the map removes keys as expected.
func TestLimiterMap_remove(t *testing.T) {
	m := internal.NewLimiterMap(time.Second, time.Second)
	peerID := peer.ID("id")
	m.Store(peerID, rate.NewLimiter(0, 0))

	_, ok := m.Get(peerID)
	require.True(t, ok)

	m.Remove(peerID)
	_, ok = m.Get(peerID)
	require.False(t, ok)
}

// TestLimiterMap_cleanup checks the map removes expired keys as expected.
func TestLimiterMap_cleanup(t *testing.T) {
	// set fake ttl to 10 minutes
	ttl := 10 * time.Minute

	// set a short tick so that cleanup kicks off quickly
	tick := 10 * time.Millisecond

	m := internal.NewLimiterMap(ttl, tick)

	start := time.Now()

	// Store some peer IDs
	peerID1 := peer.ID("id1")
	m.Store(peerID1, rate.NewLimiter(0, 0))

	peerID2 := peer.ID("id2")
	m.Store(peerID2, rate.NewLimiter(0, 0))

	peerID3 := peer.ID("id3")
	m.Store(peerID3, rate.NewLimiter(0, 0))

	// manually set lastAccessed on all 3 items so that they are removed during cleanup
	limiter, _ := m.Get(peerID1)
	limiter.SetLastAccessed(start.Add(-10 * time.Minute))

	limiter, _ = m.Get(peerID2)
	limiter.SetLastAccessed(start.Add(-10 * time.Minute))

	limiter, _ = m.Get(peerID3)
	limiter.SetLastAccessed(start.Add(-20 * time.Minute))

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)

	// kick off the cleanup process; with a 10ms tick, cleanup should run almost immediately
	go m.CleanupLoop(signalerCtx)
	time.Sleep(100 * time.Millisecond)
	_, ok := m.Get(peerID1)
	require.False(t, ok)
	_, ok = m.Get(peerID2)
	require.False(t, ok)
	_, ok = m.Get(peerID3)
	require.False(t, ok)
}

// TestLimiterMap_cleanupLoopCtxCanceled checks that a final cleanup is performed when the ctx is canceled, before the cleanup loop exits.
func TestLimiterMap_cleanupLoopCtxCanceled(t *testing.T) {
	// set fake ttl to 10 minutes
	ttl := 10 * time.Minute

	// set a long tick so that cleanup is only performed when the ctx is canceled
	tick := time.Hour

	m := internal.NewLimiterMap(ttl, tick)

	start := time.Now()

	// Store some peer IDs
	peerID1 := peer.ID("id1")
	m.Store(peerID1, rate.NewLimiter(0, 0))

	peerID2 := peer.ID("id2")
	m.Store(peerID2, rate.NewLimiter(0, 0))

	peerID3 := peer.ID("id3")
	m.Store(peerID3, rate.NewLimiter(0, 0))

	// manually set lastAccessed on all 3 items so that they are removed during cleanup
	limiter, _ := m.Get(peerID1)
	limiter.SetLastAccessed(start.Add(-10 * time.Minute))

	limiter, _ = m.Get(peerID2)
	limiter.SetLastAccessed(start.Add(-10 * time.Minute))

	limiter, _ = m.Get(peerID3)
	limiter.SetLastAccessed(start.Add(-20 * time.Minute))

	ctx, cancel := context.WithCancel(context.Background())
	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)

	// kick off the cleanup loop
	go m.CleanupLoop(signalerCtx)

	// a final cleanup should be kicked off when the SignalerContext is canceled
	cancel()
	// allow some time for the final cleanup to run
	time.Sleep(100 * time.Millisecond)
	_, ok := m.Get(peerID1)
	require.False(t, ok)
	_, ok = m.Get(peerID2)
	require.False(t, ok)
	_, ok = m.Get(peerID3)
	require.False(t, ok)
}