github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/network/p2p/tracer/internal/duplicate_msgs_counter_cache_test.go

package internal

import (
	"sync"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/rs/zerolog"
	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/module"
	"github.com/onflow/flow-go/module/metrics"
	"github.com/onflow/flow-go/utils/unittest"
)

const defaultDecay = .99
const defaultSkipDecayThreshold = 0.1

// TestDuplicateMessageTrackerCache_Init tests the GetWithInit method of the DuplicateMessageTrackerCache.
// It ensures that a record is initialized on first access and that subsequent accesses for the
// same peer ID return the same record rather than creating a new one.
func TestDuplicateMessageTrackerCache_Init(t *testing.T) {
	cache := duplicateMessageTrackerCacheFixture(t, 100, defaultDecay, defaultSkipDecayThreshold, zerolog.Nop(), metrics.NewNoopCollector())

	peerID1 := unittest.PeerIdFixture(t)
	peerID2 := unittest.PeerIdFixture(t)

	// test initializing a record for a peer ID that doesn't exist in the cache
	gauge, ok, err := cache.GetWithInit(peerID1)
	require.NoError(t, err)
	require.True(t, ok, "expected record to exist")
	require.Zerof(t, gauge, "expected gauge to be 0")
	require.Equal(t, uint(1), cache.c.Size(), "expected cache to have one additional record")

	// test initializing a record for a peer ID that already exists in the cache
	gaugeAgain, ok, err := cache.GetWithInit(peerID1)
	require.NoError(t, err)
	require.True(t, ok, "expected record to still exist")
	require.Zerof(t, gaugeAgain, "expected same gauge to be 0")
	require.Equal(t, gauge, gaugeAgain, "expected records to be the same")
	require.Equal(t, uint(1), cache.c.Size(), "expected cache to still have one additional record")

	// test initializing a record for another peer ID
	gauge2, ok, err := cache.GetWithInit(peerID2)
	require.NoError(t, err)
	require.True(t, ok, "expected record to exist")
	require.Zerof(t, gauge2, "expected second gauge to be 0")
	require.Equal(t, uint(2), cache.c.Size(), "expected cache to have two additional records")
}

// TestDuplicateMessageTrackerCache_ConcurrentInit tests the concurrent initialization of records.
// The test covers the following scenarios:
// 1. Multiple goroutines initializing records for different peer IDs.
// 2. Ensuring that all records are correctly initialized.
func TestDuplicateMessageTrackerCache_ConcurrentInit(t *testing.T) {
	cache := duplicateMessageTrackerCacheFixture(t, 100, defaultDecay, defaultSkipDecayThreshold, zerolog.Nop(), metrics.NewNoopCollector())

	peerIDs := unittest.PeerIdFixtures(t, 10)

	var wg sync.WaitGroup
	wg.Add(len(peerIDs))

	for _, peerID := range peerIDs {
		go func(id peer.ID) {
			defer wg.Done()
			gauge, found, err := cache.GetWithInit(id)
			require.NoError(t, err)
			require.True(t, found)
			require.Zerof(t, gauge, "expected all gauge values to be initialized to 0")
		}(peerID)
	}

	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish")
}

// TestDuplicateMessageTrackerCache_ConcurrentSameRecordInit tests the concurrent initialization of the same record.
// The test covers the following scenarios:
// 1. Multiple goroutines attempting to initialize the same record concurrently.
// 2. All goroutines observe the same zero-valued record; the record is only created once in the cache.
// 3. The record is correctly initialized in the cache and can be retrieved using the GetWithInit method.
func TestDuplicateMessageTrackerCache_ConcurrentSameRecordInit(t *testing.T) {
	cache := duplicateMessageTrackerCacheFixture(t, 100, defaultDecay, defaultSkipDecayThreshold, zerolog.Nop(), metrics.NewNoopCollector())

	peerID := unittest.PeerIdFixture(t)
	const concurrentAttempts = 10

	var wg sync.WaitGroup
	wg.Add(concurrentAttempts)

	for i := 0; i < concurrentAttempts; i++ {
		go func() {
			defer wg.Done()
			gauge, found, err := cache.GetWithInit(peerID)
			require.NoError(t, err)
			require.True(t, found)
			require.Zero(t, gauge)
		}()
	}

	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish")

	// ensure that the record was only initialized once
	require.Equal(t, uint(1), cache.c.Size())
}

// TestDuplicateMessageTrackerCache_DuplicateMessageReceived tests the DuplicateMessageReceived method of the DuplicateMessageTrackerCache.
// The test covers the following scenarios:
// 1. Updating a record gauge for an existing peer ID.
// 2. Attempting to update a record gauge for a non-existing peer ID should not result in an error; DuplicateMessageReceived should always attempt to initialize the gauge.
// 3. Multiple updates on the same record only initialize the record once.
func TestDuplicateMessageTrackerCache_DuplicateMessageReceived(t *testing.T) {
	cache := duplicateMessageTrackerCacheFixture(t, 100, defaultDecay, defaultSkipDecayThreshold, zerolog.Nop(), metrics.NewNoopCollector())

	peerID1 := unittest.PeerIdFixture(t)
	peerID2 := unittest.PeerIdFixture(t)

	gauge, err := cache.DuplicateMessageReceived(peerID1)
	require.NoError(t, err)
	require.Equal(t, float64(1), gauge)

	// GetWithInit will apply a slight decay, resulting in a gauge value
	// less than the previous value of 1 but greater than 0.9
	currentGauge, ok, err := cache.GetWithInit(peerID1)
	require.NoError(t, err)
	require.True(t, ok)
	require.LessOrEqual(t, currentGauge, gauge)
	require.Greater(t, currentGauge, 0.9)

	_, ok, err = cache.GetWithInit(peerID2)
	require.NoError(t, err)
	require.True(t, ok)

	// test adjusting the record for a non-existing peer ID
	peerID3 := unittest.PeerIdFixture(t)
	gauge3, err := cache.DuplicateMessageReceived(peerID3)
	require.NoError(t, err)
	require.Equal(t, float64(1), gauge3)

	// when updated, the value should be incremented from 1 -> 2 and slightly decayed, resulting
	// in a gauge value less than 2 but greater than 1.9
	gauge3, err = cache.DuplicateMessageReceived(peerID3)
	require.NoError(t, err)
	require.LessOrEqual(t, gauge3, 2.0)
	require.Greater(t, gauge3, 1.9)
}

// TestDuplicateMessageTrackerCache_ConcurrentDuplicateMessageReceived tests concurrent adjustments and reads of records for different
// peer IDs. The test covers the following scenarios:
// 1. Multiple goroutines adjusting records for different peer IDs concurrently.
// 2. Multiple goroutines getting records for different peer IDs concurrently.
// 3. The adjusted records are correctly updated in the cache.
// 4. Ensure records are decayed as expected.
func TestDuplicateMessageTrackerCache_ConcurrentDuplicateMessageReceived(t *testing.T) {
	cache := duplicateMessageTrackerCacheFixture(t, 100, defaultDecay, defaultSkipDecayThreshold, zerolog.Nop(), metrics.NewNoopCollector())

	peerIDs := unittest.PeerIdFixtures(t, 10)
	for _, peerID := range peerIDs {
		_, ok, err := cache.GetWithInit(peerID)
		require.NoError(t, err)
		require.True(t, ok)
	}

	var wg sync.WaitGroup
	wg.Add(len(peerIDs) * 2)

	for _, peerID := range peerIDs {
		// adjust records concurrently
		go func(id peer.ID) {
			defer wg.Done()
			_, err := cache.DuplicateMessageReceived(id)
			require.NoError(t, err)
		}(peerID)

		// get records concurrently
		go func(id peer.ID) {
			defer wg.Done()
			_, found, err := cache.GetWithInit(id)
			require.NoError(t, err)
			require.True(t, found)
		}(peerID)
	}

	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish")

	// ensure that the records are correctly updated in the cache
	for _, peerID := range peerIDs {
		gauge, found, err := cache.GetWithInit(peerID)
		require.NoError(t, err)
		require.True(t, found)
		// slight decay will result in 0.9 < gauge <= 1
		require.LessOrEqual(t, gauge, 1.0)
		require.Greater(t, gauge, 0.9)
	}
}

// TestDuplicateMessageTrackerCache_Decay ensures that a gauge value in the record cache is eventually decayed back to 0 after some time.
func TestDuplicateMessageTrackerCache_Decay(t *testing.T) {
	cache := duplicateMessageTrackerCacheFixture(t, 100, 0.09, defaultSkipDecayThreshold, zerolog.Nop(), metrics.NewNoopCollector())

	peerID := unittest.PeerIdFixture(t)

	// initialize the record for peerID
	gauge, err := cache.DuplicateMessageReceived(peerID)
	require.Equal(t, float64(1), gauge)
	require.NoError(t, err)
	gauge, ok, err := cache.GetWithInit(peerID)
	require.True(t, ok)
	require.NoError(t, err)
	// gauge should have decayed slightly
	require.Less(t, gauge, float64(1))

	time.Sleep(time.Second)

	gauge, ok, err = cache.GetWithInit(peerID)
	require.True(t, ok)
	require.NoError(t, err)
	// gauge should have decayed further, approaching 0
	require.Less(t, gauge, 0.1)
}

// duplicateMessageTrackerCacheFixture returns a new *DuplicateMessageTrackerCache.
func duplicateMessageTrackerCacheFixture(t *testing.T, sizeLimit uint32, decay, skipDecayThreshold float64, logger zerolog.Logger, collector module.HeroCacheMetrics) *DuplicateMessageTrackerCache {
	r := NewDuplicateMessageTrackerCache(sizeLimit, decay, skipDecayThreshold, logger, collector)
	require.NotNil(t, r)
	// expect cache to be empty
	require.Equalf(t, uint(0), r.c.Size(), "cache size must be 0")
	return r
}
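
// Illustrative sketch, not part of the original suite: the test name below is made up for
// demonstration. It only exercises behavior the tests above already assert, namely that gauges
// are tracked per peer (a duplicate message from one peer does not affect another peer's record)
// and that the first DuplicateMessageReceived call for a fresh peer returns exactly 1.
func TestDuplicateMessageTrackerCache_PerPeerIsolation_Sketch(t *testing.T) {
	cache := duplicateMessageTrackerCacheFixture(t, 100, defaultDecay, defaultSkipDecayThreshold, zerolog.Nop(), metrics.NewNoopCollector())

	peerID1 := unittest.PeerIdFixture(t)
	peerID2 := unittest.PeerIdFixture(t)

	// first duplicate message for peerID1 initializes its gauge to 1
	gauge1, err := cache.DuplicateMessageReceived(peerID1)
	require.NoError(t, err)
	require.Equal(t, float64(1), gauge1)

	// peerID2 is unaffected by peerID1's record: its first duplicate message also starts at 1
	gauge2, err := cache.DuplicateMessageReceived(peerID2)
	require.NoError(t, err)
	require.Equal(t, float64(1), gauge2)

	// the cache holds exactly one record per peer
	require.Equal(t, uint(2), cache.c.Size())
}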