github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/engine/common/follower/cache/cache_test.go

package cache

import (
	"math/rand"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
	"go.uber.org/atomic"
	"golang.org/x/exp/slices"

	"github.com/onflow/flow-go/consensus/hotstuff/mocks"
	"github.com/onflow/flow-go/consensus/hotstuff/model"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/metrics"
	"github.com/onflow/flow-go/utils/unittest"
)

func TestCache(t *testing.T) {
	suite.Run(t, new(CacheSuite))
}

const defaultHeroCacheLimit = 1000

// CacheSuite holds minimal state for testing Cache in different test scenarios.
type CacheSuite struct {
	suite.Suite

	consumer *mocks.ProposalViolationConsumer
	cache    *Cache
}

func (s *CacheSuite) SetupTest() {
	collector := metrics.NewNoopCollector()
	s.consumer = mocks.NewProposalViolationConsumer(s.T())
	s.cache = NewCache(unittest.Logger(), defaultHeroCacheLimit, collector, s.consumer)
}

// TestPeek tests that previously added blocks can be queried by block ID.
func (s *CacheSuite) TestPeek() {
	blocks := unittest.ChainFixtureFrom(10, unittest.BlockHeaderFixture())
	_, _, err := s.cache.AddBlocks(blocks)
	require.NoError(s.T(), err)
	for _, block := range blocks {
		actual := s.cache.Peek(block.ID())
		require.NotNil(s.T(), actual)
		require.Equal(s.T(), actual.ID(), block.ID())
	}
}

// TestBlocksEquivocation tests that the cache tracks block equivocation when adding blocks that have the same view
// but different block IDs. Equivocation is a symptom of byzantine actions and needs to be detected and addressed.
func (s *CacheSuite) TestBlocksEquivocation() {
	blocks := unittest.ChainFixtureFrom(10, unittest.BlockHeaderFixture())
	_, _, err := s.cache.AddBlocks(blocks)
	require.NoError(s.T(), err)
	// adding the same blocks again shouldn't result in any equivocation events
	_, _, err = s.cache.AddBlocks(blocks)
	require.NoError(s.T(), err)

	equivocatedBlocks, _, _ := unittest.ChainFixture(len(blocks) - 1)
	// we skip the genesis block as it will be the same
	for i := 1; i < len(equivocatedBlocks); i++ {
		block := equivocatedBlocks[i]
		// update the view to match the already submitted batch, to trigger equivocation
		block.Header.View = blocks[i].Header.View
		// update ParentID so blocks are still connected
		block.Header.ParentID = equivocatedBlocks[i-1].ID()
		s.consumer.On("OnDoubleProposeDetected",
			model.BlockFromFlow(blocks[i].Header), model.BlockFromFlow(block.Header)).Return().Once()
	}
	_, _, err = s.cache.AddBlocks(equivocatedBlocks)
	require.NoError(s.T(), err)
}

// TestBlocksAreNotConnected tests that passing a batch that is not sequentially ordered, or that contains gaps,
// results in an error.
func (s *CacheSuite) TestBlocksAreNotConnected() {
	s.Run("blocks-not-sequential", func() {
		blocks := unittest.ChainFixtureFrom(10, unittest.BlockHeaderFixture())

		// shuffling the blocks breaks their ordering, rendering the batch non-sequential
		rand.Shuffle(len(blocks), func(i, j int) {
			blocks[i], blocks[j] = blocks[j], blocks[i]
		})

		_, _, err := s.cache.AddBlocks(blocks)
		require.ErrorIs(s.T(), err, ErrDisconnectedBatch)
	})
	s.Run("blocks-with-gaps", func() {
		blocks := unittest.ChainFixtureFrom(10, unittest.BlockHeaderFixture())

		// altering the payload hash invalidates the ParentID reference of the next block, creating a gap in the batch
		blocks[len(blocks)/2].Header.PayloadHash = unittest.IdentifierFixture()

		_, _, err := s.cache.AddBlocks(blocks)
		require.ErrorIs(s.T(), err, ErrDisconnectedBatch)
	})
}

// TestChildCertifiesParent tests a scenario: A <- B[QC_A].
// First we add A and then B, in two different batches.
// We expect that A will get certified after adding B.
func (s *CacheSuite) TestChildCertifiesParent() {
	block := unittest.BlockFixture()
	certifiedBatch, certifyingQC, err := s.cache.AddBlocks([]*flow.Block{&block})
	require.NoError(s.T(), err)
	require.Empty(s.T(), certifiedBatch)
	require.Nil(s.T(), certifyingQC)
	child := unittest.BlockWithParentFixture(block.Header)
	certifiedBatch, certifyingQC, err = s.cache.AddBlocks([]*flow.Block{child})
	require.NoError(s.T(), err)
	require.Len(s.T(), certifiedBatch, 1)
	require.NotNil(s.T(), certifyingQC)
	require.Equal(s.T(), block.ID(), certifyingQC.BlockID)
	require.Equal(s.T(), certifiedBatch[0], &block)
}

// TestChildBeforeParent tests a scenario: A <- B[QC_A].
// First we add B and then A, in two different batches.
// We expect that A will get certified after adding A.
func (s *CacheSuite) TestChildBeforeParent() {
	blocks := unittest.ChainFixtureFrom(2, unittest.BlockHeaderFixture())
	_, _, err := s.cache.AddBlocks([]*flow.Block{blocks[1]})
	require.NoError(s.T(), err)
	certifiedBatch, certifyingQC, err := s.cache.AddBlocks([]*flow.Block{blocks[0]})
	require.NoError(s.T(), err)
	require.Len(s.T(), certifiedBatch, 1)
	require.NotNil(s.T(), certifyingQC)
	require.Equal(s.T(), blocks[0].ID(), certifyingQC.BlockID)
	require.Equal(s.T(), certifiedBatch[0], blocks[0])
}

// TestBlockInTheMiddle tests a scenario: A <- B[QC_A] <- C[QC_B].
// We add blocks one by one: C, A, B. We expect that after adding B, we will be able to
// certify [A, B] with QC_B as the certifying QC.
func (s *CacheSuite) TestBlockInTheMiddle() {
	blocks := unittest.ChainFixtureFrom(3, unittest.BlockHeaderFixture())
	// add C
	certifiedBlocks, certifiedQC, err := s.cache.AddBlocks(blocks[2:])
	require.NoError(s.T(), err)
	require.Empty(s.T(), certifiedBlocks)
	require.Nil(s.T(), certifiedQC)

	// add A
	certifiedBlocks, certifiedQC, err = s.cache.AddBlocks(blocks[:1])
	require.NoError(s.T(), err)
	require.Empty(s.T(), certifiedBlocks)
	require.Nil(s.T(), certifiedQC)

	// add B
	certifiedBlocks, certifiedQC, err = s.cache.AddBlocks(blocks[1:2])
	require.NoError(s.T(), err)
	require.Equal(s.T(), blocks[:2], certifiedBlocks)
	require.Equal(s.T(), blocks[2].Header.QuorumCertificate(), certifiedQC)
}

// TestAddBatch tests a scenario: B1 <- ... <- BN added in one batch.
// We expect that all blocks except the last one will be certified.
// The certifying QC will be taken from the last block.
func (s *CacheSuite) TestAddBatch() {
	blocks := unittest.ChainFixtureFrom(10, unittest.BlockHeaderFixture())
	certifiedBatch, certifyingQC, err := s.cache.AddBlocks(blocks)
	require.NoError(s.T(), err)
	require.Equal(s.T(), blocks[:len(blocks)-1], certifiedBatch)
	require.Equal(s.T(), blocks[len(blocks)-1].Header.QuorumCertificate(), certifyingQC)
}

// TestDuplicatedBatch checks that batches whose blocks all already reside in the cache are rejected
// as redundant input. Batches that have at least one new block should be accepted.
func (s *CacheSuite) TestDuplicatedBatch() {
	blocks := unittest.ChainFixtureFrom(10, unittest.BlockHeaderFixture())

	certifiedBatch, certifyingQC, err := s.cache.AddBlocks(blocks[1:])
	require.NoError(s.T(), err)
	require.Equal(s.T(), blocks[1:len(blocks)-1], certifiedBatch)
	require.Equal(s.T(), blocks[len(blocks)-1].Header.QuorumCertificate(), certifyingQC)

	// add the same batch again; it has to be rejected as redundant input
	certifiedBatch, certifyingQC, err = s.cache.AddBlocks(blocks[1:])
	require.NoError(s.T(), err)
	require.Empty(s.T(), certifiedBatch)
	require.Nil(s.T(), certifyingQC)

	// add a batch with one extra leading block; this has to be accepted even though 9 out of 10 blocks
	// were already processed
	certifiedBatch, certifyingQC, err = s.cache.AddBlocks(blocks)
	require.NoError(s.T(), err)
	require.Equal(s.T(), blocks[:len(blocks)-1], certifiedBatch)
	require.Equal(s.T(), blocks[len(blocks)-1].Header.QuorumCertificate(), certifyingQC)
}

// TestPruneUpToView tests that blocks below the pruned view will be properly filtered out of an incoming batch.
func (s *CacheSuite) TestPruneUpToView() {
	blocks := unittest.ChainFixtureFrom(3, unittest.BlockHeaderFixture())
	s.cache.PruneUpToView(blocks[1].Header.View)
	certifiedBatch, certifyingQC, err := s.cache.AddBlocks(blocks)
	require.NoError(s.T(), err)
	require.Equal(s.T(), blocks[1:len(blocks)-1], certifiedBatch)
	require.Equal(s.T(), blocks[len(blocks)-1].Header.QuorumCertificate(), certifyingQC)
}

// TestConcurrentAdd simulates multiple workers adding batches of blocks out of order.
// We use the following setup:
//   - number of workers: workers
//   - number of batches submitted by each worker: batchesPerWorker
//   - number of blocks in each batch submitted by a worker: blocksPerBatch
//
// Each worker submits batchesPerWorker*blocksPerBatch blocks.
// In total we will submit workers*batchesPerWorker*blocksPerBatch blocks.
// After submitting all blocks we expect that the whole chain, except the last block, will get certified.
func (s *CacheSuite) TestConcurrentAdd() {
	workers := 5
	batchesPerWorker := 10
	blocksPerBatch := 10
	blocksPerWorker := blocksPerBatch * batchesPerWorker
	// ChainFixture generates N+1 blocks since it adds a root block
	blocks := unittest.ChainFixtureFrom(workers*blocksPerWorker, unittest.BlockHeaderFixture())

	var wg sync.WaitGroup
	wg.Add(workers)

	var certifiedBlocksLock sync.Mutex
	var allCertifiedBlocks []*flow.Block
	for i := 0; i < workers; i++ {
		go func(blocks []*flow.Block) {
			defer wg.Done()
			for batch := 0; batch < batchesPerWorker; batch++ {
				certifiedBlocks, _, err := s.cache.AddBlocks(blocks[batch*blocksPerBatch : (batch+1)*blocksPerBatch])
				require.NoError(s.T(), err)
				certifiedBlocksLock.Lock()
				allCertifiedBlocks = append(allCertifiedBlocks, certifiedBlocks...)
				certifiedBlocksLock.Unlock()
			}
		}(blocks[i*blocksPerWorker : (i+1)*blocksPerWorker])
	}

	unittest.RequireReturnsBefore(s.T(), wg.Wait, time.Millisecond*500, "should submit blocks before timeout")

	require.Len(s.T(), allCertifiedBlocks, len(blocks)-1)
	slices.SortFunc(allCertifiedBlocks, func(lhs *flow.Block, rhs *flow.Block) int {
		return int(lhs.Header.Height) - int(rhs.Header.Height)
	})
	require.Equal(s.T(), blocks[:len(blocks)-1], allCertifiedBlocks)
}

// TestSecondaryIndexCleanup tests that ejected entities are correctly cleaned up from the secondary index.
func (s *CacheSuite) TestSecondaryIndexCleanup() {
	// create more blocks than the cache limit
	blocks := unittest.ChainFixtureFrom(2*defaultHeroCacheLimit, unittest.BlockHeaderFixture())
	_, _, err := s.cache.AddBlocks(blocks)
	require.NoError(s.T(), err)
	require.Len(s.T(), s.cache.byView, defaultHeroCacheLimit)
	require.Len(s.T(), s.cache.byParent, defaultHeroCacheLimit)
}

// TestMultipleChildrenForSameParent tests a scenario where we have:
//
//	A <- B
//	  <- C
//
// We insert:
//  1. [B]
//  2. [C]
//  3. [A]
//
// We should be able to certify A since B and C are in the cache; any QC will work.
func (s *CacheSuite) TestMultipleChildrenForSameParent() {
	A := unittest.BlockFixture()
	B := unittest.BlockWithParentFixture(A.Header)
	C := unittest.BlockWithParentFixture(A.Header)
	C.Header.View = B.Header.View + 1 // make sure views are different

	_, _, err := s.cache.AddBlocks([]*flow.Block{B})
	require.NoError(s.T(), err)
	_, _, err = s.cache.AddBlocks([]*flow.Block{C})
	require.NoError(s.T(), err)
	certifiedBlocks, certifyingQC, err := s.cache.AddBlocks([]*flow.Block{&A})
	require.NoError(s.T(), err)
	require.Len(s.T(), certifiedBlocks, 1)
	require.Equal(s.T(), &A, certifiedBlocks[0])
	require.Equal(s.T(), A.ID(), certifyingQC.BlockID)
}

// TestChildEjectedBeforeAddingParent tests a scenario where we have:
//
//	A <- B
//	  <- C
//
// We insert:
//  1. [B]
//  2. [C]
//  3. [A]
//
// Between 2. and 3. B gets ejected; we should still be able to certify A since C remains in the cache.
func (s *CacheSuite) TestChildEjectedBeforeAddingParent() {
	A := unittest.BlockFixture()
	B := unittest.BlockWithParentFixture(A.Header)
	C := unittest.BlockWithParentFixture(A.Header)
	C.Header.View = B.Header.View + 1 // make sure views are different

	_, _, err := s.cache.AddBlocks([]*flow.Block{B})
	require.NoError(s.T(), err)
	_, _, err = s.cache.AddBlocks([]*flow.Block{C})
	require.NoError(s.T(), err)
	// eject B
	s.cache.backend.Remove(B.ID())
	s.cache.handleEjectedEntity(B)

	certifiedBlocks, certifyingQC, err := s.cache.AddBlocks([]*flow.Block{&A})
	require.NoError(s.T(), err)
	require.Len(s.T(), certifiedBlocks, 1)
	require.Equal(s.T(), &A, certifiedBlocks[0])
	require.Equal(s.T(), A.ID(), certifyingQC.BlockID)
}

// TestAddOverCacheLimit tests a scenario where the caller feeds blocks to the cache concurrently,
// largely exceeding the internal cache capacity and leading to the ejection of a large number of blocks.
// We expect to eventually certify all possible blocks, assuming producers continue to push the same blocks over and over again.
// This test scenario emulates the sync engine pushing blocks from other committee members.
func (s *CacheSuite) TestAddOverCacheLimit() {
	// create more blocks than the cache limit
	workers := 10
	blocksPerWorker := 10
	s.cache = NewCache(unittest.Logger(), uint32(blocksPerWorker), metrics.NewNoopCollector(), s.consumer)

	blocks := unittest.ChainFixtureFrom(blocksPerWorker*workers, unittest.BlockHeaderFixture())

	var uniqueBlocksLock sync.Mutex
	// AddBlocks can certify the same blocks multiple times, especially when we push the same blocks over and over;
	// use a map to track them. A lock provides concurrency safety.
	uniqueBlocks := make(map[flow.Identifier]struct{}, 0)

	// all workers submit blocks until the termination condition is satisfied:
	// whenever len(uniqueBlocks) == certifiedGoal, we have certified all available blocks.
	done := atomic.NewBool(false)
	certifiedGoal := len(blocks) - 1

	var wg sync.WaitGroup
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func(blocks []*flow.Block) {
			defer wg.Done()
			for !done.Load() {
				// the worker submits blocks while the condition is not satisfied
				for _, block := range blocks {
					// push blocks one by one; paired with the randomness of the scheduler,
					// blocks will be delivered chaotically
					certifiedBlocks, _, err := s.cache.AddBlocks([]*flow.Block{block})
					require.NoError(s.T(), err)
					if len(certifiedBlocks) > 0 {
						uniqueBlocksLock.Lock()
						for _, block := range certifiedBlocks {
							uniqueBlocks[block.ID()] = struct{}{}
						}
						if len(uniqueBlocks) == certifiedGoal {
							done.Store(true)
						}
						uniqueBlocksLock.Unlock()
					}
				}
			}
		}(blocks[i*blocksPerWorker : (i+1)*blocksPerWorker])
	}
	unittest.RequireReturnsBefore(s.T(), wg.Wait, time.Millisecond*500, "should submit blocks before timeout")
}
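
// The test below is an illustrative sketch and is not part of the original suite; its name is hypothetical.
// It uses only APIs already exercised above (unittest.ChainFixtureFrom, Cache.AddBlocks) to document the
// expected shape of AddBlocks' return values for a connected batch: the certified prefix should itself form
// a connected chain, and the certifying QC should point to the last block of that prefix, consistent with
// the expectations asserted in TestAddBatch and TestChildCertifiesParent.
func (s *CacheSuite) TestCertifiedBatchIsConnected() {
	blocks := unittest.ChainFixtureFrom(5, unittest.BlockHeaderFixture())
	certifiedBatch, certifyingQC, err := s.cache.AddBlocks(blocks)
	require.NoError(s.T(), err)
	require.NotEmpty(s.T(), certifiedBatch)
	require.NotNil(s.T(), certifyingQC)
	// every block in the certified prefix references its predecessor by ParentID
	for i := 1; i < len(certifiedBatch); i++ {
		require.Equal(s.T(), certifiedBatch[i-1].ID(), certifiedBatch[i].Header.ParentID)
	}
	// the certifying QC certifies the last block of the returned prefix
	require.Equal(s.T(), certifiedBatch[len(certifiedBatch)-1].ID(), certifyingQC.BlockID)
}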