github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/engine/consensus/ingestion/engine_test.go

package ingestion

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
	"go.uber.org/atomic"

	"github.com/onflow/flow-go/engine"
	"github.com/onflow/flow-go/module/irrecoverable"
	"github.com/onflow/flow-go/module/metrics"
	mockmodule "github.com/onflow/flow-go/module/mock"
	netint "github.com/onflow/flow-go/network"
	"github.com/onflow/flow-go/network/channels"
	"github.com/onflow/flow-go/network/mocknetwork"
	"github.com/onflow/flow-go/utils/unittest"
)

func TestIngestionEngine(t *testing.T) {
	suite.Run(t, new(IngestionSuite))
}

type IngestionSuite struct {
	IngestionCoreSuite

	con    *mocknetwork.Conduit
	net    *mocknetwork.Network
	cancel context.CancelFunc

	ingest *Engine
}

func (s *IngestionSuite) SetupTest() {
	s.IngestionCoreSuite.SetupTest()

	s.con = &mocknetwork.Conduit{}

	// set up the network module mock
	s.net = &mocknetwork.Network{}
	s.net.On("Register", channels.ReceiveGuarantees, mock.Anything).Return(
		func(channel channels.Channel, engine netint.MessageProcessor) netint.Conduit {
			return s.con
		},
		nil,
	)

	// set up my own identity
	me := &mockmodule.Local{}
	me.On("NodeID").Return(s.conID) // we use the first consensus node as our local identity

	ctx, cancel := context.WithCancel(context.Background())
	s.cancel = cancel
	signalerCtx, _ := irrecoverable.WithSignaler(ctx)

	metrics := metrics.NewNoopCollector()
	ingest, err := New(unittest.Logger(), metrics, s.net, me, s.core)
	require.NoError(s.T(), err)
	s.ingest = ingest
	s.ingest.Start(signalerCtx)
	<-s.ingest.Ready()
}

func (s *IngestionSuite) TearDownTest() {
	s.cancel()
	<-s.ingest.Done()
}

// TestSubmittingMultipleEntries tests processing of multiple collection guarantees concurrently.
// In the happy path, we expect all messages to be dispatched to worker goroutines and executed by the core.
func (s *IngestionSuite) TestSubmittingMultipleEntries() {
	originID := s.collID
	count := uint64(15)

	processed := atomic.NewUint64(0)

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		for i := 0; i < int(count); i++ {
			guarantee := s.validGuarantee()
			s.pool.On("Has", guarantee.ID()).Return(false)
			s.pool.On("Add", guarantee).Run(func(args mock.Arguments) {
				processed.Add(1)
			}).Return(true)

			// submit the guarantee to the engine
			_ = s.ingest.Process(channels.ProvideCollections, originID, guarantee)
		}
		wg.Done()
	}()

	wg.Wait()

	require.Eventually(s.T(), func() bool {
		return processed.Load() == count
	}, time.Millisecond*200, time.Millisecond*20)

	s.pool.AssertExpectations(s.T())
}

// TestProcessUnsupportedMessageType tests that Process and ProcessLocal correctly handle the case where
// an invalid message type was submitted from the network layer.
func (s *IngestionSuite) TestProcessUnsupportedMessageType() {
	invalidEvent := uint64(42)
	err := s.ingest.Process("ch", unittest.IdentifierFixture(), invalidEvent)
	// shouldn't result in an error since byzantine inputs are expected
	require.NoError(s.T(), err)
	// in case of local processing the error cannot be consumed since all inputs are trusted
	err = s.ingest.ProcessLocal(invalidEvent)
	require.Error(s.T(), err)
	require.True(s.T(), engine.IsIncompatibleInputTypeError(err))
}