github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/engine/consensus/compliance/engine.go (about)

     1  package compliance
     2  
     3  import (
     4  	"fmt"
     5  
     6  	"github.com/rs/zerolog"
     7  
     8  	"github.com/onflow/flow-go/consensus/hotstuff"
     9  	"github.com/onflow/flow-go/consensus/hotstuff/model"
    10  	"github.com/onflow/flow-go/engine"
    11  	"github.com/onflow/flow-go/engine/common/fifoqueue"
    12  	"github.com/onflow/flow-go/engine/consensus"
    13  	"github.com/onflow/flow-go/model/flow"
    14  	"github.com/onflow/flow-go/model/messages"
    15  	"github.com/onflow/flow-go/module"
    16  	"github.com/onflow/flow-go/module/component"
    17  	"github.com/onflow/flow-go/module/events"
    18  	"github.com/onflow/flow-go/module/irrecoverable"
    19  	"github.com/onflow/flow-go/module/metrics"
    20  	"github.com/onflow/flow-go/state/protocol"
    21  	"github.com/onflow/flow-go/storage"
    22  )
    23  
// defaultBlockQueueCapacity is the maximum capacity of the inbound queue for
// `messages.BlockProposal`s; pushes beyond this capacity are dropped (and counted).
const defaultBlockQueueCapacity = 10_000
    26  
// Engine is a wrapper around `compliance.Core`. The Engine queues inbound messages, relevant
// node-internal notifications, and manages the worker routines processing the inbound events,
// and forwards outbound messages to the networking layer.
// `compliance.Core` implements the actual compliance logic.
// Implements consensus.Compliance interface.
type Engine struct {
	component.Component
	hotstuff.FinalizationConsumer

	log                   zerolog.Logger
	mempoolMetrics        module.MempoolMetrics
	engineMetrics         module.EngineMetrics
	me                    module.Local
	headers               storage.Headers
	payloads              storage.Payloads
	tracer                module.Tracer
	state                 protocol.State
	core                  *Core                // embedded compliance logic; all fields above mirror core's dependencies
	pendingBlocks         *fifoqueue.FifoQueue // queue for processing inbound blocks
	pendingBlocksNotifier engine.Notifier      // signals the worker loop that pendingBlocks is non-empty
}

// compile-time check that Engine satisfies the consensus.Compliance interface
var _ consensus.Compliance = (*Engine)(nil)
    50  
    51  func NewEngine(
    52  	log zerolog.Logger,
    53  	me module.Local,
    54  	core *Core,
    55  ) (*Engine, error) {
    56  
    57  	// Inbound FIFO queue for `messages.BlockProposal`s
    58  	blocksQueue, err := fifoqueue.NewFifoQueue(
    59  		defaultBlockQueueCapacity,
    60  		fifoqueue.WithLengthObserver(func(len int) { core.mempoolMetrics.MempoolEntries(metrics.ResourceBlockProposalQueue, uint(len)) }),
    61  	)
    62  	if err != nil {
    63  		return nil, fmt.Errorf("failed to create queue for inbound block proposals: %w", err)
    64  	}
    65  
    66  	eng := &Engine{
    67  		log:                   log.With().Str("compliance", "engine").Logger(),
    68  		me:                    me,
    69  		mempoolMetrics:        core.mempoolMetrics,
    70  		engineMetrics:         core.engineMetrics,
    71  		headers:               core.headers,
    72  		payloads:              core.payloads,
    73  		pendingBlocks:         blocksQueue,
    74  		state:                 core.state,
    75  		tracer:                core.tracer,
    76  		core:                  core,
    77  		pendingBlocksNotifier: engine.NewNotifier(),
    78  	}
    79  	finalizationActor, finalizationWorker := events.NewFinalizationActor(eng.processOnFinalizedBlock)
    80  	eng.FinalizationConsumer = finalizationActor
    81  	// create the component manager and worker threads
    82  	eng.Component = component.NewComponentManagerBuilder().
    83  		AddWorker(eng.processBlocksLoop).
    84  		AddWorker(finalizationWorker).
    85  		Build()
    86  
    87  	return eng, nil
    88  }
    89  
// processBlocksLoop is a worker routine that processes inbound block proposals
// as they are queued. (This engine's inbound queue carries only block proposals;
// votes and timeouts are handled by other components.) The loop blocks until
// either the context is done or the notifier signals new messages, then drains
// the queue before waiting again.
func (e *Engine) processBlocksLoop(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
	ready()

	doneSignal := ctx.Done()
	newMessageSignal := e.pendingBlocksNotifier.Channel()
	for {
		select {
		case <-doneSignal:
			return
		case <-newMessageSignal:
			err := e.processQueuedBlocks(doneSignal) // no errors expected during normal operations
			if err != nil {
				// any error here is a symptom of internal state corruption -> fatal
				ctx.Throw(err)
			}
		}
	}
}
   108  
   109  // processQueuedBlocks processes any available messages until the message queue is empty.
   110  // Only returns when all inbound queues are empty (or the engine is terminated).
   111  // No errors are expected during normal operation. All returned exceptions are potential
   112  // symptoms of internal state corruption and should be fatal.
   113  func (e *Engine) processQueuedBlocks(doneSignal <-chan struct{}) error {
   114  	for {
   115  		select {
   116  		case <-doneSignal:
   117  			return nil
   118  		default:
   119  		}
   120  
   121  		msg, ok := e.pendingBlocks.Pop()
   122  		if ok {
   123  			batch := msg.(flow.Slashable[[]*messages.BlockProposal])
   124  			for _, block := range batch.Message {
   125  				err := e.core.OnBlockProposal(flow.Slashable[*messages.BlockProposal]{
   126  					OriginID: batch.OriginID,
   127  					Message:  block,
   128  				})
   129  				e.core.engineMetrics.MessageHandled(metrics.EngineCompliance, metrics.MessageBlockProposal)
   130  				if err != nil {
   131  					return fmt.Errorf("could not handle block proposal: %w", err)
   132  				}
   133  			}
   134  			continue
   135  		}
   136  
   137  		// when there are no more messages in the queue, back to the processBlocksLoop to wait
   138  		// for the next incoming message to arrive.
   139  		return nil
   140  	}
   141  }
   142  
   143  // OnBlockProposal feeds a new block proposal into the processing pipeline.
   144  // Incoming proposals are queued and eventually dispatched by worker.
   145  func (e *Engine) OnBlockProposal(proposal flow.Slashable[*messages.BlockProposal]) {
   146  	e.core.engineMetrics.MessageReceived(metrics.EngineCompliance, metrics.MessageBlockProposal)
   147  	proposalAsList := flow.Slashable[[]*messages.BlockProposal]{
   148  		OriginID: proposal.OriginID,
   149  		Message:  []*messages.BlockProposal{proposal.Message},
   150  	}
   151  	if e.pendingBlocks.Push(proposalAsList) {
   152  		e.pendingBlocksNotifier.Notify()
   153  	} else {
   154  		e.core.engineMetrics.InboundMessageDropped(metrics.EngineCompliance, metrics.MessageBlockProposal)
   155  	}
   156  }
   157  
   158  // OnSyncedBlocks feeds a batch of blocks obtained via sync into the processing pipeline.
   159  // Blocks in batch aren't required to be in any particular order.
   160  // Incoming proposals are queued and eventually dispatched by worker.
   161  func (e *Engine) OnSyncedBlocks(blocks flow.Slashable[[]*messages.BlockProposal]) {
   162  	e.core.engineMetrics.MessageReceived(metrics.EngineCompliance, metrics.MessageSyncedBlocks)
   163  	if e.pendingBlocks.Push(blocks) {
   164  		e.pendingBlocksNotifier.Notify()
   165  	} else {
   166  		e.core.engineMetrics.InboundMessageDropped(metrics.EngineCompliance, metrics.MessageSyncedBlocks)
   167  	}
   168  }
   169  
   170  // processOnFinalizedBlock informs compliance.Core about finalization of the respective block.
   171  // The input to this callback is treated as trusted. This method should be executed on
   172  // `OnFinalizedBlock` notifications from the node-internal consensus instance.
   173  // No errors expected during normal operations.
   174  func (e *Engine) processOnFinalizedBlock(block *model.Block) error {
   175  	// retrieve the latest finalized header, so we know the height
   176  	finalHeader, err := e.headers.ByBlockID(block.BlockID)
   177  	if err != nil { // no expected errors
   178  		return fmt.Errorf("could not get finalized header: %w", err)
   179  	}
   180  	e.core.ProcessFinalizedBlock(finalHeader)
   181  	return nil
   182  }