github.com/ethereum-optimism/optimism@v1.7.2/op-node/rollup/driver/state.go

package driver

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	gosync "sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-node/rollup/async"
	"github.com/ethereum-optimism/optimism/op-node/rollup/conductor"
	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
	"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
	"github.com/ethereum-optimism/optimism/op-service/eth"
	"github.com/ethereum-optimism/optimism/op-service/retry"
)

var (
	ErrSequencerAlreadyStarted = errors.New("sequencer already running")
	ErrSequencerAlreadyStopped = errors.New("sequencer not running")
)

// Deprecated: use eth.SyncStatus instead.
type SyncStatus = eth.SyncStatus

// sealingDuration defines the expected time it takes to seal the block
const sealingDuration = time.Millisecond * 50

type Driver struct {
	l1State L1StateIface

	// The derivation pipeline is reset whenever we reorg.
	// The derivation pipeline determines the new l2Safe.
	derivation DerivationPipeline

	// The engine controller is used by the sequencer & derivation components.
	// We will also use it for EL sync in a future PR.
	engineController *derive.EngineController

	// Requests to block the event loop for synchronous execution to avoid reading an inconsistent state
	stateReq chan chan struct{}

	// Upon receiving a channel in this channel, the derivation pipeline is forced to be reset.
	// It tells the caller that the reset occurred by closing the passed in channel.
	forceReset chan chan struct{}

	// Upon receiving a hash in this channel, the sequencer is started at the given hash.
	// It tells the caller that the sequencer started by closing the passed in channel (or returning an error).
	startSequencer chan hashAndErrorChannel

	// Upon receiving a channel in this channel, the sequencer is stopped.
	// It tells the caller that the sequencer stopped by returning the latest sequenced L2 block hash.
	stopSequencer chan chan hashAndError

	// Upon receiving a channel in this channel, the current sequencer status is queried.
	// It tells the caller the status by outputting a boolean to the provided channel:
	// true when the sequencer is active, false when it is not.
	sequencerActive chan chan bool

	// sequencerNotifs is notified when the sequencer is started or stopped
	sequencerNotifs SequencerStateListener

	// Rollup config: rollup chain configuration
	config *rollup.Config

	sequencerConductor conductor.SequencerConductor

	// Driver config: verifier and sequencer settings
	driverConfig *Config

	// Sync mode config
	syncCfg *sync.Config

	// L1 Signals:
	//
	// Not all L1 blocks, or all changes, have to be signalled:
	// the derivation process traverses the chain and handles reorgs as necessary,
	// the driver just needs to be aware of the *latest* signals enough so as not
	// to lag behind actionable data.
	l1HeadSig      chan eth.L1BlockRef
	l1SafeSig      chan eth.L1BlockRef
	l1FinalizedSig chan eth.L1BlockRef

	// Interface to signal the L2 block range to sync.
	altSync AltSync

	// async gossiper for payloads to be gossiped without
	// blocking the event loop or waiting for insertion
	asyncGossiper async.AsyncGossiper

	// L2 Signals:

	unsafeL2Payloads chan *eth.ExecutionPayloadEnvelope

	l1        L1Chain
	l2        L2Chain
	sequencer SequencerIface
	network   Network // may be nil, networking is optional

	metrics     Metrics
	log         log.Logger
	snapshotLog log.Logger

	wg gosync.WaitGroup

	driverCtx    context.Context
	driverCancel context.CancelFunc
}

// Start starts up the state loop.
// The loop will have been started iff err is nil.
func (s *Driver) Start() error {
	s.derivation.Reset()

	s.log.Info("Starting driver", "sequencerEnabled", s.driverConfig.SequencerEnabled, "sequencerStopped", s.driverConfig.SequencerStopped)
	if s.driverConfig.SequencerEnabled {
		// Notify the initial sequencer state
		// This ensures persistence can write the state correctly and that the state file exists
		var err error
		if s.driverConfig.SequencerStopped {
			err = s.sequencerNotifs.SequencerStopped()
		} else {
			err = s.sequencerNotifs.SequencerStarted()
		}
		if err != nil {
			return fmt.Errorf("persist initial sequencer state: %w", err)
		}
	}

	s.asyncGossiper.Start()

	s.wg.Add(1)
	go s.eventLoop()

	return nil
}

// Close shuts down the driver: it cancels the driver context, waits for the event loop
// to return, and stops the async gossiper and the sequencer conductor.
func (s *Driver) Close() error {
	s.driverCancel()
	s.wg.Wait()
	s.asyncGossiper.Stop()
	s.sequencerConductor.Close()
	return nil
}
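
// The sketch below is editorial and not part of op-node: assuming a *Driver has
// already been constructed by the node's setup code, it shows how a caller might
// pair Start with Close so the event loop and the async gossiper are always shut
// down. The helper name runDriverSketch is hypothetical.
func runDriverSketch(ctx context.Context, drv *Driver) error {
	// Start spins up the state loop in a background goroutine; a non-nil error
	// means the loop was never started and there is nothing to clean up.
	if err := drv.Start(); err != nil {
		return fmt.Errorf("start driver: %w", err)
	}
	// Close cancels the driver context, waits for the event loop to return, and
	// stops the async gossiper and the sequencer conductor.
	defer drv.Close()

	// Block until the caller decides to shut down.
	<-ctx.Done()
	return ctx.Err()
}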

// OnL1Head signals the driver that the L1 chain changed the "unsafe" block,
// also known as head of the chain, or "latest".
func (s *Driver) OnL1Head(ctx context.Context, unsafe eth.L1BlockRef) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case s.l1HeadSig <- unsafe:
		return nil
	}
}

// OnL1Safe signals the driver that the L1 chain changed the "safe",
// also known as the justified checkpoint (as seen on L1 beacon-chain).
func (s *Driver) OnL1Safe(ctx context.Context, safe eth.L1BlockRef) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case s.l1SafeSig <- safe:
		return nil
	}
}

// OnL1Finalized signals the driver that the L1 chain changed the "finalized" block.
func (s *Driver) OnL1Finalized(ctx context.Context, finalized eth.L1BlockRef) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case s.l1FinalizedSig <- finalized:
		return nil
	}
}
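
// The snippet below is an editorial sketch rather than part of the node's real
// wiring: it illustrates how an L1 head subscription could be bridged into
// OnL1Head, delivering each signal with a bounded timeout so a busy event loop
// cannot stall the feed. The function name and the 10-second timeout are
// assumptions, not op-node API.
func forwardL1HeadsSketch(ctx context.Context, heads <-chan eth.L1BlockRef, d *Driver) {
	for {
		select {
		case <-ctx.Done():
			return
		case ref := <-heads:
			// Bound how long we wait for the event loop to accept the signal.
			sigCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
			if err := d.OnL1Head(sigCtx, ref); err != nil {
				d.log.Warn("failed to signal new L1 head to driver", "head", ref, "err", err)
			}
			cancel()
		}
	}
}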

// OnUnsafeL2Payload signals the driver that a new unsafe L2 payload envelope has been
// received and should be considered for processing.
func (s *Driver) OnUnsafeL2Payload(ctx context.Context, envelope *eth.ExecutionPayloadEnvelope) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case s.unsafeL2Payloads <- envelope:
		return nil
	}
}

func (s *Driver) logSyncProgress(reason string) {
	s.log.Info("Sync progress",
		"reason", reason,
		"l2_finalized", s.engineController.Finalized(),
		"l2_safe", s.engineController.SafeL2Head(),
		"l2_pending_safe", s.engineController.PendingSafeL2Head(),
		"l2_unsafe", s.engineController.UnsafeL2Head(),
		"l2_time", s.engineController.UnsafeL2Head().Time,
		"l1_derived", s.derivation.Origin(),
	)
}

// eventLoop responds to L1 changes and internal timers to produce L2 blocks.
func (s *Driver) eventLoop() {
	defer s.wg.Done()
	s.log.Info("State loop started")
	defer s.log.Info("State loop returned")

	defer s.driverCancel()

	// stepReqCh is used to request that the driver attempts to step forward by one L1 block.
	stepReqCh := make(chan struct{}, 1)

	// channel, nil by default (not firing), but used to schedule re-attempts with delay
	var delayedStepReq <-chan time.Time

	// keep track of consecutive failed attempts, to adjust the backoff time accordingly
	bOffStrategy := retry.Exponential()
	stepAttempts := 0

	// step requests a derivation step to be taken. Won't deadlock if the channel is full.
	step := func() {
		select {
		case stepReqCh <- struct{}{}:
		// Don't deadlock if the channel is already full
		default:
		}
	}

	// reqStep requests a derivation step nicely, with a delay if this is a reattempt, or not at all if we already scheduled a reattempt.
	reqStep := func() {
		if stepAttempts > 0 {
			// if this is not the first attempt, we re-schedule with a backoff, *without blocking other events*
			if delayedStepReq == nil {
				delay := bOffStrategy.Duration(stepAttempts)
				s.log.Debug("scheduling re-attempt with delay", "attempts", stepAttempts, "delay", delay)
				delayedStepReq = time.After(delay)
			} else {
				s.log.Debug("ignoring step request, already scheduled re-attempt after previous failure", "attempts", stepAttempts)
			}
		} else {
			step()
		}
	}

	// We call reqStep right away to finish syncing to the tip of the chain if we're behind.
	// reqStep will also be triggered when the L1 head moves forward or if there was a reorg on the
	// L1 chain that we need to handle.
	reqStep()

	sequencerTimer := time.NewTimer(0)
	var sequencerCh <-chan time.Time
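	// planSequencerAction re-arms the sequencer timer: it asks the sequencer when its
	// next action is due, drains any stale value left in the timer channel, and resets
	// the timer to fire after that delay.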
	planSequencerAction := func() {
		delay := s.sequencer.PlanNextSequencerAction()
		sequencerCh = sequencerTimer.C
		if len(sequencerCh) > 0 { // empty if not already drained before resetting
			<-sequencerCh
		}
		sequencerTimer.Reset(delay)
	}

	// Create a ticker to check if there is a gap in the engine queue. Whenever
	// there is, we send requests to the sync source to retrieve the missing payloads.
	syncCheckInterval := time.Duration(s.config.BlockTime) * time.Second * 2
	altSyncTicker := time.NewTicker(syncCheckInterval)
	defer altSyncTicker.Stop()
	lastUnsafeL2 := s.engineController.UnsafeL2Head()

	for {
		if s.driverCtx.Err() != nil { // don't try to schedule/handle more work when we are closing.
			return
		}

		// If we are sequencing, and the L1 state is ready, update the trigger for the next sequencer action.
		// This may adjust at any time based on fork-choice changes or previous errors.
		// And avoid sequencing if the derivation pipeline indicates the engine is not ready.
		if s.driverConfig.SequencerEnabled && !s.driverConfig.SequencerStopped &&
			s.l1State.L1Head() != (eth.L1BlockRef{}) && s.derivation.EngineReady() {
			if s.driverConfig.SequencerMaxSafeLag > 0 && s.engineController.SafeL2Head().Number+s.driverConfig.SequencerMaxSafeLag <= s.engineController.UnsafeL2Head().Number {
				// If the safe head has fallen behind by a significant number of blocks, delay creating new blocks
				// until the safe lag is below SequencerMaxSafeLag.
				if sequencerCh != nil {
					s.log.Warn(
						"Delay creating new block since safe lag exceeds limit",
						"safe_l2", s.engineController.SafeL2Head(),
						"unsafe_l2", s.engineController.UnsafeL2Head(),
					)
					sequencerCh = nil
				}
			} else if s.sequencer.BuildingOnto().ID() != s.engineController.UnsafeL2Head().ID() {
				// update sequencer time if the head changed
				planSequencerAction()
			}
		} else {
			sequencerCh = nil
		}

		// If the engine is not ready, or if the L2 head is actively changing, then reset the alt-sync:
		// there is no need to request L2 blocks when we are syncing already.
		if head := s.engineController.UnsafeL2Head(); head != lastUnsafeL2 || !s.derivation.EngineReady() {
			lastUnsafeL2 = head
			altSyncTicker.Reset(syncCheckInterval)
		}

		select {
		case <-sequencerCh:
			// the payload publishing is handled by the async gossiper, which will begin gossiping as soon as available
			// so, we don't need to receive the payload here
			_, err := s.sequencer.RunNextSequencerAction(s.driverCtx, s.asyncGossiper, s.sequencerConductor)
			if errors.Is(err, derive.ErrReset) {
				s.derivation.Reset()
			} else if err != nil {
				s.log.Error("Sequencer critical error", "err", err)
				return
			}
			planSequencerAction() // schedule the next sequencer action to keep the sequencing looping
		case <-altSyncTicker.C:
			// Check if there is a gap in the current unsafe payload queue.
			ctx, cancel := context.WithTimeout(s.driverCtx, time.Second*2)
			err := s.checkForGapInUnsafeQueue(ctx)
			cancel()
			if err != nil {
				s.log.Warn("failed to check for unsafe L2 blocks to sync", "err", err)
			}
		case envelope := <-s.unsafeL2Payloads:
			s.snapshot("New unsafe payload")
			// If we are doing CL sync or done with engine syncing, fall back to the unsafe payload queue & CL P2P sync.
			if s.syncCfg.SyncMode == sync.CLSync || !s.engineController.IsEngineSyncing() {
				s.log.Info("Optimistically queueing unsafe L2 execution payload", "id", envelope.ExecutionPayload.ID())
				s.derivation.AddUnsafePayload(envelope)
				s.metrics.RecordReceivedUnsafePayload(envelope)
				reqStep()
			} else if s.syncCfg.SyncMode == sync.ELSync {
				ref, err := derive.PayloadToBlockRef(s.config, envelope.ExecutionPayload)
				if err != nil {
					s.log.Info("Failed to turn execution payload into a block ref", "id", envelope.ExecutionPayload.ID(), "err", err)
					continue
				}
				// ignore payloads that are not ahead of the current unsafe head
				if ref.Number <= s.engineController.UnsafeL2Head().Number {
					continue
				}
				s.log.Info("Optimistically inserting unsafe L2 execution payload to drive EL sync", "id", envelope.ExecutionPayload.ID())
				if err := s.engineController.InsertUnsafePayload(s.driverCtx, envelope, ref); err != nil {
					s.log.Warn("Failed to insert unsafe payload for EL sync", "id", envelope.ExecutionPayload.ID(), "err", err)
				}
				s.logSyncProgress("unsafe payload from sequencer while in EL sync")
			}
		case newL1Head := <-s.l1HeadSig:
			s.l1State.HandleNewL1HeadBlock(newL1Head)
			reqStep() // a new L1 head may mean we have the data to not get an EOF again.
		case newL1Safe := <-s.l1SafeSig:
			s.l1State.HandleNewL1SafeBlock(newL1Safe)
			// no step, justified L1 information does not do anything for L2 derivation or status
		case newL1Finalized := <-s.l1FinalizedSig:
			s.l1State.HandleNewL1FinalizedBlock(newL1Finalized)
			s.derivation.Finalize(newL1Finalized)
			reqStep() // we may be able to mark more L2 data as finalized now
		case <-delayedStepReq:
			delayedStepReq = nil
			step()
		case <-stepReqCh:
			// Don't start the derivation pipeline until we are done with EL sync
			if s.engineController.IsEngineSyncing() {
				continue
			}
			s.metrics.SetDerivationIdle(false)
			s.log.Debug("Derivation process step", "onto_origin", s.derivation.Origin(), "attempts", stepAttempts)
			err := s.derivation.Step(s.driverCtx)
			stepAttempts += 1 // count as attempt by default. We reset to 0 if we are making healthy progress.
			if err == io.EOF {
				s.log.Debug("Derivation process went idle", "progress", s.derivation.Origin(), "err", err)
				stepAttempts = 0
				s.metrics.SetDerivationIdle(true)
				continue
			} else if err != nil && errors.Is(err, derive.EngineELSyncing) {
				s.log.Debug("Derivation process went idle because the engine is syncing", "progress", s.derivation.Origin(), "unsafe_head", s.engineController.UnsafeL2Head(), "err", err)
				stepAttempts = 0
				s.metrics.SetDerivationIdle(true)
				continue
			} else if err != nil && errors.Is(err, derive.ErrReset) {
				// If the pipeline corrupts, e.g. due to a reorg, simply reset it
				s.log.Warn("Derivation pipeline is reset", "err", err)
				s.derivation.Reset()
				s.metrics.RecordPipelineReset()
				continue
			} else if err != nil && errors.Is(err, derive.ErrTemporary) {
				s.log.Warn("Derivation process temporary error", "attempts", stepAttempts, "err", err)
				reqStep()
				continue
			} else if err != nil && errors.Is(err, derive.ErrCritical) {
				s.log.Error("Derivation process critical error", "err", err)
				return
			} else if err != nil && errors.Is(err, derive.NotEnoughData) {
				stepAttempts = 0 // don't do a backoff for this error
				reqStep()
				continue
			} else if err != nil {
				s.log.Error("Derivation process error", "attempts", stepAttempts, "err", err)
				reqStep()
				continue
			} else {
				stepAttempts = 0
				reqStep() // continue with the next step if we can
			}
		case respCh := <-s.stateReq:
			respCh <- struct{}{}
		case respCh := <-s.forceReset:
			s.log.Warn("Derivation pipeline is manually reset")
			s.derivation.Reset()
			s.metrics.RecordPipelineReset()
			close(respCh)
		case resp := <-s.startSequencer:
			unsafeHead := s.engineController.UnsafeL2Head().Hash
			if !s.driverConfig.SequencerStopped {
				resp.err <- ErrSequencerAlreadyStarted
			} else if !bytes.Equal(unsafeHead[:], resp.hash[:]) {
				resp.err <- fmt.Errorf("block hash does not match: head %s, received %s", unsafeHead.String(), resp.hash.String())
			} else {
				if err := s.sequencerNotifs.SequencerStarted(); err != nil {
					resp.err <- fmt.Errorf("sequencer start notification: %w", err)
					continue
				}
				s.log.Info("Sequencer has been started")
				s.driverConfig.SequencerStopped = false
				close(resp.err)
				planSequencerAction() // resume sequencing
			}
		case respCh := <-s.stopSequencer:
			if s.driverConfig.SequencerStopped {
				respCh <- hashAndError{err: ErrSequencerAlreadyStopped}
			} else {
				if err := s.sequencerNotifs.SequencerStopped(); err != nil {
					respCh <- hashAndError{err: fmt.Errorf("sequencer stop notification: %w", err)}
					continue
				}
				s.log.Warn("Sequencer has been stopped")
				s.driverConfig.SequencerStopped = true
				// Cancel any inflight block building. If we don't cancel this, we can resume sequencing an old block
				// even if we've received new unsafe heads in the interim, causing us to introduce a re-org.
				s.sequencer.CancelBuildingBlock(s.driverCtx)
				respCh <- hashAndError{hash: s.engineController.UnsafeL2Head().Hash}
			}
		case respCh := <-s.sequencerActive:
			respCh <- !s.driverConfig.SequencerStopped
		case <-s.driverCtx.Done():
			return
		}
	}
}

// ResetDerivationPipeline forces a reset of the derivation pipeline.
// It waits for the reset to occur. It simply unblocks the caller rather
// than fully cancelling the reset request upon a context cancellation.
func (s *Driver) ResetDerivationPipeline(ctx context.Context) error {
	respCh := make(chan struct{}, 1)
	select {
	case <-ctx.Done():
		return ctx.Err()
	case s.forceReset <- respCh:
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-respCh:
			return nil
		}
	}
}

// StartSequencer asks the event loop to start sequencing on top of the given unsafe head
// block hash. It errors if the sequencer is not enabled, this node is not the conductor
// leader, or the hash does not match the current unsafe head.
func (s *Driver) StartSequencer(ctx context.Context, blockHash common.Hash) error {
	if !s.driverConfig.SequencerEnabled {
		return errors.New("sequencer is not enabled")
	}
	if isLeader, err := s.sequencerConductor.Leader(ctx); err != nil {
		return fmt.Errorf("sequencer leader check failed: %w", err)
	} else if !isLeader {
		return errors.New("sequencer is not the leader, aborting.")
	}
	h := hashAndErrorChannel{
		hash: blockHash,
		err:  make(chan error, 1),
	}
	select {
	case <-ctx.Done():
		return ctx.Err()
	case s.startSequencer <- h:
		select {
		case <-ctx.Done():
			return ctx.Err()
		case e := <-h.err:
			return e
		}
	}
}

// StopSequencer asks the event loop to stop sequencing and returns the hash of the
// last sequenced (unsafe head) L2 block.
func (s *Driver) StopSequencer(ctx context.Context) (common.Hash, error) {
	if !s.driverConfig.SequencerEnabled {
		return common.Hash{}, errors.New("sequencer is not enabled")
	}
	respCh := make(chan hashAndError, 1)
	select {
	case <-ctx.Done():
		return common.Hash{}, ctx.Err()
	case s.stopSequencer <- respCh:
		select {
		case <-ctx.Done():
			return common.Hash{}, ctx.Err()
		case he := <-respCh:
			return he.hash, he.err
		}
	}
}
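
// The helper below is an editorial sketch, not part of the op-node API: it shows how
// an admin caller might combine StopSequencer and StartSequencer. StopSequencer
// returns the hash of the last sequenced (unsafe head) block, and StartSequencer
// refuses to start unless the hash it is given still matches the unsafe head, so the
// sequencer resumes exactly where it stopped (or the restart fails loudly if the head
// moved in the meantime). The name restartSequencerSketch is hypothetical.
func restartSequencerSketch(ctx context.Context, d *Driver) error {
	// Stop sequencing and remember the hash of the last block that was built.
	hash, err := d.StopSequencer(ctx)
	if err != nil {
		return fmt.Errorf("stop sequencer: %w", err)
	}
	// Resume sequencing from that exact block; the event loop validates the hash.
	if err := d.StartSequencer(ctx, hash); err != nil {
		return fmt.Errorf("restart sequencer at %s: %w", hash, err)
	}
	return nil
}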

// SequencerActive reports whether the sequencer is actively building blocks.
// It returns false without consulting the event loop if the sequencer is not enabled.
func (s *Driver) SequencerActive(ctx context.Context) (bool, error) {
	if !s.driverConfig.SequencerEnabled {
		return false, nil
	}
	respCh := make(chan bool, 1)
	select {
	case <-ctx.Done():
		return false, ctx.Err()
	case s.sequencerActive <- respCh:
		select {
		case <-ctx.Done():
			return false, ctx.Err()
		case active := <-respCh:
			return active, nil
		}
	}
}

// syncStatus returns the current sync status, and should only be called synchronously with
// the driver event loop to avoid retrieval of an inconsistent status.
func (s *Driver) syncStatus() *eth.SyncStatus {
	return &eth.SyncStatus{
		CurrentL1:          s.derivation.Origin(),
		CurrentL1Finalized: s.derivation.FinalizedL1(),
		HeadL1:             s.l1State.L1Head(),
		SafeL1:             s.l1State.L1Safe(),
		FinalizedL1:        s.l1State.L1Finalized(),
		UnsafeL2:           s.engineController.UnsafeL2Head(),
		SafeL2:             s.engineController.SafeL2Head(),
		FinalizedL2:        s.engineController.Finalized(),
		PendingSafeL2:      s.engineController.PendingSafeL2Head(),
	}
}

// SyncStatus blocks the driver event loop and captures the syncing status.
// If the event loop is too busy and the context expires, a context error is returned.
func (s *Driver) SyncStatus(ctx context.Context) (*eth.SyncStatus, error) {
	wait := make(chan struct{})
	select {
	case s.stateReq <- wait:
		resp := s.syncStatus()
		<-wait
		return resp, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
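
// A hypothetical polling sketch (not part of op-node): it uses SyncStatus, which briefly
// blocks the event loop to take a consistent snapshot, to wait until the unsafe L2 head
// reaches a target block number. The helper name and the 2-second poll interval are
// assumptions made for illustration.
func waitForUnsafeBlockSketch(ctx context.Context, d *Driver, target uint64) error {
	tick := time.NewTicker(2 * time.Second)
	defer tick.Stop()
	for {
		status, err := d.SyncStatus(ctx)
		if err != nil {
			return fmt.Errorf("query sync status: %w", err)
		}
		// UnsafeL2 is the tip the driver currently knows about; stop once it is far enough.
		if status.UnsafeL2.Number >= target {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-tick.C:
		}
	}
}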

// BlockRefWithStatus blocks the driver event loop and captures the syncing status,
// along with an L2 block reference by number consistent with that same status.
// If the event loop is too busy and the context expires, a context error is returned.
func (s *Driver) BlockRefWithStatus(ctx context.Context, num uint64) (eth.L2BlockRef, *eth.SyncStatus, error) {
	wait := make(chan struct{})
	select {
	case s.stateReq <- wait:
		resp := s.syncStatus()
		ref, err := s.l2.L2BlockRefByNumber(ctx, num)
		<-wait
		return ref, resp, err
	case <-ctx.Done():
		return eth.L2BlockRef{}, nil, ctx.Err()
	}
}

// deferJSONString helps avoid a JSON-encoding performance hit if the snapshot logger does not run
type deferJSONString struct {
	x any
}

func (v deferJSONString) String() string {
	out, _ := json.Marshal(v.x)
	return string(out)
}

func (s *Driver) snapshot(event string) {
	s.snapshotLog.Info("Rollup State Snapshot",
		"event", event,
		"l1Head", deferJSONString{s.l1State.L1Head()},
		"l1Current", deferJSONString{s.derivation.Origin()},
		"l2Head", deferJSONString{s.engineController.UnsafeL2Head()},
		"l2Safe", deferJSONString{s.engineController.SafeL2Head()},
		"l2FinalizedHead", deferJSONString{s.engineController.Finalized()})
}

type hashAndError struct {
	hash common.Hash
	err  error
}

type hashAndErrorChannel struct {
	hash common.Hash
	err  chan error
}

// checkForGapInUnsafeQueue checks if there is a gap in the unsafe queue and attempts to retrieve the missing payloads from an alt-sync method.
// WARNING: This is only an outgoing signal, the blocks are not guaranteed to be retrieved.
// Results are received through OnUnsafeL2Payload.
func (s *Driver) checkForGapInUnsafeQueue(ctx context.Context) error {
	start := s.engineController.UnsafeL2Head()
	end := s.derivation.LowestQueuedUnsafeBlock()
	// Check if we have missing blocks between the start and end. Request them if we do.
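	// For example (illustrative numbers only): with the unsafe head at block 100 and the
	// lowest queued unsafe block at 105, blocks 101-104 are missing, so the range from
	// the unsafe head (100) up to the lowest queued block (105) is requested below.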
	if end == (eth.L2BlockRef{}) {
		s.log.Debug("requesting sync with open-end range", "start", start)
		return s.altSync.RequestL2Range(ctx, start, eth.L2BlockRef{})
	} else if end.Number > start.Number+1 {
		s.log.Debug("requesting missing unsafe L2 block range", "start", start, "end", end, "size", end.Number-start.Number)
		return s.altSync.RequestL2Range(ctx, start, end)
	}
	return nil
}