github.com/ethereum-optimism/optimism@v1.7.2/op-node/node/node.go

     1  package node
     2  
     3  import (
     4  	"context"
     5  	"errors"
     6  	"fmt"
     7  	"io"
     8  	"sync/atomic"
     9  	"time"
    10  
    11  	"github.com/ethereum-optimism/optimism/op-node/node/safedb"
    12  	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
    13  	plasma "github.com/ethereum-optimism/optimism/op-plasma"
    14  	"github.com/ethereum-optimism/optimism/op-service/httputil"
    15  
    16  	"github.com/hashicorp/go-multierror"
    17  	"github.com/libp2p/go-libp2p/core/peer"
    18  
    19  	"github.com/ethereum/go-ethereum"
    20  	"github.com/ethereum/go-ethereum/event"
    21  	"github.com/ethereum/go-ethereum/log"
    22  
    23  	"github.com/ethereum-optimism/optimism/op-node/heartbeat"
    24  	"github.com/ethereum-optimism/optimism/op-node/metrics"
    25  	"github.com/ethereum-optimism/optimism/op-node/p2p"
    26  	"github.com/ethereum-optimism/optimism/op-node/rollup/conductor"
    27  	"github.com/ethereum-optimism/optimism/op-node/rollup/driver"
    28  	"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
    29  	"github.com/ethereum-optimism/optimism/op-node/version"
    30  	"github.com/ethereum-optimism/optimism/op-service/client"
    31  	"github.com/ethereum-optimism/optimism/op-service/eth"
    32  	"github.com/ethereum-optimism/optimism/op-service/oppprof"
    33  	"github.com/ethereum-optimism/optimism/op-service/retry"
    34  	"github.com/ethereum-optimism/optimism/op-service/sources"
    35  )
    36  
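        // ErrAlreadyClosed is returned by Stop if the node has already been fully stopped.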
    37  var ErrAlreadyClosed = errors.New("node is already closed")
    38  
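        // closableSafeDB combines the derivation safe-head listener, the reader used by the
        // rollup RPC API, and io.Closer, so the safe-head database can be shut down with the node.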
    39  type closableSafeDB interface {
    40  	derive.SafeHeadListener
    41  	SafeDBReader
    42  	io.Closer
    43  }
    44  
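        // OpNode is the rollup consensus node. It tracks the L1 head, safe and finalized blocks,
        // drives the L2 execution engine via the derivation driver, participates in the rollup
        // p2p network, and hosts the rollup-node RPC, metrics, pprof and heartbeat services.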
    45  type OpNode struct {
    46  	log        log.Logger
    47  	appVersion string
    48  	metrics    *metrics.Metrics
    49  
    50  	l1HeadsSub     ethereum.Subscription // Subscription to get L1 heads (automatically re-subscribes on error)
    51  	l1SafeSub      ethereum.Subscription // Subscription to get L1 safe blocks, a.k.a. justified data (polling)
    52  	l1FinalizedSub ethereum.Subscription // Subscription to get L1 finalized blocks (polling)
    53  
    54  	l1Source  *sources.L1Client     // L1 Client to fetch data from
    55  	l2Driver  *driver.Driver        // L2 Engine to Sync
    56  	l2Source  *sources.EngineClient // L2 Execution Engine RPC bindings
    57  	server    *rpcServer            // RPC server hosting the rollup-node API
    58  	p2pNode   *p2p.NodeP2P          // P2P node functionality
    59  	p2pSigner p2p.Signer            // p2p gossip application messages will be signed with this signer
    60  	tracer    Tracer                // tracer to get events for testing/debugging
    61  	runCfg    *RuntimeConfig        // runtime configurables
    62  
    63  	safeDB closableSafeDB
    64  
    65  	rollupHalt string // when to halt the rollup, disabled if empty
    66  
    67  	pprofService *oppprof.Service
    68  	metricsSrv   *httputil.HTTPServer
    69  
    70  	beacon *sources.L1BeaconClient
    71  
    72  	// some resources cannot be stopped directly, like the p2p gossipsub router (not our design),
    73  	// and depend on this ctx to be closed.
    74  	resourcesCtx   context.Context
    75  	resourcesClose context.CancelFunc
    76  
    77  	// Indicates when it's safe to close data sources used by the runtime config background loader
    78  	runtimeConfigReloaderDone chan struct{}
    79  
    80  	closed atomic.Bool
    81  
    82  	// cancels execution prematurely, e.g. to halt. This may be nil.
    83  	cancel context.CancelCauseFunc
    84  	halted atomic.Bool
    85  }
    86  
    87  // The OpNode handles incoming gossip
    88  var _ p2p.GossipIn = (*OpNode)(nil)
    89  
    90  // New creates a new OpNode instance.
    91  // The provided ctx argument is for the span of initialization only;
    92  // the node will immediately Stop(ctx) before finishing initialization if the context is canceled during initialization.
    93  func New(ctx context.Context, cfg *Config, log log.Logger, snapshotLog log.Logger, appVersion string, m *metrics.Metrics) (*OpNode, error) {
    94  	if err := cfg.Check(); err != nil {
    95  		return nil, err
    96  	}
    97  
    98  	n := &OpNode{
    99  		log:        log,
   100  		appVersion: appVersion,
   101  		metrics:    m,
   102  		rollupHalt: cfg.RollupHalt,
   103  		cancel:     cfg.Cancel,
   104  	}
   105  	// not a context leak, gossipsub is closed with a context.
   106  	n.resourcesCtx, n.resourcesClose = context.WithCancel(context.Background())
   107  
   108  	err := n.init(ctx, cfg, snapshotLog)
   109  	if err != nil {
   110  		log.Error("Error initializing the rollup node", "err", err)
   111  		// ensure we always close the node resources if we fail to initialize the node.
   112  		if closeErr := n.Stop(ctx); closeErr != nil {
   113  			return nil, multierror.Append(err, closeErr)
   114  		}
   115  		return nil, err
   116  	}
   117  	return n, nil
   118  }
   119  
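        // init wires up all node components in dependency order: tracer, L1 and Beacon clients,
        // L2 engine client and driver, runtime config, p2p signer and stack, and finally the
        // RPC server, metrics server, heartbeat reporter and pprof service.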
   120  func (n *OpNode) init(ctx context.Context, cfg *Config, snapshotLog log.Logger) error {
   121  	n.log.Info("Initializing rollup node", "version", n.appVersion)
   122  	if err := n.initTracer(ctx, cfg); err != nil {
   123  		return fmt.Errorf("failed to init the tracer: %w", err)
   124  	}
   125  	if err := n.initL1(ctx, cfg); err != nil {
   126  		return fmt.Errorf("failed to init L1: %w", err)
   127  	}
   128  	if err := n.initL1BeaconAPI(ctx, cfg); err != nil {
   129  		return err
   130  	}
   131  	if err := n.initL2(ctx, cfg, snapshotLog); err != nil {
   132  		return fmt.Errorf("failed to init L2: %w", err)
   133  	}
   134  	if err := n.initRuntimeConfig(ctx, cfg); err != nil { // depends on L2, to signal initial runtime values (e.g. protocol versions) to the engine
   135  		return fmt.Errorf("failed to init the runtime config: %w", err)
   136  	}
   137  	if err := n.initP2PSigner(ctx, cfg); err != nil {
   138  		return fmt.Errorf("failed to init the P2P signer: %w", err)
   139  	}
   140  	if err := n.initP2P(ctx, cfg); err != nil {
   141  		return fmt.Errorf("failed to init the P2P stack: %w", err)
   142  	}
   143  	// Only expose the server at the end, ensuring all RPC backend components are initialized.
   144  	if err := n.initRPCServer(cfg); err != nil {
   145  		return fmt.Errorf("failed to init the RPC server: %w", err)
   146  	}
   147  	if err := n.initMetricsServer(cfg); err != nil {
   148  		return fmt.Errorf("failed to init the metrics server: %w", err)
   149  	}
   150  	n.metrics.RecordInfo(n.appVersion)
   151  	n.metrics.RecordUp()
   152  	n.initHeartbeat(cfg)
   153  	if err := n.initPProf(cfg); err != nil {
   154  		return fmt.Errorf("failed to init profiling: %w", err)
   155  	}
   156  	return nil
   157  }
   158  
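        // initTracer installs the configured tracer, falling back to a no-op tracer.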
   159  func (n *OpNode) initTracer(ctx context.Context, cfg *Config) error {
   160  	if cfg.Tracer != nil {
   161  		n.tracer = cfg.Tracer
   162  	} else {
   163  		n.tracer = new(noOpTracer)
   164  	}
   165  	return nil
   166  }
   167  
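        // initL1 sets up the instrumented L1 RPC client, validates the rollup config against L1,
        // and subscribes to L1 head changes as well as polling of the safe and finalized blocks.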
   168  func (n *OpNode) initL1(ctx context.Context, cfg *Config) error {
   169  	l1Node, rpcCfg, err := cfg.L1.Setup(ctx, n.log, &cfg.Rollup)
   170  	if err != nil {
   171  		return fmt.Errorf("failed to get L1 RPC client: %w", err)
   172  	}
   173  
   174  	// Set the RethDB path in the EthClientConfig, if there is one configured.
   175  	rpcCfg.EthClientConfig.RethDBPath = cfg.RethDBPath
   176  
   177  	n.l1Source, err = sources.NewL1Client(
   178  		client.NewInstrumentedRPC(l1Node, n.metrics), n.log, n.metrics.L1SourceCache, rpcCfg)
   179  	if err != nil {
   180  		return fmt.Errorf("failed to create L1 source: %w", err)
   181  	}
   182  
   183  	if err := cfg.Rollup.ValidateL1Config(ctx, n.l1Source); err != nil {
   184  		return fmt.Errorf("failed to validate the L1 config: %w", err)
   185  	}
   186  
   187  	// Keep subscribed to the L1 heads, which keeps the L1 maintainer pointing to the best headers to sync
   188  	n.l1HeadsSub = event.ResubscribeErr(time.Second*10, func(ctx context.Context, err error) (event.Subscription, error) {
   189  		if err != nil {
   190  			n.log.Warn("resubscribing after failed L1 subscription", "err", err)
   191  		}
   192  		return eth.WatchHeadChanges(ctx, n.l1Source, n.OnNewL1Head)
   193  	})
   194  	go func() {
   195  		err, ok := <-n.l1HeadsSub.Err()
   196  		if !ok {
   197  			return
   198  		}
   199  		n.log.Error("l1 heads subscription error", "err", err)
   200  	}()
   201  
   202  	// Poll for the safe L1 block and finalized block,
   203  	// which only change once per epoch at most and may be delayed.
   204  	n.l1SafeSub = eth.PollBlockChanges(n.log, n.l1Source, n.OnNewL1Safe, eth.Safe,
   205  		cfg.L1EpochPollInterval, time.Second*10)
   206  	n.l1FinalizedSub = eth.PollBlockChanges(n.log, n.l1Source, n.OnNewL1Finalized, eth.Finalized,
   207  		cfg.L1EpochPollInterval, time.Second*10)
   208  	return nil
   209  }
   210  
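        // initRuntimeConfig loads the runtime config from a confirmation-depth-adjusted L1 block,
        // retrying on failure, and starts a background loop that reloads it at the configured interval.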
   211  func (n *OpNode) initRuntimeConfig(ctx context.Context, cfg *Config) error {
   212  	// attempt to load the runtime config, retrying a few times before giving up
   213  	n.runCfg = NewRuntimeConfig(n.log, n.l1Source, &cfg.Rollup)
   214  
   215  	confDepth := cfg.Driver.VerifierConfDepth
   216  	reload := func(ctx context.Context) (eth.L1BlockRef, error) {
   217  		fetchCtx, fetchCancel := context.WithTimeout(ctx, time.Second*10)
   218  		l1Head, err := n.l1Source.L1BlockRefByLabel(fetchCtx, eth.Unsafe)
   219  		fetchCancel()
   220  		if err != nil {
   221  			n.log.Error("failed to fetch L1 head for runtime config initialization", "err", err)
   222  			return eth.L1BlockRef{}, err
   223  		}
   224  
   225  		// Apply confirmation-distance
   226  		blNum := l1Head.Number
   227  		if blNum >= confDepth {
   228  			blNum -= confDepth
   229  		}
   230  		fetchCtx, fetchCancel = context.WithTimeout(ctx, time.Second*10)
   231  		confirmed, err := n.l1Source.L1BlockRefByNumber(fetchCtx, blNum)
   232  		fetchCancel()
   233  		if err != nil {
   234  			n.log.Error("failed to fetch confirmed L1 block for runtime config loading", "err", err, "number", blNum)
   235  			return eth.L1BlockRef{}, err
   236  		}
   237  
   238  		fetchCtx, fetchCancel = context.WithTimeout(ctx, time.Second*10)
   239  		err = n.runCfg.Load(fetchCtx, confirmed)
   240  		fetchCancel()
   241  		if err != nil {
   242  			n.log.Error("failed to fetch runtime config data", "err", err)
   243  			return l1Head, err
   244  		}
   245  
   246  		err = n.handleProtocolVersionsUpdate(ctx)
   247  		return l1Head, err
   248  	}
   249  
   250  	// initialize the runtime config before unblocking
   251  	if _, err := retry.Do(ctx, 5, retry.Fixed(time.Second*10), func() (eth.L1BlockRef, error) {
   252  		ref, err := reload(ctx)
   253  		if errors.Is(err, errNodeHalt) { // don't retry on halt error
   254  			err = nil
   255  		}
   256  		return ref, err
   257  	}); err != nil {
   258  		return fmt.Errorf("failed to load runtime configuration after repeated attempts, last error: %w", err)
   259  	}
   260  
   261  	// start a background loop, to keep reloading it at the configured reload interval
   262  	reloader := func(ctx context.Context, reloadInterval time.Duration) {
   263  		if reloadInterval <= 0 {
   264  			n.log.Debug("not running runtime-config reloading background loop")
   265  			return
   266  		}
   267  		ticker := time.NewTicker(reloadInterval)
   268  		defer ticker.Stop()
   269  		for {
   270  			select {
   271  			case <-ticker.C:
   272  				// If the reload fails, we will try again the next interval.
   273  				// Missing a runtime-config update is not critical, and we do not want to overwhelm the L1 RPC.
   274  				l1Head, err := reload(ctx)
   275  				if err != nil {
   276  					if errors.Is(err, errNodeHalt) {
   277  						n.halted.Store(true)
   278  						if n.cancel != nil { // node cancellation is always available when started as CLI app
   279  							n.cancel(errNodeHalt)
   280  							return
   281  						} else {
   282  							n.log.Debug("opted to halt, but cannot halt node", "l1_head", l1Head)
   283  						}
   284  					} else {
   285  						n.log.Warn("failed to reload runtime config", "err", err)
   286  					}
   287  				} else {
   288  					n.log.Debug("reloaded runtime config", "l1_head", l1Head)
   289  				}
   290  			case <-ctx.Done():
   291  				return
   292  			}
   293  		}
   294  	}
   295  
   296  	n.runtimeConfigReloaderDone = make(chan struct{})
   297  	// Manages the lifetime of the reloader, so Stop can wait for it before closing the data sources it uses.
   298  	go func(ctx context.Context, reloadInterval time.Duration) {
   299  		reloader(ctx, reloadInterval)
   300  		close(n.runtimeConfigReloaderDone)
   301  	}(n.resourcesCtx, cfg.RuntimeConfigReloadInterval) // this keeps running after initialization
   302  	return nil
   303  }
   304  
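        // initL1BeaconAPI sets up the L1 Beacon API client used to retrieve EIP-4844 blobs.
        // It is only required once the Ecotone upgrade is scheduled; the version check can be
        // explicitly ignored via the Beacon configuration.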
   305  func (n *OpNode) initL1BeaconAPI(ctx context.Context, cfg *Config) error {
   306  	// If Ecotone upgrade is not scheduled yet, then there is no need for a Beacon API.
   307  	if cfg.Rollup.EcotoneTime == nil {
   308  		return nil
   309  	}
   310  	// Once the Ecotone upgrade is scheduled, we must have initialized the Beacon API settings.
   311  	if cfg.Beacon == nil {
   312  		return fmt.Errorf("missing L1 Beacon Endpoint configuration: this API is mandatory for Ecotone upgrade at t=%d", *cfg.Rollup.EcotoneTime)
   313  	}
   314  
   315  	// We always initialize a client. We will get an error on requests if the client does not work.
   316  	// This way the op-node can continue non-L1 functionality when the user chooses to ignore the Beacon API requirement.
   317  	beaconClient, fallbacks, err := cfg.Beacon.Setup(ctx, n.log)
   318  	if err != nil {
   319  		return fmt.Errorf("failed to setup L1 Beacon API client: %w", err)
   320  	}
   321  	beaconCfg := sources.L1BeaconClientConfig{
   322  		FetchAllSidecars: cfg.Beacon.ShouldFetchAllSidecars(),
   323  	}
   324  	n.beacon = sources.NewL1BeaconClient(beaconClient, beaconCfg, fallbacks...)
   325  
   326  	// Retry retrieval of the Beacon API version, to be more robust on startup against Beacon API connection issues.
   327  	beaconVersion, missingEndpoint, err := retry.Do2[string, bool](ctx, 5, retry.Exponential(), func() (string, bool, error) {
   328  		ctx, cancel := context.WithTimeout(ctx, time.Second*10)
   329  		defer cancel()
   330  		beaconVersion, err := n.beacon.GetVersion(ctx)
   331  		if err != nil {
   332  			if errors.Is(err, client.ErrNoEndpoint) {
   333  				return "", true, nil // don't return an error, we do not have to retry when there is a config issue.
   334  			}
   335  			return "", false, err
   336  		}
   337  		return beaconVersion, false, nil
   338  	})
   339  	if missingEndpoint {
   340  		// Allow the user to continue if they explicitly ignore the requirement of the endpoint.
   341  		if cfg.Beacon.ShouldIgnoreBeaconCheck() {
   342  			n.log.Warn("This endpoint is required for the Ecotone upgrade, but is missing, and configured to be ignored. " +
   343  				"The node may be unable to retrieve EIP-4844 blobs data.")
   344  			return nil
   345  		} else {
   346  			// If the client tells us the endpoint was not configured,
   347  			// then explain why we need it, and what the user can do to ignore this.
   348  			n.log.Error("The Ecotone upgrade requires an L1 Beacon API endpoint, to retrieve EIP-4844 blobs data. " +
   349  				"This can be ignored with the --l1.beacon.ignore option, " +
   350  				"but the node may be unable to sync from L1 without this endpoint.")
   351  			return errors.New("missing L1 Beacon API endpoint")
   352  		}
   353  	} else if err != nil {
   354  		if cfg.Beacon.ShouldIgnoreBeaconCheck() {
   355  			n.log.Warn("Failed to check L1 Beacon API version, but configuration ignores results. "+
   356  				"The node may be unable to retrieve EIP-4844 blobs data.", "err", err)
   357  			return nil
   358  		} else {
   359  			return fmt.Errorf("failed to check L1 Beacon API version: %w", err)
   360  		}
   361  	} else {
   362  		n.log.Info("Connected to L1 Beacon API, ready for EIP-4844 blobs retrieval.", "version", beaconVersion)
   363  		return nil
   364  	}
   365  }
   366  
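        // initL2 sets up the L2 engine RPC client, the sequencer conductor (when enabled),
        // the plasma DA client, the safe-head database, and the derivation driver tying them together.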
   367  func (n *OpNode) initL2(ctx context.Context, cfg *Config, snapshotLog log.Logger) error {
   368  	rpcClient, rpcCfg, err := cfg.L2.Setup(ctx, n.log, &cfg.Rollup)
   369  	if err != nil {
   370  		return fmt.Errorf("failed to setup L2 execution-engine RPC client: %w", err)
   371  	}
   372  
   373  	n.l2Source, err = sources.NewEngineClient(
   374  		client.NewInstrumentedRPC(rpcClient, n.metrics), n.log, n.metrics.L2SourceCache, rpcCfg,
   375  	)
   376  	if err != nil {
   377  		return fmt.Errorf("failed to create Engine client: %w", err)
   378  	}
   379  
   380  	if err := cfg.Rollup.ValidateL2Config(ctx, n.l2Source, cfg.Sync.SyncMode == sync.ELSync); err != nil {
   381  		return err
   382  	}
   383  
   384  	var sequencerConductor conductor.SequencerConductor = &conductor.NoOpConductor{}
   385  	if cfg.ConductorEnabled {
   386  		sequencerConductor = NewConductorClient(cfg, n.log, n.metrics)
   387  	}
   388  
   389  	// If plasma is not explicitly enabled in the node CLI, the rollup plasma config and any error from it are ignored.
   390  	rpCfg, err := cfg.Rollup.PlasmaConfig()
   391  	if cfg.Plasma.Enabled && err != nil {
   392  		return fmt.Errorf("failed to get plasma config: %w", err)
   393  	}
   394  	plasmaDA := plasma.NewPlasmaDA(n.log, cfg.Plasma, rpCfg, n.metrics.PlasmaMetrics)
   395  	if cfg.SafeDBPath != "" {
   396  		n.log.Info("Safe head database enabled", "path", cfg.SafeDBPath)
   397  		safeDB, err := safedb.NewSafeDB(n.log, cfg.SafeDBPath)
   398  		if err != nil {
   399  			return fmt.Errorf("failed to create safe head database at %v: %w", cfg.SafeDBPath, err)
   400  		}
   401  		n.safeDB = safeDB
   402  	} else {
   403  		n.safeDB = safedb.Disabled
   404  	}
   405  	n.l2Driver = driver.NewDriver(&cfg.Driver, &cfg.Rollup, n.l2Source, n.l1Source, n.beacon, n, n, n.log, snapshotLog, n.metrics, cfg.ConfigPersistence, n.safeDB, &cfg.Sync, sequencerConductor, plasmaDA)
   406  	return nil
   407  }
   408  
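        // initRPCServer starts the rollup-node JSON-RPC server, attaching the p2p and admin APIs when enabled.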
   409  func (n *OpNode) initRPCServer(cfg *Config) error {
   410  	server, err := newRPCServer(&cfg.RPC, &cfg.Rollup, n.l2Source.L2Client, n.l2Driver, n.safeDB, n.log, n.appVersion, n.metrics)
   411  	if err != nil {
   412  		return err
   413  	}
   414  	if n.p2pNode != nil {
   415  		server.EnableP2P(p2p.NewP2PAPIBackend(n.p2pNode, n.log, n.metrics))
   416  	}
   417  	if cfg.RPC.EnableAdmin {
   418  		server.EnableAdminAPI(NewAdminAPI(n.l2Driver, n.metrics, n.log))
   419  		n.log.Info("Admin RPC enabled")
   420  	}
   421  	n.log.Info("Starting JSON-RPC server")
   422  	if err := server.Start(); err != nil {
   423  		return fmt.Errorf("unable to start RPC server: %w", err)
   424  	}
   425  	n.server = server
   426  	return nil
   427  }
   428  
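        // initMetricsServer starts the metrics HTTP server, if metrics are enabled.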
   429  func (n *OpNode) initMetricsServer(cfg *Config) error {
   430  	if !cfg.Metrics.Enabled {
   431  		n.log.Info("metrics disabled")
   432  		return nil
   433  	}
   434  	n.log.Debug("starting metrics server", "addr", cfg.Metrics.ListenAddr, "port", cfg.Metrics.ListenPort)
   435  	metricsSrv, err := n.metrics.StartServer(cfg.Metrics.ListenAddr, cfg.Metrics.ListenPort)
   436  	if err != nil {
   437  		return fmt.Errorf("failed to start metrics server: %w", err)
   438  	}
   439  	n.log.Info("started metrics server", "addr", metricsSrv.Addr())
   440  	n.metricsSrv = metricsSrv
   441  	return nil
   442  }
   443  
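        // initHeartbeat starts a background goroutine that reports the node version, peer ID and
        // chain ID to the configured heartbeat URL, if heartbeat reporting is enabled.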
   444  func (n *OpNode) initHeartbeat(cfg *Config) {
   445  	if !cfg.Heartbeat.Enabled {
   446  		return
   447  	}
   448  	var peerID string
   449  	if cfg.P2P.Disabled() {
   450  		peerID = "disabled"
   451  	} else {
   452  		peerID = n.P2P().Host().ID().String()
   453  	}
   454  
   455  	payload := &heartbeat.Payload{
   456  		Version: version.Version,
   457  		Meta:    version.Meta,
   458  		Moniker: cfg.Heartbeat.Moniker,
   459  		PeerID:  peerID,
   460  		ChainID: cfg.Rollup.L2ChainID.Uint64(),
   461  	}
   462  
   463  	go func(url string) {
   464  		if err := heartbeat.Beat(n.resourcesCtx, n.log, url, payload); err != nil {
   465  			n.log.Error("heartbeat goroutine crashed", "err", err)
   466  		}
   467  	}(cfg.Heartbeat.URL)
   468  }
   469  
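        // initPProf configures and starts the pprof service for runtime profiling.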
   470  func (n *OpNode) initPProf(cfg *Config) error {
   471  	n.pprofService = oppprof.New(
   472  		cfg.Pprof.ListenEnabled,
   473  		cfg.Pprof.ListenAddr,
   474  		cfg.Pprof.ListenPort,
   475  		cfg.Pprof.ProfileType,
   476  		cfg.Pprof.ProfileDir,
   477  		cfg.Pprof.ProfileFilename,
   478  	)
   479  
   480  	if err := n.pprofService.Start(); err != nil {
   481  		return fmt.Errorf("failed to start pprof service: %w", err)
   482  	}
   483  
   484  	return nil
   485  }
   486  
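        // initP2P sets up the p2p node for gossip and request/response sync, and starts the
        // discv5 discovery process when discovery is configured.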
   487  func (n *OpNode) initP2P(ctx context.Context, cfg *Config) error {
   488  	if cfg.P2P != nil {
   489  		// TODO(protocol-quest/97): Use EL Sync instead of CL Alt sync for fetching missing blocks in the payload queue.
   490  		p2pNode, err := p2p.NewNodeP2P(n.resourcesCtx, &cfg.Rollup, n.log, cfg.P2P, n, n.l2Source, n.runCfg, n.metrics, false)
   491  		if err != nil || p2pNode == nil {
   492  			return err
   493  		}
   494  		n.p2pNode = p2pNode
   495  		if n.p2pNode.Dv5Udp() != nil {
   496  			go n.p2pNode.DiscoveryProcess(n.resourcesCtx, n.log, &cfg.Rollup, cfg.P2P.TargetPeers())
   497  		}
   498  	}
   499  	return nil
   500  }
   501  
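        // initP2PSigner sets up the optional signer used to sign outgoing p2p gossip messages.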
   502  func (n *OpNode) initP2PSigner(ctx context.Context, cfg *Config) error {
   503  	// the p2p signer setup is optional
   504  	if cfg.P2PSigner == nil {
   505  		return nil
   506  	}
   507  	// p2pSigner may still be nil: the signer setup may not create any signer, since the signer is optional
   508  	var err error
   509  	n.p2pSigner, err = cfg.P2PSigner.SetupSigner(ctx)
   510  	return err
   511  }
   512  
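        // Start launches the L2 driver, which derives L2 blocks from L1 and drives them into the engine.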
   513  func (n *OpNode) Start(ctx context.Context) error {
   514  	n.log.Info("Starting execution engine driver")
   515  	// start driving engine: sync blocks by deriving them from L1 and driving them into the engine
   516  	if err := n.l2Driver.Start(); err != nil {
   517  		n.log.Error("Could not start a rollup node", "err", err)
   518  		return err
   519  	}
   520  	n.log.Info("Rollup node started")
   521  	return nil
   522  }
   523  
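        // OnNewL1Head notifies the tracer and forwards new L1 head blocks to the L2 driver.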
   524  func (n *OpNode) OnNewL1Head(ctx context.Context, sig eth.L1BlockRef) {
   525  	n.tracer.OnNewL1Head(ctx, sig)
   526  
   527  	if n.l2Driver == nil {
   528  		return
   529  	}
   530  	// Pass on the event to the L2 Engine
   531  	ctx, cancel := context.WithTimeout(ctx, time.Second*10)
   532  	defer cancel()
   533  	if err := n.l2Driver.OnL1Head(ctx, sig); err != nil {
   534  		n.log.Warn("failed to notify engine driver of L1 head change", "err", err)
   535  	}
   536  }
   537  
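        // OnNewL1Safe forwards L1 safe-block updates to the L2 driver.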
   538  func (n *OpNode) OnNewL1Safe(ctx context.Context, sig eth.L1BlockRef) {
   539  	if n.l2Driver == nil {
   540  		return
   541  	}
   542  	// Pass on the event to the L2 Engine
   543  	ctx, cancel := context.WithTimeout(ctx, time.Second*10)
   544  	defer cancel()
   545  	if err := n.l2Driver.OnL1Safe(ctx, sig); err != nil {
   546  		n.log.Warn("failed to notify engine driver of L1 safe block change", "err", err)
   547  	}
   548  }
   549  
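        // OnNewL1Finalized forwards L1 finalized-block updates to the L2 driver.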
   550  func (n *OpNode) OnNewL1Finalized(ctx context.Context, sig eth.L1BlockRef) {
   551  	if n.l2Driver == nil {
   552  		return
   553  	}
   554  	// Pass on the event to the L2 Engine
   555  	ctx, cancel := context.WithTimeout(ctx, time.Second*10)
   556  	defer cancel()
   557  	if err := n.l2Driver.OnL1Finalized(ctx, sig); err != nil {
   558  		n.log.Warn("failed to notify engine driver of L1 finalized block change", "err", err)
   559  	}
   560  }
   561  
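        // PublishL2Payload notifies the tracer and, if p2p is enabled, signs and gossips the
        // execution payload to the rollup p2p network. A p2p signer is required to publish.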
   562  func (n *OpNode) PublishL2Payload(ctx context.Context, envelope *eth.ExecutionPayloadEnvelope) error {
   563  	n.tracer.OnPublishL2Payload(ctx, envelope)
   564  
   565  	// publish to p2p, if we are running p2p at all
   566  	if n.p2pNode != nil {
   567  		payload := envelope.ExecutionPayload
   568  		if n.p2pSigner == nil {
   569  			return fmt.Errorf("node has no p2p signer, payload %s cannot be published", payload.ID())
   570  		}
   571  		n.log.Info("Publishing signed execution payload on p2p", "id", payload.ID())
   572  		return n.p2pNode.GossipOut().PublishL2Payload(ctx, envelope, n.p2pSigner)
   573  	}
   574  	// if p2p is not enabled then we just don't publish the payload
   575  	return nil
   576  }
   577  
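        // OnUnsafeL2Payload handles execution payloads received via p2p gossip, ignoring our own,
        // and forwards them to the L2 driver as unsafe blocks.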
   578  func (n *OpNode) OnUnsafeL2Payload(ctx context.Context, from peer.ID, envelope *eth.ExecutionPayloadEnvelope) error {
   579  	// ignore if it's from ourselves
   580  	if n.p2pNode != nil && from == n.p2pNode.Host().ID() {
   581  		return nil
   582  	}
   583  
   584  	n.tracer.OnUnsafeL2Payload(ctx, from, envelope)
   585  
   586  	n.log.Info("Received signed execution payload from p2p", "id", envelope.ExecutionPayload.ID(), "peer", from)
   587  
   588  	// Pass on the event to the L2 Engine
   589  	ctx, cancel := context.WithTimeout(ctx, time.Second*30)
   590  	defer cancel()
   591  
   592  	if err := n.l2Driver.OnUnsafeL2Payload(ctx, envelope); err != nil {
   593  		n.log.Warn("failed to notify engine driver of new L2 payload", "err", err, "id", envelope.ExecutionPayload.ID())
   594  	}
   595  
   596  	return nil
   597  }
   598  
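        // RequestL2Range requests the given range of L2 blocks via p2p alt-sync, if enabled and
        // the range is recent enough; otherwise the request is ignored.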
   599  func (n *OpNode) RequestL2Range(ctx context.Context, start, end eth.L2BlockRef) error {
   600  	if n.p2pNode != nil && n.p2pNode.AltSyncEnabled() {
   601  		if unixTimeStale(start.Time, 12*time.Hour) {
   602  			n.log.Debug("ignoring request to sync L2 range, timestamp is too old for p2p", "start", start, "end", end, "start_time", start.Time)
   603  			return nil
   604  		}
   605  		return n.p2pNode.RequestL2Range(ctx, start, end)
   606  	}
   607  	n.log.Debug("ignoring request to sync L2 range, no sync method available", "start", start, "end", end)
   608  	return nil
   609  }
   610  
   611  // unixTimeStale returns true if the unix timestamp is before the current time minus the supplied duration.
   612  func unixTimeStale(timestamp uint64, duration time.Duration) bool {
   613  	return time.Unix(int64(timestamp), 0).Before(time.Now().Add(-1 * duration))
   614  }
   615  
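        // P2P returns the node's p2p stack.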
   616  func (n *OpNode) P2P() p2p.Node {
   617  	return n.p2pNode
   618  }
   619  
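        // RuntimeConfig returns a read-only view of the runtime config loaded from L1.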
   620  func (n *OpNode) RuntimeConfig() ReadonlyRuntimeConfig {
   621  	return n.runCfg
   622  }
   623  
   624  // Stop stops the node and closes all resources.
   625  // If the provided ctx is expired, the node will accelerate the stop where possible, but still fully close.
   626  func (n *OpNode) Stop(ctx context.Context) error {
   627  	if n.closed.Load() {
   628  		return ErrAlreadyClosed
   629  	}
   630  
   631  	var result *multierror.Error
   632  
   633  	if n.server != nil {
   634  		if err := n.server.Stop(ctx); err != nil {
   635  			result = multierror.Append(result, fmt.Errorf("failed to close RPC server: %w", err))
   636  		}
   637  	}
   638  	if n.p2pNode != nil {
   639  		if err := n.p2pNode.Close(); err != nil {
   640  			result = multierror.Append(result, fmt.Errorf("failed to close p2p node: %w", err))
   641  		}
   642  	}
   643  	if n.p2pSigner != nil {
   644  		if err := n.p2pSigner.Close(); err != nil {
   645  			result = multierror.Append(result, fmt.Errorf("failed to close p2p signer: %w", err))
   646  		}
   647  	}
   648  
   649  	if n.resourcesClose != nil {
   650  		n.resourcesClose()
   651  	}
   652  
   653  	// stop L1 heads feed
   654  	if n.l1HeadsSub != nil {
   655  		n.l1HeadsSub.Unsubscribe()
   656  	}
   657  	// stop polling for L1 safe-head changes
   658  	if n.l1SafeSub != nil {
   659  		n.l1SafeSub.Unsubscribe()
   660  	}
   661  	// stop polling for L1 finalized-head changes
   662  	if n.l1FinalizedSub != nil {
   663  		n.l1FinalizedSub.Unsubscribe()
   664  	}
   665  
   666  	// close L2 driver
   667  	if n.l2Driver != nil {
   668  		if err := n.l2Driver.Close(); err != nil {
   669  			result = multierror.Append(result, fmt.Errorf("failed to close L2 engine driver cleanly: %w", err))
   670  		}
   671  	}
   672  
   673  	if n.safeDB != nil {
   674  		if err := n.safeDB.Close(); err != nil {
   675  			result = multierror.Append(result, fmt.Errorf("failed to close safe head db: %w", err))
   676  		}
   677  	}
   678  
   679  	// Wait for the runtime config loader to be done using the data sources before closing them
   680  	if n.runtimeConfigReloaderDone != nil {
   681  		<-n.runtimeConfigReloaderDone
   682  	}
   683  
   684  	// close L2 engine RPC client
   685  	if n.l2Source != nil {
   686  		n.l2Source.Close()
   687  	}
   688  
   689  	// close L1 data source
   690  	if n.l1Source != nil {
   691  		n.l1Source.Close()
   692  	}
   693  
   694  	if result == nil { // mark as closed if we successfully fully closed
   695  		n.closed.Store(true)
   696  	}
   697  
   698  	if n.halted.Load() {
   699  		// if we had a halt upon initialization, idle for a while, with open metrics, to prevent a rapid restart-loop
   700  		tim := time.NewTimer(time.Minute * 5)
   701  		n.log.Warn("halted, idling to avoid immediate shutdown repeats")
   702  		defer tim.Stop()
   703  		select {
   704  		case <-tim.C:
   705  		case <-ctx.Done():
   706  		}
   707  	}
   708  
   709  	// Close metrics and pprof only after we are done idling
   710  	if n.pprofService != nil {
   711  		if err := n.pprofService.Stop(ctx); err != nil {
   712  			result = multierror.Append(result, fmt.Errorf("failed to close pprof server: %w", err))
   713  		}
   714  	}
   715  	if n.metricsSrv != nil {
   716  		if err := n.metricsSrv.Stop(ctx); err != nil {
   717  			result = multierror.Append(result, fmt.Errorf("failed to close metrics server: %w", err))
   718  		}
   719  	}
   720  
   721  	return result.ErrorOrNil()
   722  }
   723  
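        // Stopped reports whether the node has been fully stopped.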
   724  func (n *OpNode) Stopped() bool {
   725  	return n.closed.Load()
   726  }
   727  
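        // HTTPEndpoint returns the HTTP URL of the rollup-node RPC server, or an empty string if it is not running.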
   728  func (n *OpNode) HTTPEndpoint() string {
   729  	if n.server == nil {
   730  		return ""
   731  	}
   732  	return fmt.Sprintf("http://%s", n.server.Addr().String())
   733  }