github.com/ethereum-optimism/optimism@v1.7.2/op-node/rollup/derive/data_source.go (about)

     1  package derive
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  
     7  	"github.com/ethereum/go-ethereum/common"
     8  	"github.com/ethereum/go-ethereum/core/types"
     9  	"github.com/ethereum/go-ethereum/log"
    10  
    11  	"github.com/ethereum-optimism/optimism/op-node/rollup"
    12  	plasma "github.com/ethereum-optimism/optimism/op-plasma"
    13  	"github.com/ethereum-optimism/optimism/op-service/eth"
    14  )
    15  
// DataIter iterates over rollup input data. Next returns the next piece of
// batch-submitter data, or an error (e.g. io.EOF when the source is exhausted,
// per the convention of the data sources constructed in this file).
type DataIter interface {
	Next(ctx context.Context) (eth.Data, error)
}
    19  
// L1TransactionFetcher fetches L1 block info and its full transaction list by block hash.
type L1TransactionFetcher interface {
	// InfoAndTxsByHash returns the block info and transactions of the L1 block with the given hash.
	InfoAndTxsByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, types.Transactions, error)
}
    23  
// L1BlobsFetcher retrieves EIP-4844 blobs for an L1 block from a beacon endpoint.
type L1BlobsFetcher interface {
	// GetBlobs fetches blobs that were confirmed in the given L1 block with the given indexed hashes.
	GetBlobs(ctx context.Context, ref eth.L1BlockRef, hashes []eth.IndexedBlobHash) ([]*eth.Blob, error)
}
    28  
// PlasmaInputFetcher resolves plasma (alt-DA) input commitments found on L1
// into the actual batch data stored off-chain, and tracks the DA challenge
// state needed for derivation, reorgs, and finality.
type PlasmaInputFetcher interface {
	// GetInput fetches the input for the given commitment at the given block number from the DA storage service.
	GetInput(ctx context.Context, l1 plasma.L1Fetcher, c plasma.Keccak256Commitment, blockId eth.BlockID) (eth.Data, error)
	// AdvanceL1Origin advances the L1 origin to the given block number, syncing the DA challenge events.
	AdvanceL1Origin(ctx context.Context, l1 plasma.L1Fetcher, blockId eth.BlockID) error
	// Reset the challenge origin in case of L1 reorg
	Reset(ctx context.Context, base eth.L1BlockRef, baseCfg eth.SystemConfig) error
	// Notify L1 finalized head so plasma finality is always behind L1
	Finalize(ref eth.L1BlockRef)
	// Set the engine finalization signal callback
	OnFinalizedHeadSignal(f plasma.HeadSignalFn)
}
    41  
// DataSourceFactory reads raw transactions from a given block & then filters for
// batch submitter transactions.
// This is not a stage in the pipeline, but a wrapper for another stage in the pipeline
type DataSourceFactory struct {
	log           log.Logger
	dsCfg         DataSourceConfig   // rollup config subset used to filter batch txs
	fetcher       L1Fetcher          // L1 source for block info, txs, and receipts
	blobsFetcher  L1BlobsFetcher     // beacon-backed blob source; may be nil pre-Ecotone
	plasmaFetcher PlasmaInputFetcher // alt-DA input resolver; used only when plasma is enabled
	ecotoneTime   *uint64            // Ecotone activation timestamp; nil if not scheduled
}
    53  
    54  func NewDataSourceFactory(log log.Logger, cfg *rollup.Config, fetcher L1Fetcher, blobsFetcher L1BlobsFetcher, plasmaFetcher PlasmaInputFetcher) *DataSourceFactory {
    55  	config := DataSourceConfig{
    56  		l1Signer:          cfg.L1Signer(),
    57  		batchInboxAddress: cfg.BatchInboxAddress,
    58  		plasmaEnabled:     cfg.UsePlasma,
    59  	}
    60  	return &DataSourceFactory{
    61  		log:           log,
    62  		dsCfg:         config,
    63  		fetcher:       fetcher,
    64  		blobsFetcher:  blobsFetcher,
    65  		plasmaFetcher: plasmaFetcher,
    66  		ecotoneTime:   cfg.EcotoneTime,
    67  	}
    68  }
    69  
    70  // OpenData returns the appropriate data source for the L1 block `ref`.
    71  func (ds *DataSourceFactory) OpenData(ctx context.Context, ref eth.L1BlockRef, batcherAddr common.Address) (DataIter, error) {
    72  	// Creates a data iterator from blob or calldata source so we can forward it to the plasma source
    73  	// if enabled as it still requires an L1 data source for fetching input commmitments.
    74  	var src DataIter
    75  	if ds.ecotoneTime != nil && ref.Time >= *ds.ecotoneTime {
    76  		if ds.blobsFetcher == nil {
    77  			return nil, fmt.Errorf("ecotone upgrade active but beacon endpoint not configured")
    78  		}
    79  		src = NewBlobDataSource(ctx, ds.log, ds.dsCfg, ds.fetcher, ds.blobsFetcher, ref, batcherAddr)
    80  	} else {
    81  		src = NewCalldataSource(ctx, ds.log, ds.dsCfg, ds.fetcher, ref, batcherAddr)
    82  	}
    83  	if ds.dsCfg.plasmaEnabled {
    84  		// plasma([calldata | blobdata](l1Ref)) -> data
    85  		return NewPlasmaDataSource(ds.log, src, ds.fetcher, ds.plasmaFetcher, ref.ID()), nil
    86  	}
    87  	return src, nil
    88  }
    89  
// DataSourceConfig regroups the mandatory rollup.Config fields needed for DataFromEVMTransactions.
type DataSourceConfig struct {
	l1Signer          types.Signer   // signer used to recover the batcher address from inbox txs
	batchInboxAddress common.Address // only txs sent to this address are considered batch data
	plasmaEnabled     bool           // when true, OpenData wraps the source in a plasma data source
}
    96  
    97  // isValidBatchTx returns true if:
    98  //  1. the transaction has a To() address that matches the batch inbox address, and
    99  //  2. the transaction has a valid signature from the batcher address
   100  func isValidBatchTx(tx *types.Transaction, l1Signer types.Signer, batchInboxAddr, batcherAddr common.Address) bool {
   101  	to := tx.To()
   102  	if to == nil || *to != batchInboxAddr {
   103  		return false
   104  	}
   105  	seqDataSubmitter, err := l1Signer.Sender(tx) // optimization: only derive sender if To is correct
   106  	if err != nil {
   107  		log.Warn("tx in inbox with invalid signature", "hash", tx.Hash(), "err", err)
   108  		return false
   109  	}
   110  	// some random L1 user might have sent a transaction to our batch inbox, ignore them
   111  	if seqDataSubmitter != batcherAddr {
   112  		log.Warn("tx in inbox with unauthorized submitter", "addr", seqDataSubmitter, "hash", tx.Hash(), "err", err)
   113  		return false
   114  	}
   115  	return true
   116  }