github.com/ethereum-optimism/optimism@v1.7.2/op-node/rollup/derive/blob_data_source.go (about)

     1  package derive
     2  
     3  import (
     4  	"context"
     5  	"errors"
     6  	"fmt"
     7  	"io"
     8  
     9  	"github.com/ethereum/go-ethereum"
    10  	"github.com/ethereum/go-ethereum/common"
    11  	"github.com/ethereum/go-ethereum/core/types"
    12  	"github.com/ethereum/go-ethereum/log"
    13  
    14  	"github.com/ethereum-optimism/optimism/op-service/eth"
    15  )
    16  
// blobOrCalldata is a tagged-union element holding one unit of batcher data:
// either a blob body or a transaction's calldata.
type blobOrCalldata struct {
	// union type. exactly one of calldata or blob should be non-nil
	blob     *eth.Blob // populated by fillBlobPointers after blob bodies are downloaded
	calldata *eth.Data // populated at tx-scan time for non-blob batcher transactions
}
    22  
// BlobDataSource fetches blobs or calldata as appropriate and transforms them into usable rollup
// data.
type BlobDataSource struct {
	data         []blobOrCalldata     // lazily populated by open(); nil until the first Next call succeeds
	ref          eth.L1BlockRef       // the L1 block whose batcher data this source yields
	batcherAddr  common.Address       // expected sender of valid batcher transactions
	dsCfg        DataSourceConfig     // supplies the L1 signer and batch inbox address used to filter txs
	fetcher      L1TransactionFetcher // used to fetch the referenced block's transactions
	blobsFetcher L1BlobsFetcher       // used to fetch blob bodies for the block's indexed blob hashes
	log          log.Logger           // logger scoped to the L1 origin (see NewBlobDataSource)
}
    34  
    35  // NewBlobDataSource creates a new blob data source.
    36  func NewBlobDataSource(ctx context.Context, log log.Logger, dsCfg DataSourceConfig, fetcher L1TransactionFetcher, blobsFetcher L1BlobsFetcher, ref eth.L1BlockRef, batcherAddr common.Address) DataIter {
    37  	return &BlobDataSource{
    38  		ref:          ref,
    39  		dsCfg:        dsCfg,
    40  		fetcher:      fetcher,
    41  		log:          log.New("origin", ref),
    42  		batcherAddr:  batcherAddr,
    43  		blobsFetcher: blobsFetcher,
    44  	}
    45  }
    46  
    47  // Next returns the next piece of batcher data, or an io.EOF error if no data remains. It returns
    48  // ResetError if it cannot find the referenced block or a referenced blob, or TemporaryError for
    49  // any other failure to fetch a block or blob.
    50  func (ds *BlobDataSource) Next(ctx context.Context) (eth.Data, error) {
    51  	if ds.data == nil {
    52  		var err error
    53  		if ds.data, err = ds.open(ctx); err != nil {
    54  			return nil, err
    55  		}
    56  	}
    57  
    58  	if len(ds.data) == 0 {
    59  		return nil, io.EOF
    60  	}
    61  
    62  	next := ds.data[0]
    63  	ds.data = ds.data[1:]
    64  	if next.calldata != nil {
    65  		return *next.calldata, nil
    66  	}
    67  
    68  	data, err := next.blob.ToData()
    69  	if err != nil {
    70  		ds.log.Error("ignoring blob due to parse failure", "err", err)
    71  		return ds.Next(ctx)
    72  	}
    73  	return data, nil
    74  }
    75  
    76  // open fetches and returns the blob or calldata (as appropriate) from all valid batcher
    77  // transactions in the referenced block. Returns an empty (non-nil) array if no batcher
    78  // transactions are found. It returns ResetError if it cannot find the referenced block or a
    79  // referenced blob, or TemporaryError for any other failure to fetch a block or blob.
    80  func (ds *BlobDataSource) open(ctx context.Context) ([]blobOrCalldata, error) {
    81  	_, txs, err := ds.fetcher.InfoAndTxsByHash(ctx, ds.ref.Hash)
    82  	if err != nil {
    83  		if errors.Is(err, ethereum.NotFound) {
    84  			return nil, NewResetError(fmt.Errorf("failed to open blob data source: %w", err))
    85  		}
    86  		return nil, NewTemporaryError(fmt.Errorf("failed to open blob data source: %w", err))
    87  	}
    88  
    89  	data, hashes := dataAndHashesFromTxs(txs, &ds.dsCfg, ds.batcherAddr)
    90  
    91  	if len(hashes) == 0 {
    92  		// there are no blobs to fetch so we can return immediately
    93  		return data, nil
    94  	}
    95  
    96  	// download the actual blob bodies corresponding to the indexed blob hashes
    97  	blobs, err := ds.blobsFetcher.GetBlobs(ctx, ds.ref, hashes)
    98  	if errors.Is(err, ethereum.NotFound) {
    99  		// If the L1 block was available, then the blobs should be available too. The only
   100  		// exception is if the blob retention window has expired, which we will ultimately handle
   101  		// by failing over to a blob archival service.
   102  		return nil, NewResetError(fmt.Errorf("failed to fetch blobs: %w", err))
   103  	} else if err != nil {
   104  		return nil, NewTemporaryError(fmt.Errorf("failed to fetch blobs: %w", err))
   105  	}
   106  
   107  	// go back over the data array and populate the blob pointers
   108  	if err := fillBlobPointers(data, blobs); err != nil {
   109  		// this shouldn't happen unless there is a bug in the blobs fetcher
   110  		return nil, NewResetError(fmt.Errorf("failed to fill blob pointers: %w", err))
   111  	}
   112  	return data, nil
   113  }
   114  
   115  // dataAndHashesFromTxs extracts calldata and datahashes from the input transactions and returns them. It
   116  // creates a placeholder blobOrCalldata element for each returned blob hash that must be populated
   117  // by fillBlobPointers after blob bodies are retrieved.
   118  func dataAndHashesFromTxs(txs types.Transactions, config *DataSourceConfig, batcherAddr common.Address) ([]blobOrCalldata, []eth.IndexedBlobHash) {
   119  	data := []blobOrCalldata{}
   120  	var hashes []eth.IndexedBlobHash
   121  	blobIndex := 0 // index of each blob in the block's blob sidecar
   122  	for _, tx := range txs {
   123  		// skip any non-batcher transactions
   124  		if !isValidBatchTx(tx, config.l1Signer, config.batchInboxAddress, batcherAddr) {
   125  			blobIndex += len(tx.BlobHashes())
   126  			continue
   127  		}
   128  		// handle non-blob batcher transactions by extracting their calldata
   129  		if tx.Type() != types.BlobTxType {
   130  			calldata := eth.Data(tx.Data())
   131  			data = append(data, blobOrCalldata{nil, &calldata})
   132  			continue
   133  		}
   134  		// handle blob batcher transactions by extracting their blob hashes, ignoring any calldata.
   135  		if len(tx.Data()) > 0 {
   136  			log.Warn("blob tx has calldata, which will be ignored", "txhash", tx.Hash())
   137  		}
   138  		for _, h := range tx.BlobHashes() {
   139  			idh := eth.IndexedBlobHash{
   140  				Index: uint64(blobIndex),
   141  				Hash:  h,
   142  			}
   143  			hashes = append(hashes, idh)
   144  			data = append(data, blobOrCalldata{nil, nil}) // will fill in blob pointers after we download them below
   145  			blobIndex += 1
   146  		}
   147  	}
   148  	return data, hashes
   149  }
   150  
   151  // fillBlobPointers goes back through the data array and fills in the pointers to the fetched blob
   152  // bodies. There should be exactly one placeholder blobOrCalldata element for each blob, otherwise
   153  // error is returned.
   154  func fillBlobPointers(data []blobOrCalldata, blobs []*eth.Blob) error {
   155  	blobIndex := 0
   156  	for i := range data {
   157  		if data[i].calldata != nil {
   158  			continue
   159  		}
   160  		if blobIndex >= len(blobs) {
   161  			return fmt.Errorf("didn't get enough blobs")
   162  		}
   163  		if blobs[blobIndex] == nil {
   164  			return fmt.Errorf("found a nil blob")
   165  		}
   166  		data[i].blob = blobs[blobIndex]
   167  		blobIndex++
   168  	}
   169  	if blobIndex != len(blobs) {
   170  		return fmt.Errorf("got too many blobs")
   171  	}
   172  	return nil
   173  }