github.com/ethereum-optimism/optimism@v1.7.2/op-node/rollup/derive/batch_queue_test.go

     1  package derive
     2  
     3  import (
     4  	"context"
     5  	"encoding/binary"
     6  	"errors"
     7  	"io"
     8  	"math/big"
     9  	"math/rand"
    10  	"testing"
    11  
    12  	"github.com/ethereum/go-ethereum/common"
    13  	"github.com/ethereum/go-ethereum/common/hexutil"
    14  	"github.com/ethereum/go-ethereum/core/types"
    15  	"github.com/ethereum/go-ethereum/log"
    16  	"github.com/stretchr/testify/require"
    17  
    18  	"github.com/ethereum-optimism/optimism/op-node/rollup"
    19  	"github.com/ethereum-optimism/optimism/op-service/eth"
    20  	"github.com/ethereum-optimism/optimism/op-service/testlog"
    21  	"github.com/ethereum-optimism/optimism/op-service/testutils"
    22  )
    23  
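         // fakeBatchQueueInput is a stub batch source for the batch queue under test:
         // it returns a fixed sequence of batches and errors from NextBatch and reports
         // a configurable L1 origin.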
    24  type fakeBatchQueueInput struct {
    25  	i       int
    26  	batches []Batch
    27  	errors  []error
    28  	origin  eth.L1BlockRef
    29  }
    30  
    31  func (f *fakeBatchQueueInput) Origin() eth.L1BlockRef {
    32  	return f.origin
    33  }
    34  
    35  func (f *fakeBatchQueueInput) NextBatch(ctx context.Context) (Batch, error) {
    36  	if f.i >= len(f.batches) {
    37  		return nil, io.EOF
    38  	}
    39  	b := f.batches[f.i]
    40  	e := f.errors[f.i]
    41  	f.i += 1
    42  	return b, e
    43  }
    44  
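         // mockHash derives a deterministic hash from a timestamp, tagging the last byte
         // with the layer (1 for L1, 2 for L2) so L1 and L2 hashes cannot collide.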
    45  func mockHash(time uint64, layer uint8) common.Hash {
    46  	hash := common.Hash{31: layer} // indicate L1 or L2
    47  	binary.LittleEndian.PutUint64(hash[:], time)
    48  	return hash
    49  }
    50  
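         // b builds a SingularBatch at the given timestamp and epoch, containing a single
         // random transaction that is deterministic in the timestamp.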
    51  func b(chainId *big.Int, timestamp uint64, epoch eth.L1BlockRef) *SingularBatch {
    52  	rng := rand.New(rand.NewSource(int64(timestamp)))
    53  	signer := types.NewLondonSigner(chainId)
    54  	tx := testutils.RandomTx(rng, new(big.Int).SetUint64(rng.Uint64()), signer)
    55  	txData, _ := tx.MarshalBinary()
    56  	return &SingularBatch{
    57  		ParentHash:   mockHash(timestamp-2, 2),
    58  		Timestamp:    timestamp,
    59  		EpochNum:     rollup.Epoch(epoch.Number),
    60  		EpochHash:    epoch.Hash,
    61  		Transactions: []hexutil.Bytes{txData},
    62  	}
    63  }
    64  
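         // buildSpanBatches groups the given singular batches into span batches, where
         // blockCounts[i] is the number of consecutive singular batches in the i-th span.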
    65  func buildSpanBatches(t *testing.T, parent *eth.L2BlockRef, singularBatches []*SingularBatch, blockCounts []int, chainId *big.Int) []Batch {
    66  	var spanBatches []Batch
    67  	idx := 0
    68  	for _, count := range blockCounts {
    69  		span := NewSpanBatch(singularBatches[idx : idx+count])
    70  		spanBatches = append(spanBatches, span)
    71  		idx += count
    72  	}
    73  	return spanBatches
    74  }
    75  
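         // getDeltaTime returns the Delta fork activation time for the rollup config:
         // zero (active from genesis) for span batch tests, nil (inactive) otherwise.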
    76  func getDeltaTime(batchType int) *uint64 {
    77  	minTs := uint64(0)
    78  	if batchType == SpanBatchType {
    79  		return &minTs
    80  	}
    81  	return nil
    82  }
    83  
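         // l1InfoDepositTx builds a minimal L1 info deposit transaction for the given
         // L1 block number, marshaled to binary as it would appear in an L2 block.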
    84  func l1InfoDepositTx(t *testing.T, l1BlockNum uint64) hexutil.Bytes {
    85  	l1Info := L1BlockInfo{
    86  		Number:  l1BlockNum,
    87  		BaseFee: big.NewInt(0),
    88  	}
    89  	infoData, err := l1Info.marshalBinaryBedrock()
    90  	require.NoError(t, err)
    91  	depositTx := &types.DepositTx{
    92  		Data: infoData,
    93  	}
    94  	txData, err := types.NewTx(depositTx).MarshalBinary()
    95  	require.NoError(t, err)
    96  	return txData
    97  }
    98  
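         // singularBatchToPayload converts a SingularBatch into the execution payload of
         // the corresponding L2 block, prepending the L1 info deposit transaction.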
    99  func singularBatchToPayload(t *testing.T, batch *SingularBatch, blockNumber uint64) eth.ExecutionPayloadEnvelope {
   100  	txs := []hexutil.Bytes{l1InfoDepositTx(t, uint64(batch.EpochNum))}
   101  	txs = append(txs, batch.Transactions...)
   102  	return eth.ExecutionPayloadEnvelope{
   103  		ExecutionPayload: &eth.ExecutionPayload{
   104  			BlockHash:    mockHash(batch.Timestamp, 2),
   105  			ParentHash:   batch.ParentHash,
   106  			BlockNumber:  hexutil.Uint64(blockNumber),
   107  			Timestamp:    hexutil.Uint64(batch.Timestamp),
   108  			Transactions: txs,
   109  		},
   110  	}
   111  }
   112  
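         // singularBatchToBlockRef converts a SingularBatch into the L2 block ref of the
         // corresponding L2 block.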
   113  func singularBatchToBlockRef(t *testing.T, batch *SingularBatch, blockNumber uint64) eth.L2BlockRef {
   114  	return eth.L2BlockRef{
   115  		Hash:       mockHash(batch.Timestamp, 2),
   116  		Number:     blockNumber,
   117  		ParentHash: batch.ParentHash,
   118  		Time:       batch.Timestamp,
   119  		L1Origin:   eth.BlockID{Hash: batch.EpochHash, Number: uint64(batch.EpochNum)},
   120  	}
   121  }
   122  
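         // L1Chain builds a chain of L1 block refs with the given timestamps, linked by
         // parent hash and numbered from zero.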
   123  func L1Chain(l1Times []uint64) []eth.L1BlockRef {
   124  	var out []eth.L1BlockRef
   125  	var parentHash common.Hash
   126  	for i, time := range l1Times {
   127  		hash := mockHash(time, 1)
   128  		out = append(out, eth.L1BlockRef{
   129  			Hash:       hash,
   130  			Number:     uint64(i),
   131  			ParentHash: parentHash,
   132  			Time:       time,
   133  		})
   134  		parentHash = hash
   135  	}
   136  	return out
   137  }
   138  
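         // TestBatchQueue runs each batch queue subtest twice: once with singular batches
         // and once with span batches.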
   139  func TestBatchQueue(t *testing.T) {
   140  	tests := []struct {
   141  		name string
   142  		f    func(t *testing.T, batchType int)
   143  	}{
   144  		{"BatchQueueNewOrigin", BatchQueueNewOrigin},
   145  		{"BatchQueueEager", BatchQueueEager},
   146  		{"BatchQueueInvalidInternalAdvance", BatchQueueInvalidInternalAdvance},
   147  		{"BatchQueueMissing", BatchQueueMissing},
   148  		{"BatchQueueAdvancedEpoch", BatchQueueAdvancedEpoch},
   149  		{"BatchQueueShuffle", BatchQueueShuffle},
   150  	}
   151  	for _, test := range tests {
   152  		test := test
   153  		t.Run(test.name+"_SingularBatch", func(t *testing.T) {
   154  			test.f(t, SingularBatchType)
   155  		})
   156  	}
   157  
   158  	for _, test := range tests {
   159  		test := test
   160  		t.Run(test.name+"_SpanBatch", func(t *testing.T) {
   161  			test.f(t, SpanBatchType)
   162  		})
   163  	}
   164  }
   165  
    166  // BatchQueueNewOrigin tests that the batch queue properly saves the new origin
    167  // when the safe head's origin is ahead of the pipeline's origin (as is the case after a reset).
    168  // This issue was fixed in https://github.com/ethereum-optimism/optimism/pull/3694
   169  func BatchQueueNewOrigin(t *testing.T, batchType int) {
   170  	log := testlog.Logger(t, log.LevelCrit)
   171  	l1 := L1Chain([]uint64{10, 15, 20, 25})
   172  	safeHead := eth.L2BlockRef{
   173  		Hash:           mockHash(10, 2),
   174  		Number:         0,
   175  		ParentHash:     common.Hash{},
   176  		Time:           20,
   177  		L1Origin:       l1[2].ID(),
   178  		SequenceNumber: 0,
   179  	}
   180  	cfg := &rollup.Config{
   181  		Genesis: rollup.Genesis{
   182  			L2Time: 10,
   183  		},
   184  		BlockTime:         2,
   185  		MaxSequencerDrift: 600,
   186  		SeqWindowSize:     2,
   187  		DeltaTime:         getDeltaTime(batchType),
   188  	}
   189  
   190  	input := &fakeBatchQueueInput{
   191  		batches: []Batch{nil},
   192  		errors:  []error{io.EOF},
   193  		origin:  l1[0],
   194  	}
   195  
   196  	bq := NewBatchQueue(log, cfg, input, nil)
   197  	_ = bq.Reset(context.Background(), l1[0], eth.SystemConfig{})
   198  	require.Equal(t, []eth.L1BlockRef{l1[0]}, bq.l1Blocks)
   199  
   200  	// Prev Origin: 0; Safehead Origin: 2; Internal Origin: 0
   201  	// Should return no data but keep the same origin
   202  	data, _, err := bq.NextBatch(context.Background(), safeHead)
   203  	require.Nil(t, data)
   204  	require.Equal(t, io.EOF, err)
   205  	require.Equal(t, []eth.L1BlockRef{l1[0]}, bq.l1Blocks)
   206  	require.Equal(t, l1[0], bq.origin)
   207  
   208  	// Prev Origin: 1; Safehead Origin: 2; Internal Origin: 0
   209  	// Should wipe l1blocks + advance internal origin
   210  	input.origin = l1[1]
   211  	data, _, err = bq.NextBatch(context.Background(), safeHead)
   212  	require.Nil(t, data)
   213  	require.Equal(t, io.EOF, err)
   214  	require.Empty(t, bq.l1Blocks)
   215  	require.Equal(t, l1[1], bq.origin)
   216  
   217  	// Prev Origin: 2; Safehead Origin: 2; Internal Origin: 1
   218  	// Should add to l1Blocks + advance internal origin
   219  	input.origin = l1[2]
   220  	data, _, err = bq.NextBatch(context.Background(), safeHead)
   221  	require.Nil(t, data)
   222  	require.Equal(t, io.EOF, err)
   223  	require.Equal(t, []eth.L1BlockRef{l1[2]}, bq.l1Blocks)
   224  	require.Equal(t, l1[2], bq.origin)
   225  }
   226  
    227  // BatchQueueEager adds a sequence of contiguous batches and asserts that
    228  // repeated calls to `NextBatch` return all of those batches.
   229  func BatchQueueEager(t *testing.T, batchType int) {
   230  	log := testlog.Logger(t, log.LevelCrit)
   231  	l1 := L1Chain([]uint64{10, 20, 30})
   232  	chainId := big.NewInt(1234)
   233  	safeHead := eth.L2BlockRef{
   234  		Hash:           mockHash(10, 2),
   235  		Number:         0,
   236  		ParentHash:     common.Hash{},
   237  		Time:           10,
   238  		L1Origin:       l1[0].ID(),
   239  		SequenceNumber: 0,
   240  	}
   241  	cfg := &rollup.Config{
   242  		Genesis: rollup.Genesis{
   243  			L2Time: 10,
   244  		},
   245  		BlockTime:         2,
   246  		MaxSequencerDrift: 600,
   247  		SeqWindowSize:     30,
   248  		DeltaTime:         getDeltaTime(batchType),
   249  		L2ChainID:         chainId,
   250  	}
   251  
   252  	// expected output of BatchQueue.NextBatch()
   253  	expectedOutputBatches := []*SingularBatch{
   254  		b(cfg.L2ChainID, 12, l1[0]),
   255  		b(cfg.L2ChainID, 14, l1[0]),
   256  		b(cfg.L2ChainID, 16, l1[0]),
   257  		b(cfg.L2ChainID, 18, l1[0]),
   258  		b(cfg.L2ChainID, 20, l1[0]),
   259  		b(cfg.L2ChainID, 22, l1[0]),
   260  		nil,
   261  	}
   262  	// expected error of BatchQueue.NextBatch()
   263  	expectedOutputErrors := []error{nil, nil, nil, nil, nil, nil, io.EOF}
   264  	// errors will be returned by fakeBatchQueueInput.NextBatch()
   265  	inputErrors := expectedOutputErrors
   266  	// batches will be returned by fakeBatchQueueInput
   267  	var inputBatches []Batch
   268  	if batchType == SpanBatchType {
   269  		spanBlockCounts := []int{1, 2, 3}
   270  		inputErrors = []error{nil, nil, nil, io.EOF}
   271  		inputBatches = buildSpanBatches(t, &safeHead, expectedOutputBatches, spanBlockCounts, chainId)
   272  		inputBatches = append(inputBatches, nil)
   273  	} else {
   274  		for _, singularBatch := range expectedOutputBatches {
   275  			inputBatches = append(inputBatches, singularBatch)
   276  		}
   277  	}
   278  
   279  	input := &fakeBatchQueueInput{
   280  		batches: inputBatches,
   281  		errors:  inputErrors,
   282  		origin:  l1[0],
   283  	}
   284  
   285  	bq := NewBatchQueue(log, cfg, input, nil)
   286  	_ = bq.Reset(context.Background(), l1[0], eth.SystemConfig{})
   287  	// Advance the origin
   288  	input.origin = l1[1]
   289  
   290  	for i := 0; i < len(expectedOutputBatches); i++ {
   291  		b, _, e := bq.NextBatch(context.Background(), safeHead)
   292  		require.ErrorIs(t, e, expectedOutputErrors[i])
   293  		if b == nil {
   294  			require.Nil(t, expectedOutputBatches[i])
   295  		} else {
   296  			require.Equal(t, expectedOutputBatches[i], b)
   297  			safeHead.Number += 1
   298  			safeHead.Time += cfg.BlockTime
   299  			safeHead.Hash = mockHash(b.Timestamp, 2)
   300  			safeHead.L1Origin = b.Epoch()
   301  		}
   302  	}
   303  }
   304  
   305  // BatchQueueInvalidInternalAdvance asserts that we do not miss an epoch when generating batches.
   306  // This is a regression test for CLI-3378.
   307  func BatchQueueInvalidInternalAdvance(t *testing.T, batchType int) {
   308  	log := testlog.Logger(t, log.LevelTrace)
   309  	l1 := L1Chain([]uint64{10, 15, 20, 25, 30})
   310  	chainId := big.NewInt(1234)
   311  	safeHead := eth.L2BlockRef{
   312  		Hash:           mockHash(10, 2),
   313  		Number:         0,
   314  		ParentHash:     common.Hash{},
   315  		Time:           10,
   316  		L1Origin:       l1[0].ID(),
   317  		SequenceNumber: 0,
   318  	}
   319  	cfg := &rollup.Config{
   320  		Genesis: rollup.Genesis{
   321  			L2Time: 10,
   322  		},
   323  		BlockTime:         2,
   324  		MaxSequencerDrift: 600,
   325  		SeqWindowSize:     2,
   326  		DeltaTime:         getDeltaTime(batchType),
   327  		L2ChainID:         chainId,
   328  	}
   329  
   330  	// expected output of BatchQueue.NextBatch()
   331  	expectedOutputBatches := []*SingularBatch{
   332  		b(cfg.L2ChainID, 12, l1[0]),
   333  		b(cfg.L2ChainID, 14, l1[0]),
   334  		b(cfg.L2ChainID, 16, l1[0]),
   335  		b(cfg.L2ChainID, 18, l1[0]),
   336  		b(cfg.L2ChainID, 20, l1[0]),
   337  		b(cfg.L2ChainID, 22, l1[0]),
   338  		nil,
   339  	}
   340  	// expected error of BatchQueue.NextBatch()
   341  	expectedOutputErrors := []error{nil, nil, nil, nil, nil, nil, io.EOF}
   342  	// errors will be returned by fakeBatchQueueInput.NextBatch()
   343  	inputErrors := expectedOutputErrors
   344  	// batches will be returned by fakeBatchQueueInput
   345  	var inputBatches []Batch
   346  	if batchType == SpanBatchType {
   347  		spanBlockCounts := []int{1, 2, 3}
   348  		inputErrors = []error{nil, nil, nil, io.EOF}
   349  		inputBatches = buildSpanBatches(t, &safeHead, expectedOutputBatches, spanBlockCounts, chainId)
   350  		inputBatches = append(inputBatches, nil)
   351  	} else {
   352  		for _, singularBatch := range expectedOutputBatches {
   353  			inputBatches = append(inputBatches, singularBatch)
   354  		}
   355  	}
   356  
   357  	input := &fakeBatchQueueInput{
   358  		batches: inputBatches,
   359  		errors:  inputErrors,
   360  		origin:  l1[0],
   361  	}
   362  
   363  	bq := NewBatchQueue(log, cfg, input, nil)
   364  	_ = bq.Reset(context.Background(), l1[0], eth.SystemConfig{})
   365  
   366  	// Load continuous batches for epoch 0
   367  	for i := 0; i < len(expectedOutputBatches); i++ {
   368  		b, _, e := bq.NextBatch(context.Background(), safeHead)
   369  		require.ErrorIs(t, e, expectedOutputErrors[i])
   370  		if b == nil {
   371  			require.Nil(t, expectedOutputBatches[i])
   372  		} else {
   373  			require.Equal(t, expectedOutputBatches[i], b)
   374  			safeHead.Number += 1
   375  			safeHead.Time += 2
   376  			safeHead.Hash = mockHash(b.Timestamp, 2)
   377  			safeHead.L1Origin = b.Epoch()
   378  		}
   379  	}
   380  
   381  	// Advance to origin 1. No forced batches yet.
   382  	input.origin = l1[1]
   383  	b, _, e := bq.NextBatch(context.Background(), safeHead)
   384  	require.ErrorIs(t, e, io.EOF)
   385  	require.Nil(t, b)
   386  
   387  	// Advance to origin 2. No forced batches yet because we are still on epoch 0
   388  	// & have batches for epoch 0.
   389  	input.origin = l1[2]
   390  	b, _, e = bq.NextBatch(context.Background(), safeHead)
   391  	require.ErrorIs(t, e, io.EOF)
   392  	require.Nil(t, b)
   393  
   394  	// Advance to origin 3. Should generate one empty batch.
   395  	input.origin = l1[3]
   396  	b, _, e = bq.NextBatch(context.Background(), safeHead)
   397  	require.Nil(t, e)
   398  	require.NotNil(t, b)
   399  	require.Equal(t, safeHead.Time+2, b.Timestamp)
   400  	require.Equal(t, rollup.Epoch(1), b.EpochNum)
   401  	safeHead.Number += 1
   402  	safeHead.Time += 2
   403  	safeHead.Hash = mockHash(b.Timestamp, 2)
   404  	safeHead.L1Origin = b.Epoch()
   405  	b, _, e = bq.NextBatch(context.Background(), safeHead)
   406  	require.ErrorIs(t, e, io.EOF)
   407  	require.Nil(t, b)
   408  
   409  	// Advance to origin 4. Should generate one empty batch.
   410  	input.origin = l1[4]
   411  	b, _, e = bq.NextBatch(context.Background(), safeHead)
   412  	require.Nil(t, e)
   413  	require.NotNil(t, b)
   414  	require.Equal(t, rollup.Epoch(2), b.EpochNum)
   415  	require.Equal(t, safeHead.Time+2, b.Timestamp)
   416  	safeHead.Number += 1
   417  	safeHead.Time += 2
   418  	safeHead.Hash = mockHash(b.Timestamp, 2)
   419  	safeHead.L1Origin = b.Epoch()
   420  	b, _, e = bq.NextBatch(context.Background(), safeHead)
   421  	require.ErrorIs(t, e, io.EOF)
   422  	require.Nil(t, b)
   423  
   424  }
   425  
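         // BatchQueueMissing feeds the batch queue a gapped set of batches and asserts that
         // empty batches are auto-generated for the missing timestamps once the sequence
         // window has elapsed.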
   426  func BatchQueueMissing(t *testing.T, batchType int) {
   427  	log := testlog.Logger(t, log.LevelCrit)
   428  	l1 := L1Chain([]uint64{10, 15, 20, 25})
   429  	chainId := big.NewInt(1234)
   430  	safeHead := eth.L2BlockRef{
   431  		Hash:           mockHash(10, 2),
   432  		Number:         0,
   433  		ParentHash:     common.Hash{},
   434  		Time:           10,
   435  		L1Origin:       l1[0].ID(),
   436  		SequenceNumber: 0,
   437  	}
   438  	cfg := &rollup.Config{
   439  		Genesis: rollup.Genesis{
   440  			L2Time: 10,
   441  		},
   442  		BlockTime:         2,
   443  		MaxSequencerDrift: 600,
   444  		SeqWindowSize:     2,
   445  		DeltaTime:         getDeltaTime(batchType),
   446  		L2ChainID:         chainId,
   447  	}
   448  
    449  	// The inputBatches at 18 and 20 are skipped to stop 22 from being eagerly processed.
    450  	// This test checks that the batches at timestamps 12 & 14 are created, 16 is used, and 18 advances the epoch.
    451  	// Due to the large sequencer time drift, 16 is perfectly valid to have epoch 0 as its origin.
   452  
   453  	// expected output of BatchQueue.NextBatch()
   454  	expectedOutputBatches := []*SingularBatch{
   455  		b(cfg.L2ChainID, 16, l1[0]),
   456  		b(cfg.L2ChainID, 22, l1[1]),
   457  	}
   458  	// errors will be returned by fakeBatchQueueInput.NextBatch()
   459  	inputErrors := []error{nil, nil}
   460  	// batches will be returned by fakeBatchQueueInput
   461  	var inputBatches []Batch
   462  	if batchType == SpanBatchType {
   463  		spanBlockCounts := []int{1, 1}
   464  		inputErrors = []error{nil, nil, nil, io.EOF}
   465  		inputBatches = buildSpanBatches(t, &safeHead, expectedOutputBatches, spanBlockCounts, chainId)
   466  	} else {
   467  		for _, singularBatch := range expectedOutputBatches {
   468  			inputBatches = append(inputBatches, singularBatch)
   469  		}
   470  	}
   471  
   472  	input := &fakeBatchQueueInput{
   473  		batches: inputBatches,
   474  		errors:  inputErrors,
   475  		origin:  l1[0],
   476  	}
   477  
   478  	bq := NewBatchQueue(log, cfg, input, nil)
   479  	_ = bq.Reset(context.Background(), l1[0], eth.SystemConfig{})
   480  
   481  	for i := 0; i < len(expectedOutputBatches); i++ {
   482  		b, _, e := bq.NextBatch(context.Background(), safeHead)
   483  		require.ErrorIs(t, e, NotEnoughData)
   484  		require.Nil(t, b)
   485  	}
   486  
    487  	// Advance the origin. The underlying stage still has no more inputBatches.
    488  	// This is not enough to auto-advance yet.
   489  	input.origin = l1[1]
   490  	b, _, e := bq.NextBatch(context.Background(), safeHead)
   491  	require.ErrorIs(t, e, io.EOF)
   492  	require.Nil(t, b)
   493  
    494  	// Advance the origin. At this point the batches with timestamps 12 and 14 will be created.
   495  	input.origin = l1[2]
   496  
   497  	// Check for a generated batch at t = 12
   498  	b, _, e = bq.NextBatch(context.Background(), safeHead)
   499  	require.Nil(t, e)
   500  	require.Equal(t, b.Timestamp, uint64(12))
   501  	require.Empty(t, b.Transactions)
   502  	require.Equal(t, rollup.Epoch(0), b.EpochNum)
   503  	safeHead.Number += 1
   504  	safeHead.Time += 2
   505  	safeHead.Hash = mockHash(b.Timestamp, 2)
   506  
   507  	// Check for generated batch at t = 14
   508  	b, _, e = bq.NextBatch(context.Background(), safeHead)
   509  	require.Nil(t, e)
   510  	require.Equal(t, b.Timestamp, uint64(14))
   511  	require.Empty(t, b.Transactions)
   512  	require.Equal(t, rollup.Epoch(0), b.EpochNum)
   513  	safeHead.Number += 1
   514  	safeHead.Time += 2
   515  	safeHead.Hash = mockHash(b.Timestamp, 2)
   516  
   517  	// Check for the inputted batch at t = 16
   518  	b, _, e = bq.NextBatch(context.Background(), safeHead)
   519  	require.Nil(t, e)
   520  	require.Equal(t, b, expectedOutputBatches[0])
   521  	require.Equal(t, rollup.Epoch(0), b.EpochNum)
   522  	safeHead.Number += 1
   523  	safeHead.Time += 2
   524  	safeHead.Hash = mockHash(b.Timestamp, 2)
   525  
   526  	// Advance the origin. At this point the batch with timestamp 18 will be created
   527  	input.origin = l1[3]
   528  
    529  	// Check for the generated batch at t = 18. This batch advances the epoch.
    530  	// Note: we need one io.EOF from the batch queue, which advances its internal view of the L1 blocks,
    531  	// before the batch will be auto-generated.
   532  	_, _, e = bq.NextBatch(context.Background(), safeHead)
   533  	require.Equal(t, e, io.EOF)
   534  	b, _, e = bq.NextBatch(context.Background(), safeHead)
   535  	require.Nil(t, e)
   536  	require.Equal(t, b.Timestamp, uint64(18))
   537  	require.Empty(t, b.Transactions)
   538  	require.Equal(t, rollup.Epoch(1), b.EpochNum)
   539  }
   540  
    541  // BatchQueueAdvancedEpoch tests that the batch queue derives consecutive valid batches with advancing epochs.
    542  // The batch queue's l1Blocks list should be updated as the epochs advance.
   543  func BatchQueueAdvancedEpoch(t *testing.T, batchType int) {
   544  	log := testlog.Logger(t, log.LevelCrit)
   545  	l1 := L1Chain([]uint64{0, 6, 12, 18, 24}) // L1 block time: 6s
   546  	chainId := big.NewInt(1234)
   547  	safeHead := eth.L2BlockRef{
   548  		Hash:           mockHash(4, 2),
   549  		Number:         0,
   550  		ParentHash:     common.Hash{},
   551  		Time:           4,
   552  		L1Origin:       l1[0].ID(),
   553  		SequenceNumber: 0,
   554  	}
   555  	cfg := &rollup.Config{
   556  		Genesis: rollup.Genesis{
   557  			L2Time: 10,
   558  		},
   559  		BlockTime:         2,
   560  		MaxSequencerDrift: 600,
   561  		SeqWindowSize:     30,
   562  		DeltaTime:         getDeltaTime(batchType),
   563  		L2ChainID:         chainId,
   564  	}
   565  
   566  	// expected output of BatchQueue.NextBatch()
   567  	expectedOutputBatches := []*SingularBatch{
   568  		// 3 L2 blocks per L1 block
   569  		b(cfg.L2ChainID, 6, l1[1]),
   570  		b(cfg.L2ChainID, 8, l1[1]),
   571  		b(cfg.L2ChainID, 10, l1[1]),
   572  		b(cfg.L2ChainID, 12, l1[2]),
   573  		b(cfg.L2ChainID, 14, l1[2]),
   574  		b(cfg.L2ChainID, 16, l1[2]),
   575  		b(cfg.L2ChainID, 18, l1[3]),
   576  		b(cfg.L2ChainID, 20, l1[3]),
   577  		b(cfg.L2ChainID, 22, l1[3]),
   578  		nil,
   579  	}
   580  	// expected error of BatchQueue.NextBatch()
   581  	expectedOutputErrors := []error{nil, nil, nil, nil, nil, nil, nil, nil, nil, io.EOF}
   582  	// errors will be returned by fakeBatchQueueInput.NextBatch()
   583  	inputErrors := expectedOutputErrors
   584  	// batches will be returned by fakeBatchQueueInput
   585  	var inputBatches []Batch
   586  	if batchType == SpanBatchType {
   587  		spanBlockCounts := []int{2, 2, 2, 3}
   588  		inputErrors = []error{nil, nil, nil, nil, io.EOF}
   589  		inputBatches = buildSpanBatches(t, &safeHead, expectedOutputBatches, spanBlockCounts, chainId)
   590  		inputBatches = append(inputBatches, nil)
   591  	} else {
   592  		for _, singularBatch := range expectedOutputBatches {
   593  			inputBatches = append(inputBatches, singularBatch)
   594  		}
   595  	}
   596  
   597  	// ChannelInReader origin number
   598  	inputOriginNumber := 2
   599  	input := &fakeBatchQueueInput{
   600  		batches: inputBatches,
   601  		errors:  inputErrors,
   602  		origin:  l1[inputOriginNumber],
   603  	}
   604  
   605  	bq := NewBatchQueue(log, cfg, input, nil)
   606  	_ = bq.Reset(context.Background(), l1[1], eth.SystemConfig{})
   607  
   608  	for i := 0; i < len(expectedOutputBatches); i++ {
   609  		expectedOutput := expectedOutputBatches[i]
   610  		if expectedOutput != nil && uint64(expectedOutput.EpochNum) == l1[inputOriginNumber].Number {
   611  			// Advance ChannelInReader origin if needed
   612  			inputOriginNumber += 1
   613  			input.origin = l1[inputOriginNumber]
   614  		}
   615  		b, _, e := bq.NextBatch(context.Background(), safeHead)
   616  		require.ErrorIs(t, e, expectedOutputErrors[i])
   617  		if b == nil {
   618  			require.Nil(t, expectedOutput)
   619  		} else {
   620  			require.Equal(t, expectedOutput, b)
   621  			safeHead.Number += 1
   622  			safeHead.Time += cfg.BlockTime
   623  			safeHead.Hash = mockHash(b.Timestamp, 2)
   624  			safeHead.L1Origin = b.Epoch()
   625  		}
   626  	}
   627  }
   628  
    629  // BatchQueueShuffle tests that the batch queue can reorder shuffled valid batches.
   630  func BatchQueueShuffle(t *testing.T, batchType int) {
   631  	log := testlog.Logger(t, log.LevelCrit)
   632  	l1 := L1Chain([]uint64{0, 6, 12, 18, 24}) // L1 block time: 6s
   633  	chainId := big.NewInt(1234)
   634  	safeHead := eth.L2BlockRef{
   635  		Hash:           mockHash(4, 2),
   636  		Number:         0,
   637  		ParentHash:     common.Hash{},
   638  		Time:           4,
   639  		L1Origin:       l1[0].ID(),
   640  		SequenceNumber: 0,
   641  	}
   642  	cfg := &rollup.Config{
   643  		Genesis: rollup.Genesis{
   644  			L2Time: 10,
   645  		},
   646  		BlockTime:         2,
   647  		MaxSequencerDrift: 600,
   648  		SeqWindowSize:     30,
   649  		DeltaTime:         getDeltaTime(batchType),
   650  		L2ChainID:         chainId,
   651  	}
   652  
   653  	// expected output of BatchQueue.NextBatch()
   654  	expectedOutputBatches := []*SingularBatch{
   655  		// 3 L2 blocks per L1 block
   656  		b(cfg.L2ChainID, 6, l1[1]),
   657  		b(cfg.L2ChainID, 8, l1[1]),
   658  		b(cfg.L2ChainID, 10, l1[1]),
   659  		b(cfg.L2ChainID, 12, l1[2]),
   660  		b(cfg.L2ChainID, 14, l1[2]),
   661  		b(cfg.L2ChainID, 16, l1[2]),
   662  		b(cfg.L2ChainID, 18, l1[3]),
   663  		b(cfg.L2ChainID, 20, l1[3]),
   664  		b(cfg.L2ChainID, 22, l1[3]),
   665  	}
   666  	// expected error of BatchQueue.NextBatch()
   667  	expectedOutputErrors := []error{nil, nil, nil, nil, nil, nil, nil, nil, nil, io.EOF}
   668  	// errors will be returned by fakeBatchQueueInput.NextBatch()
   669  	inputErrors := expectedOutputErrors
   670  	// batches will be returned by fakeBatchQueueInput
   671  	var inputBatches []Batch
   672  	if batchType == SpanBatchType {
   673  		spanBlockCounts := []int{2, 2, 2, 3}
   674  		inputErrors = []error{nil, nil, nil, nil, io.EOF}
   675  		inputBatches = buildSpanBatches(t, &safeHead, expectedOutputBatches, spanBlockCounts, chainId)
   676  	} else {
   677  		for _, singularBatch := range expectedOutputBatches {
   678  			inputBatches = append(inputBatches, singularBatch)
   679  		}
   680  	}
   681  
   682  	// Shuffle the order of input batches
   683  	rand.Shuffle(len(inputBatches), func(i, j int) {
   684  		inputBatches[i], inputBatches[j] = inputBatches[j], inputBatches[i]
   685  	})
   686  	inputBatches = append(inputBatches, nil)
   687  
   688  	// ChannelInReader origin number
   689  	inputOriginNumber := 2
   690  	input := &fakeBatchQueueInput{
   691  		batches: inputBatches,
   692  		errors:  inputErrors,
   693  		origin:  l1[inputOriginNumber],
   694  	}
   695  
   696  	bq := NewBatchQueue(log, cfg, input, nil)
   697  	_ = bq.Reset(context.Background(), l1[1], eth.SystemConfig{})
   698  
   699  	for i := 0; i < len(expectedOutputBatches); i++ {
   700  		expectedOutput := expectedOutputBatches[i]
   701  		if expectedOutput != nil && uint64(expectedOutput.EpochNum) == l1[inputOriginNumber].Number {
   702  			// Advance ChannelInReader origin if needed
   703  			inputOriginNumber += 1
   704  			input.origin = l1[inputOriginNumber]
   705  		}
   706  		var b *SingularBatch
   707  		var e error
   708  		for j := 0; j < len(expectedOutputBatches); j++ {
   709  			// Multiple NextBatch() executions may be required because the order of input is shuffled
   710  			b, _, e = bq.NextBatch(context.Background(), safeHead)
   711  			if !errors.Is(e, NotEnoughData) {
   712  				break
   713  			}
   714  		}
   715  		require.ErrorIs(t, e, expectedOutputErrors[i])
   716  		if b == nil {
   717  			require.Nil(t, expectedOutput)
   718  		} else {
   719  			require.Equal(t, expectedOutput, b)
   720  			safeHead.Number += 1
   721  			safeHead.Time += cfg.BlockTime
   722  			safeHead.Hash = mockHash(b.Timestamp, 2)
   723  			safeHead.L1Origin = b.Epoch()
   724  		}
   725  	}
   726  }
   727  
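         // TestBatchQueueOverlappingSpanBatch feeds the batch queue span batches that overlap
         // already-processed L2 blocks and asserts that the overlapping blocks are validated
         // against the L2 client while each singular batch is returned exactly once.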
   728  func TestBatchQueueOverlappingSpanBatch(t *testing.T) {
   729  	log := testlog.Logger(t, log.LevelCrit)
   730  	l1 := L1Chain([]uint64{10, 20, 30})
   731  	chainId := big.NewInt(1234)
   732  	safeHead := eth.L2BlockRef{
   733  		Hash:           mockHash(10, 2),
   734  		Number:         0,
   735  		ParentHash:     common.Hash{},
   736  		Time:           10,
   737  		L1Origin:       l1[0].ID(),
   738  		SequenceNumber: 0,
   739  	}
   740  	cfg := &rollup.Config{
   741  		Genesis: rollup.Genesis{
   742  			L2Time: 10,
   743  		},
   744  		BlockTime:         2,
   745  		MaxSequencerDrift: 600,
   746  		SeqWindowSize:     30,
   747  		DeltaTime:         getDeltaTime(SpanBatchType),
   748  		L2ChainID:         chainId,
   749  	}
   750  
   751  	// expected output of BatchQueue.NextBatch()
   752  	expectedOutputBatches := []*SingularBatch{
   753  		b(cfg.L2ChainID, 12, l1[0]),
   754  		b(cfg.L2ChainID, 14, l1[0]),
   755  		b(cfg.L2ChainID, 16, l1[0]),
   756  		b(cfg.L2ChainID, 18, l1[0]),
   757  		b(cfg.L2ChainID, 20, l1[0]),
   758  		b(cfg.L2ChainID, 22, l1[0]),
   759  		nil,
   760  	}
   761  	// expected error of BatchQueue.NextBatch()
   762  	expectedOutputErrors := []error{nil, nil, nil, nil, nil, nil, io.EOF}
   763  	// errors will be returned by fakeBatchQueueInput.NextBatch()
   764  	inputErrors := []error{nil, nil, nil, nil, io.EOF}
   765  
   766  	// batches will be returned by fakeBatchQueueInput
   767  	var inputBatches []Batch
   768  	batchSize := 3
   769  	for i := 0; i < len(expectedOutputBatches)-batchSize; i++ {
   770  		inputBatches = append(inputBatches, NewSpanBatch(expectedOutputBatches[i:i+batchSize]))
   771  	}
   772  	inputBatches = append(inputBatches, nil)
   773  	// inputBatches:
   774  	// [
   775  	//    [12, 14, 16],  // No overlap
   776  	//    [14, 16, 18],  // overlapped blocks: 14, 16
   777  	//    [16, 18, 20],  // overlapped blocks: 16, 18
   778  	//    [18, 20, 22],  // overlapped blocks: 18, 20
   779  	// ]
   780  
   781  	input := &fakeBatchQueueInput{
   782  		batches: inputBatches,
   783  		errors:  inputErrors,
   784  		origin:  l1[0],
   785  	}
   786  
   787  	l2Client := testutils.MockL2Client{}
   788  	var nilErr error
   789  	for i, batch := range expectedOutputBatches {
   790  		if batch != nil {
   791  			blockRef := singularBatchToBlockRef(t, batch, uint64(i+1))
   792  			payload := singularBatchToPayload(t, batch, uint64(i+1))
   793  			if i < 3 {
    794  				// In CheckBatch(), "L2BlockRefByNumber" is called when fetching the parent block of an overlapped span batch,
    795  				// so the blocks at 12, 14, 16 should be fetched.
    796  				// CheckBatch() is called twice for a batch: before pushing to the queue and after popping from the queue.
   797  				l2Client.Mock.On("L2BlockRefByNumber", uint64(i+1)).Times(2).Return(blockRef, &nilErr)
   798  			}
   799  			if i == 1 || i == 4 {
    800  				// In CheckBatch(), "PayloadByNumber" is called when fetching the overlapped blocks.
    801  				// The blocks at 14 and 20 are included in the overlapped blocks once.
    802  				// CheckBatch() is called twice for a batch: before adding to the queue and after getting from the queue.
   803  				l2Client.Mock.On("PayloadByNumber", uint64(i+1)).Times(2).Return(&payload, &nilErr)
   804  			} else if i == 2 || i == 3 {
    805  				// The blocks at 16 and 18 are included in the overlapped blocks twice.
   806  				l2Client.Mock.On("PayloadByNumber", uint64(i+1)).Times(4).Return(&payload, &nilErr)
   807  			}
   808  		}
   809  	}
   810  
   811  	bq := NewBatchQueue(log, cfg, input, &l2Client)
   812  	_ = bq.Reset(context.Background(), l1[0], eth.SystemConfig{})
   813  	// Advance the origin
   814  	input.origin = l1[1]
   815  
   816  	for i := 0; i < len(expectedOutputBatches); i++ {
   817  		b, _, e := bq.NextBatch(context.Background(), safeHead)
   818  		require.ErrorIs(t, e, expectedOutputErrors[i])
   819  		if b == nil {
   820  			require.Nil(t, expectedOutputBatches[i])
   821  		} else {
   822  			require.Equal(t, expectedOutputBatches[i], b)
   823  			safeHead.Number += 1
   824  			safeHead.Time += cfg.BlockTime
   825  			safeHead.Hash = mockHash(b.Timestamp, 2)
   826  			safeHead.L1Origin = b.Epoch()
   827  		}
   828  	}
   829  
   830  	l2Client.Mock.AssertExpectations(t)
   831  }
   832  
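         // TestBatchQueueComplex feeds the batch queue a shuffled mix of singular and
         // overlapping span batches and asserts that the expected singular batches are
         // still returned in order.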
   833  func TestBatchQueueComplex(t *testing.T) {
   834  	log := testlog.Logger(t, log.LevelCrit)
   835  	l1 := L1Chain([]uint64{0, 6, 12, 18, 24}) // L1 block time: 6s
   836  	chainId := big.NewInt(1234)
   837  	safeHead := eth.L2BlockRef{
   838  		Hash:           mockHash(4, 2),
   839  		Number:         0,
   840  		ParentHash:     common.Hash{},
   841  		Time:           4,
   842  		L1Origin:       l1[0].ID(),
   843  		SequenceNumber: 0,
   844  	}
   845  	cfg := &rollup.Config{
   846  		Genesis: rollup.Genesis{
   847  			L2Time: 10,
   848  		},
   849  		BlockTime:         2,
   850  		MaxSequencerDrift: 600,
   851  		SeqWindowSize:     30,
   852  		DeltaTime:         getDeltaTime(SpanBatchType),
   853  		L2ChainID:         chainId,
   854  	}
   855  
   856  	// expected output of BatchQueue.NextBatch()
   857  	expectedOutputBatches := []*SingularBatch{
   858  		// 3 L2 blocks per L1 block
   859  		b(cfg.L2ChainID, 6, l1[1]),
   860  		b(cfg.L2ChainID, 8, l1[1]),
   861  		b(cfg.L2ChainID, 10, l1[1]),
   862  		b(cfg.L2ChainID, 12, l1[2]),
   863  		b(cfg.L2ChainID, 14, l1[2]),
   864  		b(cfg.L2ChainID, 16, l1[2]),
   865  		b(cfg.L2ChainID, 18, l1[3]),
   866  		b(cfg.L2ChainID, 20, l1[3]),
   867  		b(cfg.L2ChainID, 22, l1[3]),
   868  	}
   869  	// expected error of BatchQueue.NextBatch()
   870  	expectedOutputErrors := []error{nil, nil, nil, nil, nil, nil, nil, nil, nil, io.EOF}
   871  	// errors will be returned by fakeBatchQueueInput.NextBatch()
   872  	inputErrors := []error{nil, nil, nil, nil, nil, nil, io.EOF}
   873  	// batches will be returned by fakeBatchQueueInput
   874  	inputBatches := []Batch{
   875  		NewSpanBatch(expectedOutputBatches[0:2]), // [6, 8] - no overlap
   876  		expectedOutputBatches[2],                 // [10] - no overlap
   877  		NewSpanBatch(expectedOutputBatches[1:4]), // [8, 10, 12] - overlapped blocks: 8 or 8, 10
   878  		expectedOutputBatches[4],                 // [14] - no overlap
   879  		NewSpanBatch(expectedOutputBatches[4:6]), // [14, 16] - overlapped blocks: nothing or 14
   880  		NewSpanBatch(expectedOutputBatches[6:9]), // [18, 20, 22] - no overlap
   881  	}
   882  
   883  	// Shuffle the order of input batches
   884  	rand.Shuffle(len(inputBatches), func(i, j int) {
   885  		inputBatches[i], inputBatches[j] = inputBatches[j], inputBatches[i]
   886  	})
   887  
   888  	inputBatches = append(inputBatches, nil)
   889  
   890  	// ChannelInReader origin number
   891  	inputOriginNumber := 2
   892  	input := &fakeBatchQueueInput{
   893  		batches: inputBatches,
   894  		errors:  inputErrors,
   895  		origin:  l1[inputOriginNumber],
   896  	}
   897  
   898  	l2Client := testutils.MockL2Client{}
   899  	var nilErr error
   900  	for i, batch := range expectedOutputBatches {
   901  		if batch != nil {
   902  			blockRef := singularBatchToBlockRef(t, batch, uint64(i+1))
   903  			payload := singularBatchToPayload(t, batch, uint64(i+1))
   904  			if i == 0 || i == 3 {
    905  				// In CheckBatch(), "L2BlockRefByNumber" is called when fetching the parent block of an overlapped span batch,
    906  				// so the blocks at 6 and 12 could be fetched, depending on the order of the batches.
   907  				l2Client.Mock.On("L2BlockRefByNumber", uint64(i+1)).Return(blockRef, &nilErr).Maybe()
   908  			}
   909  			if i == 1 || i == 2 || i == 4 {
    910  				// In CheckBatch(), "PayloadByNumber" is called when fetching the overlapped blocks,
    911  				// so the blocks at 8, 10, and 14 could be fetched, depending on the order of the batches.
   912  				l2Client.Mock.On("PayloadByNumber", uint64(i+1)).Return(&payload, &nilErr).Maybe()
   913  			}
   914  		}
   915  	}
   916  
   917  	bq := NewBatchQueue(log, cfg, input, &l2Client)
   918  	_ = bq.Reset(context.Background(), l1[1], eth.SystemConfig{})
   919  
   920  	for i := 0; i < len(expectedOutputBatches); i++ {
   921  		expectedOutput := expectedOutputBatches[i]
   922  		if expectedOutput != nil && uint64(expectedOutput.EpochNum) == l1[inputOriginNumber].Number {
   923  			// Advance ChannelInReader origin if needed
   924  			inputOriginNumber += 1
   925  			input.origin = l1[inputOriginNumber]
   926  		}
   927  		var b *SingularBatch
   928  		var e error
   929  		for j := 0; j < len(expectedOutputBatches); j++ {
   930  			// Multiple NextBatch() executions may be required because the order of input is shuffled
   931  			b, _, e = bq.NextBatch(context.Background(), safeHead)
   932  			if !errors.Is(e, NotEnoughData) {
   933  				break
   934  			}
   935  		}
   936  		require.ErrorIs(t, e, expectedOutputErrors[i])
   937  		if b == nil {
   938  			require.Nil(t, expectedOutput)
   939  		} else {
   940  			require.Equal(t, expectedOutput, b)
   941  			safeHead.Number += 1
   942  			safeHead.Time += cfg.BlockTime
   943  			safeHead.Hash = mockHash(b.Timestamp, 2)
   944  			safeHead.L1Origin = b.Epoch()
   945  		}
   946  	}
   947  
   948  	l2Client.Mock.AssertExpectations(t)
   949  }
   950  
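         // TestBatchQueueResetSpan tests that the batch queue drops the cached remainder of a
         // span batch when NextBatch is called with a stale safe head (i.e. the previously
         // returned batch was not processed).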
   951  func TestBatchQueueResetSpan(t *testing.T) {
   952  	log := testlog.Logger(t, log.LevelCrit)
   953  	chainId := big.NewInt(1234)
   954  	l1 := L1Chain([]uint64{0, 4, 8})
   955  	safeHead := eth.L2BlockRef{
   956  		Hash:           mockHash(0, 2),
   957  		Number:         0,
   958  		ParentHash:     common.Hash{},
   959  		Time:           0,
   960  		L1Origin:       l1[0].ID(),
   961  		SequenceNumber: 0,
   962  	}
   963  	cfg := &rollup.Config{
   964  		Genesis: rollup.Genesis{
   965  			L2Time: 10,
   966  		},
   967  		BlockTime:         2,
   968  		MaxSequencerDrift: 600,
   969  		SeqWindowSize:     30,
   970  		DeltaTime:         getDeltaTime(SpanBatchType),
   971  		L2ChainID:         chainId,
   972  	}
   973  
   974  	singularBatches := []*SingularBatch{
   975  		b(cfg.L2ChainID, 2, l1[0]),
   976  		b(cfg.L2ChainID, 4, l1[1]),
   977  		b(cfg.L2ChainID, 6, l1[1]),
   978  		b(cfg.L2ChainID, 8, l1[2]),
   979  	}
   980  
   981  	input := &fakeBatchQueueInput{
   982  		batches: []Batch{NewSpanBatch(singularBatches)},
   983  		errors:  []error{nil},
   984  		origin:  l1[2],
   985  	}
   986  	l2Client := testutils.MockL2Client{}
   987  	bq := NewBatchQueue(log, cfg, input, &l2Client)
   988  	bq.l1Blocks = l1 // Set enough l1 blocks to derive span batch
   989  
    990  	// This NextBatch() will derive the span batch, return the first singular batch, and save the rest of the batches in nextSpan.
   991  	nextBatch, _, err := bq.NextBatch(context.Background(), safeHead)
   992  	require.NoError(t, err)
   993  	require.Equal(t, nextBatch, singularBatches[0])
   994  	require.Equal(t, len(bq.nextSpan), len(singularBatches)-1)
   995  	// batch queue's epoch should not be advanced until the entire span batch is returned
   996  	require.Equal(t, bq.l1Blocks[0], l1[0])
   997  
   998  	// This NextBatch() will return the second singular batch.
   999  	safeHead.Number += 1
  1000  	safeHead.Time += cfg.BlockTime
  1001  	safeHead.Hash = mockHash(nextBatch.Timestamp, 2)
  1002  	safeHead.L1Origin = nextBatch.Epoch()
  1003  	nextBatch, _, err = bq.NextBatch(context.Background(), safeHead)
  1004  	require.NoError(t, err)
  1005  	require.Equal(t, nextBatch, singularBatches[1])
  1006  	require.Equal(t, len(bq.nextSpan), len(singularBatches)-2)
  1007  	// batch queue's epoch should not be advanced until the entire span batch is returned
  1008  	require.Equal(t, bq.l1Blocks[0], l1[0])
  1009  
   1010  	// Call NextBatch() with a stale safeHead, meaning the second batch failed to be processed.
   1011  	// The batch queue should drop the entire span batch.
  1012  	nextBatch, _, err = bq.NextBatch(context.Background(), safeHead)
  1013  	require.Nil(t, nextBatch)
  1014  	require.ErrorIs(t, err, io.EOF)
  1015  	require.Equal(t, len(bq.nextSpan), 0)
  1016  }