github.com/0xPolygon/supernets2-node@v0.0.0-20230711153321-2fe574524eaa/sequencer/sequencer.go (about)

package sequencer

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/0xPolygon/supernets2-node/event"
	"github.com/0xPolygon/supernets2-node/log"
	"github.com/0xPolygon/supernets2-node/pool"
	"github.com/0xPolygon/supernets2-node/sequencer/metrics"
	"github.com/0xPolygon/supernets2-node/state"
	stateMetrics "github.com/0xPolygon/supernets2-node/state/metrics"
	"github.com/ethereum/go-ethereum/common"
)

// Sequencer represents a sequencer. It orchestrates the worker, dbManager,
// finalizer and closing signals manager that select pool transactions and
// build L2 batches.
type Sequencer struct {
	cfg Config

	pool         txPool
	state        stateInterface
	eventLog     *event.EventLog
	ethTxManager ethTxManager
	etherman     etherman

	address common.Address
}

// batchConstraints represents the constraints for a batch
type batchConstraints struct {
	MaxTxsPerBatch       uint64
	MaxBatchBytesSize    uint64
	MaxCumulativeGasUsed uint64
	MaxKeccakHashes      uint32
	MaxPoseidonHashes    uint32
	MaxPoseidonPaddings  uint32
	MaxMemAligns         uint32
	MaxArithmetics       uint32
	MaxBinaries          uint32
	MaxSteps             uint32
}

// TODO: Add tests to config_test.go

// batchResourceWeights holds the relative weight of each batch resource;
// the values come from the configuration and are passed to the worker.
type batchResourceWeights struct {
	WeightBatchBytesSize    int
	WeightCumulativeGasUsed int
	WeightKeccakHashes      int
	WeightPoseidonHashes    int
	WeightPoseidonPaddings  int
	WeightMemAligns         int
	WeightArithmetics       int
	WeightBinaries          int
	WeightSteps             int
}

// L2ReorgEvent is the event triggered when a reorg happens in L2
type L2ReorgEvent struct {
	TxHashes []common.Hash
}

// ClosingSignalCh groups the channels used to receive batch closing signals
type ClosingSignalCh struct {
	ForcedBatchCh chan state.ForcedBatch
	GERCh         chan common.Hash
	L2ReorgCh     chan L2ReorgEvent
}

// TxsStore groups the channel and the wait group used to store processed txs in order
type TxsStore struct {
	Ch chan *txToStore
	Wg *sync.WaitGroup
}

// txToStore represents a transaction to store.
type txToStore struct {
	txResponse               *state.ProcessTransactionResponse
	batchNumber              uint64
	coinbase                 common.Address
	timestamp                uint64
	previousL2BlockStateRoot common.Hash
	isForcedBatch            bool
}

// New initializes a new Sequencer, resolving the trusted sequencer address
// from the etherman
func New(cfg Config, txPool txPool, state stateInterface, etherman etherman, manager ethTxManager, eventLog *event.EventLog) (*Sequencer, error) {
	addr, err := etherman.TrustedSequencer()
	if err != nil {
		return nil, fmt.Errorf("failed to get trusted sequencer address, err: %v", err)
	}

	return &Sequencer{
		cfg:          cfg,
		pool:         txPool,
		state:        state,
		etherman:     etherman,
		ethTxManager: manager,
		address:      addr,
		eventLog:     eventLog,
	}, nil
}

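// A minimal wiring sketch (illustrative only, not part of this file): the
// concrete txPool, stateInterface, etherman, ethTxManager and event log are
// assumed to be built elsewhere in the node, and cfg.Sequencer is a
// hypothetical field holding this package's Config.
//
//	seq, err := sequencer.New(cfg.Sequencer, poolInstance, st, etherman, ethTxMgr, eventLog)
//	if err != nil {
//		log.Fatalf("failed to create sequencer, err: %v", err)
//	}
//	seq.Start(ctx) // blocks until ctx is cancelled
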
// Start starts the sequencer: it waits for the synchronizer to catch up,
// wires up the worker, dbManager, finalizer and closing signals manager,
// and then blocks until the context is cancelled
func (s *Sequencer) Start(ctx context.Context) {
	for !s.isSynced(ctx) {
		log.Infof("waiting for synchronizer to sync...")
		time.Sleep(s.cfg.WaitPeriodPoolIsEmpty.Duration)
	}
	metrics.Register()

	closingSignalCh := ClosingSignalCh{
		ForcedBatchCh: make(chan state.ForcedBatch),
		GERCh:         make(chan common.Hash),
		L2ReorgCh:     make(chan L2ReorgEvent),
	}

	txsStore := TxsStore{
		Ch: make(chan *txToStore),
		Wg: new(sync.WaitGroup),
	}

	batchConstraints := batchConstraints{
		MaxTxsPerBatch:       s.cfg.MaxTxsPerBatch,
		MaxBatchBytesSize:    s.cfg.MaxBatchBytesSize,
		MaxCumulativeGasUsed: s.cfg.MaxCumulativeGasUsed,
		MaxKeccakHashes:      s.cfg.MaxKeccakHashes,
		MaxPoseidonHashes:    s.cfg.MaxPoseidonHashes,
		MaxPoseidonPaddings:  s.cfg.MaxPoseidonPaddings,
		MaxMemAligns:         s.cfg.MaxMemAligns,
		MaxArithmetics:       s.cfg.MaxArithmetics,
		MaxBinaries:          s.cfg.MaxBinaries,
		MaxSteps:             s.cfg.MaxSteps,
	}
	batchResourceWeights := batchResourceWeights{
		WeightBatchBytesSize:    s.cfg.WeightBatchBytesSize,
		WeightCumulativeGasUsed: s.cfg.WeightCumulativeGasUsed,
		WeightKeccakHashes:      s.cfg.WeightKeccakHashes,
		WeightPoseidonHashes:    s.cfg.WeightPoseidonHashes,
		WeightPoseidonPaddings:  s.cfg.WeightPoseidonPaddings,
		WeightMemAligns:         s.cfg.WeightMemAligns,
		WeightArithmetics:       s.cfg.WeightArithmetics,
		WeightBinaries:          s.cfg.WeightBinaries,
		WeightSteps:             s.cfg.WeightSteps,
	}

	err := s.pool.MarkWIPTxsAsPending(ctx)
	if err != nil {
		log.Fatalf("failed to mark WIP txs as pending, err: %v", err)
	}

	// Wire up the worker, dbManager, finalizer and closing signals manager
	worker := NewWorker(s.cfg.Worker, s.state, batchConstraints, batchResourceWeights)
	dbManager := newDBManager(ctx, s.cfg.DBManager, s.pool, s.state, worker, closingSignalCh, txsStore, batchConstraints)
	go dbManager.Start()

	finalizer := newFinalizer(s.cfg.Finalizer, worker, dbManager, s.state, s.address, s.isSynced, closingSignalCh, txsStore, batchConstraints, s.eventLog)
	currBatch, processingReq := s.bootstrap(ctx, dbManager, finalizer)
	go finalizer.Start(ctx, currBatch, processingReq)

	closingSignalsManager := newClosingSignalsManager(ctx, finalizer.dbManager, closingSignalCh, finalizer.cfg, s.etherman)
	go closingSignalsManager.Start()

	go s.trackOldTxs(ctx)
	tickerProcessTxs := time.NewTicker(s.cfg.WaitPeriodPoolIsEmpty.Duration)
	defer tickerProcessTxs.Stop()

	// Expire transactions that have spent too long in the worker
	go func() {
		for {
			time.Sleep(s.cfg.TxLifetimeCheckTimeout.Duration)
			txTrackers := worker.ExpireTransactions(s.cfg.MaxTxLifetime.Duration)
			failedReason := ErrExpiredTransaction.Error()
			for _, txTracker := range txTrackers {
				err := s.pool.UpdateTxStatus(ctx, txTracker.Hash, pool.TxStatusFailed, false, &failedReason)
				if err != nil {
					log.Errorf("failed to update tx status, err: %v", err)
				}
			}
		}
	}()

	// Wait until the context is cancelled
	<-ctx.Done()
}

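// bootstrap prepares the finalizer's starting point: if no batch exists yet it
// creates the genesis batch and builds the first process request from it;
// otherwise it syncs the finalizer with the last batch found in the state.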
func (s *Sequencer) bootstrap(ctx context.Context, dbManager *dbManager, finalizer *finalizer) (*WipBatch, *state.ProcessRequest) {
	var (
		currBatch      *WipBatch
		processRequest *state.ProcessRequest
	)

	batchNum, err := dbManager.GetLastBatchNumber(ctx)
	for err != nil {
		if errors.Is(err, state.ErrStateNotSynchronized) {
			log.Warnf("state is not synchronized, trying to get last batch num once again...")
			time.Sleep(s.cfg.WaitPeriodPoolIsEmpty.Duration)
			batchNum, err = dbManager.GetLastBatchNumber(ctx)
		} else {
			log.Fatalf("failed to get last batch number, err: %v", err)
		}
	}
	if batchNum == 0 {
		///////////////////
		// GENESIS Batch //
		///////////////////
		processingCtx := dbManager.CreateFirstBatch(ctx, s.address)
		timestamp := processingCtx.Timestamp
		_, oldStateRoot, err := finalizer.getLastBatchNumAndOldStateRoot(ctx)
		if err != nil {
			log.Fatalf("failed to get old state root, err: %v", err)
		}
		processRequest = &state.ProcessRequest{
			BatchNumber:    processingCtx.BatchNumber,
			OldStateRoot:   oldStateRoot,
			GlobalExitRoot: processingCtx.GlobalExitRoot,
			Coinbase:       processingCtx.Coinbase,
			Timestamp:      timestamp,
			Caller:         stateMetrics.SequencerCallerLabel,
		}
		currBatch = &WipBatch{
			globalExitRoot:     processingCtx.GlobalExitRoot,
			initialStateRoot:   oldStateRoot,
			stateRoot:          oldStateRoot,
			batchNumber:        processingCtx.BatchNumber,
			coinbase:           processingCtx.Coinbase,
			timestamp:          timestamp,
			remainingResources: getMaxRemainingResources(finalizer.batchConstraints),
		}
	} else {
		err := finalizer.syncWithState(ctx, &batchNum)
		if err != nil {
			log.Fatalf("failed to sync with state, err: %v", err)
		}
		currBatch = finalizer.batch
		processRequest = &finalizer.processRequest
	}

	return currBatch, processRequest
}

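// trackOldTxs periodically asks the state for txs older than the configured
// number of L1 blocks and deletes them from the pool.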
func (s *Sequencer) trackOldTxs(ctx context.Context) {
	ticker := time.NewTicker(s.cfg.FrequencyToCheckTxsForDelete.Duration)
	for {
		waitTick(ctx, ticker)
		log.Infof("trying to get txs to delete from the pool...")
		txHashes, err := s.state.GetTxsOlderThanNL1Blocks(ctx, s.cfg.BlocksAmountForTxsToBeDeleted, nil)
		if err != nil {
			log.Errorf("failed to get txs hashes to delete, err: %v", err)
			continue
		}
		log.Infof("will try to delete %d redundant txs", len(txHashes))
		err = s.pool.DeleteTransactionsByHashes(ctx, txHashes)
		if err != nil {
			log.Errorf("failed to delete txs from the pool, err: %v", err)
			continue
		}
		log.Infof("deleted %d selected txs from the pool", len(txHashes))
	}
}

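// waitTick blocks until the next ticker tick or until the context is
// cancelled, whichever happens first.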
func waitTick(ctx context.Context, ticker *time.Ticker) {
	select {
	case <-ticker.C:
		// nothing
	case <-ctx.Done():
		return
	}
}

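// isSynced reports whether the synchronizer has caught up: either the local
// state is already ahead of the last virtual batch, or the last virtual batch
// known locally is not behind the last batch reported by L1.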
func (s *Sequencer) isSynced(ctx context.Context) bool {
	lastSyncedBatchNum, err := s.state.GetLastVirtualBatchNum(ctx, nil)
	if err != nil && err != state.ErrNotFound {
		log.Errorf("failed to get last virtual batch num, err: %v", err)
		return false
	}
	lastBatchNum, err := s.state.GetLastBatchNumber(ctx, nil)
	if err != nil && err != state.ErrNotFound {
		log.Errorf("failed to get last batch num, err: %v", err)
		return false
	}
	if lastBatchNum > lastSyncedBatchNum {
		return true
	}
	lastEthBatchNum, err := s.etherman.GetLatestBatchNumber()
	if err != nil {
		log.Errorf("failed to get last eth batch, err: %v", err)
		return false
	}
	if lastSyncedBatchNum < lastEthBatchNum {
		log.Infof("waiting for the state to be synced, lastSyncedBatchNum: %d, lastEthBatchNum: %d", lastSyncedBatchNum, lastEthBatchNum)
		return false
	}

	return true
}

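// getMaxRemainingResources converts the batch constraints into a
// state.BatchResources value holding the full budget available to a new batch.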
func getMaxRemainingResources(constraints batchConstraints) state.BatchResources {
	return state.BatchResources{
		ZKCounters: state.ZKCounters{
			CumulativeGasUsed:    constraints.MaxCumulativeGasUsed,
			UsedKeccakHashes:     constraints.MaxKeccakHashes,
			UsedPoseidonHashes:   constraints.MaxPoseidonHashes,
			UsedPoseidonPaddings: constraints.MaxPoseidonPaddings,
			UsedMemAligns:        constraints.MaxMemAligns,
			UsedArithmetics:      constraints.MaxArithmetics,
			UsedBinaries:         constraints.MaxBinaries,
			UsedSteps:            constraints.MaxSteps,
		},
		Bytes: constraints.MaxBatchBytesSize,
	}
}
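
// The budget returned by getMaxRemainingResources is stored in the wip batch
// (see bootstrap) and shrinks as transactions are added. The helper below is
// only an illustrative sketch of that bookkeeping: fitsInBatch and its logic
// are hypothetical, not code from this repository, and only a few counters are
// shown (the remaining ones would be handled the same way).
//
//	func fitsInBatch(remaining *state.BatchResources, used state.BatchResources) bool {
//		if used.Bytes > remaining.Bytes ||
//			used.ZKCounters.CumulativeGasUsed > remaining.ZKCounters.CumulativeGasUsed ||
//			used.ZKCounters.UsedKeccakHashes > remaining.ZKCounters.UsedKeccakHashes ||
//			used.ZKCounters.UsedSteps > remaining.ZKCounters.UsedSteps {
//			return false // the tx would overflow the batch budget, so the batch should be closed
//		}
//		remaining.Bytes -= used.Bytes
//		remaining.ZKCounters.CumulativeGasUsed -= used.ZKCounters.CumulativeGasUsed
//		remaining.ZKCounters.UsedKeccakHashes -= used.ZKCounters.UsedKeccakHashes
//		remaining.ZKCounters.UsedSteps -= used.ZKCounters.UsedSteps
//		return true
//	}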