github.com/0xPolygon/supernets2-node@v0.0.0-20230711153321-2fe574524eaa/sequencer/finalizer.go

package sequencer

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"math/big"
	"sort"
	"sync"
	"time"

	"github.com/0xPolygon/supernets2-node/event"
	"github.com/0xPolygon/supernets2-node/log"
	"github.com/0xPolygon/supernets2-node/pool"
	"github.com/0xPolygon/supernets2-node/sequencer/metrics"
	"github.com/0xPolygon/supernets2-node/state"
	stateMetrics "github.com/0xPolygon/supernets2-node/state/metrics"
	"github.com/0xPolygon/supernets2-node/state/runtime/executor"
	"github.com/ethereum/go-ethereum/common"
	"github.com/jackc/pgx/v4"
)

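// oneHundred is the divisor used to turn ResourcePercentageToCloseBatch into a fraction when computing constraint thresholds.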
const oneHundred = 100

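// now aliases time.Now as a package-level variable so the finalizer's time source can be overridden (e.g. stubbed in tests).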
var (
	now = time.Now
)

// finalizer represents the finalizer component of the sequencer.
type finalizer struct {
	cfg                FinalizerCfg
	txsStore           TxsStore
	closingSignalCh    ClosingSignalCh
	isSynced           func(ctx context.Context) bool
	sequencerAddress   common.Address
	worker             workerInterface
	dbManager          dbManagerInterface
	executor           stateInterface
	batch              *WipBatch
	batchConstraints   batchConstraints
	processRequest     state.ProcessRequest
	sharedResourcesMux *sync.RWMutex
	lastGERHash        common.Hash
	// closing signals
	nextGER                 common.Hash
	nextGERDeadline         int64
	nextGERMux              *sync.RWMutex
	nextForcedBatches       []state.ForcedBatch
	nextForcedBatchDeadline int64
	nextForcedBatchesMux    *sync.RWMutex
	handlingL2Reorg         bool
	eventLog                *event.EventLog
}

// WipBatch represents a work-in-progress batch.
type WipBatch struct {
	batchNumber        uint64
	coinbase           common.Address
	initialStateRoot   common.Hash
	stateRoot          common.Hash
	localExitRoot      common.Hash
	timestamp          time.Time
	globalExitRoot     common.Hash // 0x000...0 (state.ZeroHash) means the global exit root is not updated
	remainingResources state.BatchResources
	countOfTxs         int
	closingReason      state.ClosingReason
}

func (w *WipBatch) isEmpty() bool {
	return w.countOfTxs == 0
}

// newFinalizer returns a new instance of finalizer.
func newFinalizer(
	cfg FinalizerCfg,
	worker workerInterface,
	dbManager dbManagerInterface,
	executor stateInterface,
	sequencerAddr common.Address,
	isSynced func(ctx context.Context) bool,
	closingSignalCh ClosingSignalCh,
	txsStore TxsStore,
	batchConstraints batchConstraints,
	eventLog *event.EventLog,
) *finalizer {
	return &finalizer{
		cfg:                cfg,
		txsStore:           txsStore,
		closingSignalCh:    closingSignalCh,
		isSynced:           isSynced,
		sequencerAddress:   sequencerAddr,
		worker:             worker,
		dbManager:          dbManager,
		executor:           executor,
		batch:              new(WipBatch),
		batchConstraints:   batchConstraints,
		processRequest:     state.ProcessRequest{},
		sharedResourcesMux: new(sync.RWMutex),
		lastGERHash:        state.ZeroHash,
		// closing signals
		nextGER:                 common.Hash{},
		nextGERDeadline:         0,
		nextGERMux:              new(sync.RWMutex),
		nextForcedBatches:       make([]state.ForcedBatch, 0),
		nextForcedBatchDeadline: 0,
		nextForcedBatchesMux:    new(sync.RWMutex),
		eventLog:                eventLog,
	}
}

// Start starts the finalizer.
func (f *finalizer) Start(ctx context.Context, batch *WipBatch, processingReq *state.ProcessRequest) {
	var err error
	if batch != nil {
		f.batch = batch
	} else {
		f.batch, err = f.dbManager.GetWIPBatch(ctx)
		if err != nil {
			log.Fatalf("failed to get work-in-progress batch from DB, Err: %s", err)
		}
	}

	if processingReq == nil {
		log.Fatal("processingReq should not be nil")
	} else {
		f.processRequest = *processingReq
	}

	// Closing signals receiver
	go f.listenForClosingSignals(ctx)

	// Processing transactions and finalizing batches
	f.finalizeBatches(ctx)
}

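// SortForcedBatches sorts the given forced batches by ForcedBatchNumber in ascending order and returns the same slice.
// A minimal usage sketch (hypothetical values):
//
//	fbs := f.SortForcedBatches([]state.ForcedBatch{
//		{ForcedBatchNumber: 3}, {ForcedBatchNumber: 1}, {ForcedBatchNumber: 2},
//	})
//	// fbs is now ordered 1, 2, 3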
func (f *finalizer) SortForcedBatches(fb []state.ForcedBatch) []state.ForcedBatch {
	if len(fb) == 0 {
		return fb
	}
	// Sort by ForcedBatchNumber
	sort.Slice(fb, func(i, j int) bool {
		return fb[i].ForcedBatchNumber < fb[j].ForcedBatchNumber
	})

	return fb
}

// listenForClosingSignals listens for closing signals and sets the deadlines by which the batch needs to be closed.
func (f *finalizer) listenForClosingSignals(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			log.Infof("finalizer closing signal listener received context done, Err: %s", ctx.Err())
			return
		// ForcedBatch ch
		case fb := <-f.closingSignalCh.ForcedBatchCh:
			log.Debugf("finalizer received forced batch at block number: %v", fb.BlockNumber)
			f.nextForcedBatchesMux.Lock()
			f.nextForcedBatches = f.SortForcedBatches(append(f.nextForcedBatches, fb))
			if f.nextForcedBatchDeadline == 0 {
				f.setNextForcedBatchDeadline()
			}
			f.nextForcedBatchesMux.Unlock()
		// GlobalExitRoot ch
		case ger := <-f.closingSignalCh.GERCh:
			log.Debugf("finalizer received global exit root: %s", ger.String())
			f.nextGERMux.Lock()
			f.nextGER = ger
			if f.nextGERDeadline == 0 {
				f.setNextGERDeadline()
			}
			f.nextGERMux.Unlock()
		// L2Reorg ch
		case <-f.closingSignalCh.L2ReorgCh:
			log.Debug("finalizer received L2 reorg event")
			f.handlingL2Reorg = true
			f.halt(ctx, fmt.Errorf("L2 reorg event received"))
			return
		}
	}
}

// finalizeBatches runs the endless loop that processes transactions and finalizes batches.
func (f *finalizer) finalizeBatches(ctx context.Context) {
	log.Debug("finalizer init loop")
	for {
		start := now()
		tx := f.worker.GetBestFittingTx(f.batch.remainingResources)
		metrics.WorkerProcessingTime(time.Since(start))
		if tx != nil {
			// Timestamp resolution
			if f.batch.isEmpty() {
				f.batch.timestamp = now()
			}

			f.sharedResourcesMux.Lock()
			log.Debugf("processing tx: %s", tx.Hash.Hex())
			err := f.processTransaction(ctx, tx)
			if err != nil {
				log.Errorf("failed to process transaction in finalizeBatches, Err: %v", err)
			}
			f.sharedResourcesMux.Unlock()
		} else {
			// wait for new txs
			log.Debugf("no transactions to be processed. Sleeping for %v", f.cfg.SleepDuration.Duration)
			if f.cfg.SleepDuration.Duration > 0 {
				time.Sleep(f.cfg.SleepDuration.Duration)
			}
		}

		if f.isDeadlineEncountered() {
			log.Infof("Closing batch: %d, because deadline was encountered.", f.batch.batchNumber)
			f.finalizeBatch(ctx)
		} else if f.isBatchFull() || f.isBatchAlmostFull() {
			log.Infof("Closing batch: %d, because it's almost full.", f.batch.batchNumber)
			f.finalizeBatch(ctx)
		}

		if err := ctx.Err(); err != nil {
			log.Infof("Stopping finalizer because of context, err: %s", err)
			return
		}
	}
}

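// isBatchFull reports whether the WIP batch has reached MaxTxsPerBatch and, if so, records BatchFullClosingReason as the closing reason.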
func (f *finalizer) isBatchFull() bool {
	if f.batch.countOfTxs >= int(f.batchConstraints.MaxTxsPerBatch) {
		log.Infof("Closing batch: %d, because it's full.", f.batch.batchNumber)
		f.batch.closingReason = state.BatchFullClosingReason
		return true
	}
	return false
}

// finalizeBatch retries until it successfully closes the current batch and opens a new one, potentially processing forced batches between closing the old batch and opening the new empty one
func (f *finalizer) finalizeBatch(ctx context.Context) {
	start := time.Now()
	defer func() {
		metrics.ProcessingTime(time.Since(start))
	}()
	f.txsStore.Wg.Wait()
	var err error
	f.batch, err = f.newWIPBatch(ctx)
	for err != nil {
		log.Errorf("failed to create new work-in-progress batch, Err: %s", err)
		f.batch, err = f.newWIPBatch(ctx)
	}
}

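// halt stores a critical event and then blocks forever, logging the fatal error in a loop; it never returns.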
func (f *finalizer) halt(ctx context.Context, err error) {
	event := &event.Event{
		ReceivedAt:  time.Now(),
		Source:      event.Source_Node,
		Component:   event.Component_Sequencer,
		Level:       event.Level_Critical,
		EventID:     event.EventID_FinalizerHalt,
		Description: fmt.Sprintf("finalizer halted due to error: %s", err),
	}

	eventErr := f.eventLog.LogEvent(ctx, event)
	if eventErr != nil {
		log.Errorf("error storing finalizer halt event: %v", eventErr)
	}

	for {
		log.Errorf("fatal error: %s", err)
		log.Error("halting the finalizer")
		time.Sleep(5 * time.Second) //nolint:gomnd
	}
}

// newWIPBatch closes the current batch and opens a new one, potentially processing forced batches between closing the old batch and opening the new empty one
func (f *finalizer) newWIPBatch(ctx context.Context) (*WipBatch, error) {
	f.sharedResourcesMux.Lock()
	defer f.sharedResourcesMux.Unlock()

	var err error
	// comparing Hash.String() to "" can never be true, so compare against the zero hash directly
	if f.batch.stateRoot == state.ZeroHash || f.batch.localExitRoot == state.ZeroHash {
		return nil, errors.New("state root and local exit root must have value to close batch")
	}

	// We need to process the batch to update the state root before closing the batch
	if f.batch.initialStateRoot == f.batch.stateRoot {
		log.Info("reprocessing batch because the state root has not changed...")
		err := f.processTransaction(ctx, nil)
		if err != nil {
			return nil, err
		}
	}

	// Reprocess full batch as sanity check
	processBatchResponse, err := f.reprocessFullBatch(ctx, f.batch.batchNumber, f.batch.stateRoot)
	if err != nil || processBatchResponse.IsRomOOCError || processBatchResponse.ExecutorError != nil {
		log.Info("halting the finalizer because of a reprocessing error")
		if err != nil {
			f.halt(ctx, fmt.Errorf("failed to reprocess batch, err: %v", err))
		} else if processBatchResponse.IsRomOOCError {
			f.halt(ctx, fmt.Errorf("out of counters during reprocessFullBatch"))
		} else {
			f.halt(ctx, fmt.Errorf("executor error during reprocessFullBatch: %v", processBatchResponse.ExecutorError))
		}
	}

	// Close the current batch
	err = f.closeBatch(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to close batch, err: %w", err)
	}

	// Metadata for the next batch
	stateRoot := f.batch.stateRoot
	lastBatchNumber := f.batch.batchNumber

	// Process Forced Batches
	if len(f.nextForcedBatches) > 0 {
		lastBatchNumber, stateRoot, err = f.processForcedBatches(ctx, lastBatchNumber, stateRoot)
		if err != nil {
			log.Warnf("failed to process forced batch, err: %s", err)
		}
	}

	// Take into consideration the GER
	f.nextGERMux.Lock()
	if f.nextGER != state.ZeroHash {
		f.lastGERHash = f.nextGER
	}
	f.nextGER = state.ZeroHash
	f.nextGERDeadline = 0
	f.nextGERMux.Unlock()

	batch, err := f.openWIPBatch(ctx, lastBatchNumber+1, f.lastGERHash, stateRoot)
	if err == nil {
		f.processRequest.Timestamp = batch.timestamp
		f.processRequest.BatchNumber = batch.batchNumber
		f.processRequest.OldStateRoot = stateRoot
		f.processRequest.GlobalExitRoot = batch.globalExitRoot
		f.processRequest.Transactions = make([]byte, 0, 1)
	}

	return batch, err
}

// processTransaction processes a single transaction.
func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker) error {
	var txHash string
	if tx != nil {
		txHash = tx.Hash.String()
	}
	log := log.WithFields("txHash", txHash, "batchNumber", f.processRequest.BatchNumber)
	start := time.Now()
	defer func() {
		metrics.ProcessingTime(time.Since(start))
	}()

	if f.batch.isEmpty() {
		f.processRequest.GlobalExitRoot = f.batch.globalExitRoot
	} else {
		f.processRequest.GlobalExitRoot = state.ZeroHash
	}

	if tx != nil {
		f.processRequest.Transactions = tx.RawTx
	} else {
		f.processRequest.Transactions = []byte{}
	}
	hash := "nil"
	if tx != nil {
		hash = tx.HashStr
	}
	log.Infof("processTransaction: single tx. Batch.BatchNumber: %d, BatchNumber: %d, OldStateRoot: %s, txHash: %s, GER: %s", f.batch.batchNumber, f.processRequest.BatchNumber, f.processRequest.OldStateRoot, hash, f.processRequest.GlobalExitRoot.String())
	processBatchResponse, err := f.executor.ProcessBatch(ctx, f.processRequest, true)
	if err != nil {
		log.Errorf("failed to process transaction: %s", err)
		return err
	}

	oldStateRoot := f.batch.stateRoot
	if len(processBatchResponse.Responses) > 0 && tx != nil {
		err = f.handleProcessTransactionResponse(ctx, tx, processBatchResponse, oldStateRoot)
		if err != nil {
			return err
		}
	}

	// Update in-memory batch and processRequest
	f.processRequest.OldStateRoot = processBatchResponse.NewStateRoot
	f.batch.stateRoot = processBatchResponse.NewStateRoot
	f.batch.localExitRoot = processBatchResponse.NewLocalExitRoot
	log.Infof("processTransaction: data loaded in memory. batch.batchNumber: %d, batchNumber: %d, result.NewStateRoot: %s, result.NewLocalExitRoot: %s, oldStateRoot: %s", f.batch.batchNumber, f.processRequest.BatchNumber, processBatchResponse.NewStateRoot.String(), processBatchResponse.NewLocalExitRoot.String(), oldStateRoot.String())

	return nil
}

// handleProcessTransactionResponse handles the response of transaction processing.
func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *TxTracker, result *state.ProcessBatchResponse, oldStateRoot common.Hash) error {
	// Handle Transaction Error
	errorCode := executor.RomErrorCode(result.Responses[0].RomError)
	if !state.IsStateRootChanged(errorCode) {
		// If intrinsic error or OOC error, we skip adding the transaction to the batch
		f.handleProcessTransactionError(ctx, result, tx)
		return result.Responses[0].RomError
	}

	// Check remaining resources
	err := f.checkRemainingResources(result, tx)
	if err != nil {
		return err
	}

	// Store the processed transaction, add it to the batch and update status in the pool atomically
	f.storeProcessedTx(f.batch.batchNumber, f.batch.coinbase, f.batch.timestamp, oldStateRoot, result.Responses[0], false)
	f.batch.countOfTxs++
	f.updateWorkerAfterTxStored(ctx, tx, result)

	return nil
}

// handleForcedTxsProcessResp handles the transaction responses for the processed forced batch.
func (f *finalizer) handleForcedTxsProcessResp(request state.ProcessRequest, result *state.ProcessBatchResponse, oldStateRoot common.Hash) {
	log.Infof("handleForcedTxsProcessResp: batchNumber: %d, oldStateRoot: %s, newStateRoot: %s", request.BatchNumber, oldStateRoot.String(), result.NewStateRoot.String())
	for _, txResp := range result.Responses {
		// Handle Transaction Error
		if txResp.RomError != nil {
			romErr := executor.RomErrorCode(txResp.RomError)
			if executor.IsIntrinsicError(romErr) {
				// If we have an intrinsic error, we should continue processing the batch, but skip the transaction
				log.Errorf("handleForcedTxsProcessResp: ROM error: %s", txResp.RomError)
				continue
			}
		}

		// Store the processed transaction, add it to the batch and update status in the pool atomically
		f.storeProcessedTx(request.BatchNumber, request.Coinbase, request.Timestamp, oldStateRoot, txResp, true)
		oldStateRoot = txResp.StateRoot
	}
}

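// storeProcessedTx waits for any in-flight store operation to finish and then queues the processed transaction on the txsStore channel to be persisted.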
func (f *finalizer) storeProcessedTx(batchNum uint64, coinbase common.Address, timestamp time.Time, previousL2BlockStateRoot common.Hash, txResponse *state.ProcessTransactionResponse, isForcedBatch bool) {
	log.Infof("storeProcessedTx: storing processed tx: %s", txResponse.TxHash.String())
	f.txsStore.Wg.Wait()
	f.txsStore.Wg.Add(1)
	f.txsStore.Ch <- &txToStore{
		batchNumber:              batchNum,
		txResponse:               txResponse,
		coinbase:                 coinbase,
		timestamp:                uint64(timestamp.Unix()),
		previousL2BlockStateRoot: previousL2BlockStateRoot,
		isForcedBatch:            isForcedBatch,
	}
	metrics.TxProcessed(metrics.TxProcessedLabelSuccessful, 1)
}

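// updateWorkerAfterTxStored removes the stored transaction from the worker's efficiency list and marks as failed any pool transactions the worker discards as a consequence.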
func (f *finalizer) updateWorkerAfterTxStored(ctx context.Context, tx *TxTracker, result *state.ProcessBatchResponse) {
	// Delete the transaction from the efficiency list
	f.worker.DeleteTx(tx.Hash, tx.From)
	log.Debug("tx deleted from efficiency list", "txHash", tx.Hash.String(), "from", tx.From.Hex())

	start := time.Now()
	txsToDelete := f.worker.UpdateAfterSingleSuccessfulTxExecution(tx.From, result.ReadWriteAddresses)
	for _, txToDelete := range txsToDelete {
		err := f.dbManager.UpdateTxStatus(ctx, txToDelete.Hash, pool.TxStatusFailed, false, txToDelete.FailedReason)
		if err != nil {
			log.Errorf("failed to update status to failed in the pool for tx: %s, err: %s", txToDelete.Hash.String(), err)
		} else {
			metrics.TxProcessed(metrics.TxProcessedLabelFailed, 1)
		}
	}
	metrics.WorkerProcessingTime(time.Since(start))
}

// handleProcessTransactionError handles the error of a transaction
func (f *finalizer) handleProcessTransactionError(ctx context.Context, result *state.ProcessBatchResponse, tx *TxTracker) *sync.WaitGroup {
	txResponse := result.Responses[0]
	errorCode := executor.RomErrorCode(txResponse.RomError)
	addressInfo := result.ReadWriteAddresses[tx.From]
	log.Infof("handleTransactionError: error in tx: %s, errorCode: %d", tx.Hash.String(), errorCode)
	wg := new(sync.WaitGroup)
	failedReason := executor.RomErr(errorCode).Error()
	if executor.IsROMOutOfCountersError(errorCode) {
		log.Errorf("ROM out of counters error, marking tx with Hash: %s as INVALID, errorCode: %s", tx.Hash.String(), errorCode.String())
		start := time.Now()
		f.worker.DeleteTx(tx.Hash, tx.From)
		metrics.WorkerProcessingTime(time.Since(start))

		wg.Add(1)
		go func() {
			defer wg.Done()
			err := f.dbManager.UpdateTxStatus(ctx, tx.Hash, pool.TxStatusInvalid, false, &failedReason)
			if err != nil {
				log.Errorf("failed to update status to invalid in the pool for tx: %s, err: %s", tx.Hash.String(), err)
			} else {
				metrics.TxProcessed(metrics.TxProcessedLabelInvalid, 1)
			}
		}()
	} else if executor.IsInvalidNonceError(errorCode) || executor.IsInvalidBalanceError(errorCode) {
		var (
			nonce   *uint64
			balance *big.Int
		)
		if addressInfo != nil {
			nonce = addressInfo.Nonce
			balance = addressInfo.Balance
		}
		start := time.Now()
		log.Errorf("intrinsic error, moving tx with Hash: %s to NOT READY nonce(%d) balance(%s) cost(%s), err: %s", tx.Hash, nonce, balance.String(), tx.Cost.String(), txResponse.RomError)
		txsToDelete := f.worker.MoveTxToNotReady(tx.Hash, tx.From, nonce, balance)
		for _, txToDelete := range txsToDelete {
			wg.Add(1)
			txToDelete := txToDelete
			go func() {
				defer wg.Done()
				err := f.dbManager.UpdateTxStatus(ctx, txToDelete.Hash, pool.TxStatusFailed, false, &failedReason)
				metrics.TxProcessed(metrics.TxProcessedLabelFailed, 1)
				if err != nil {
					log.Errorf("failed to update status to failed in the pool for tx: %s, err: %s", txToDelete.Hash.String(), err)
				}
			}()
		}
		metrics.WorkerProcessingTime(time.Since(start))
	} else {
		// Delete the transaction from the efficiency list
		f.worker.DeleteTx(tx.Hash, tx.From)
		log.Debug("tx deleted from efficiency list", "txHash", tx.Hash.String(), "from", tx.From.Hex())

		wg.Add(1)
		go func() {
			defer wg.Done()
			// Update the status of the transaction to failed
			err := f.dbManager.UpdateTxStatus(ctx, tx.Hash, pool.TxStatusFailed, false, &failedReason)
			if err != nil {
				log.Errorf("failed to update status to failed in the pool for tx: %s, err: %s", tx.Hash.String(), err)
			} else {
				metrics.TxProcessed(metrics.TxProcessedLabelFailed, 1)
			}
		}()
	}

	return wg
}

// syncWithState syncs the WIP batch and processRequest with the state
func (f *finalizer) syncWithState(ctx context.Context, lastBatchNum *uint64) error {
	f.sharedResourcesMux.Lock()
	defer f.sharedResourcesMux.Unlock()
	f.txsStore.Wg.Wait()

	var lastBatch *state.Batch
	var err error
	for !f.isSynced(ctx) {
		log.Info("wait for synchronizer to sync last batch")
		time.Sleep(time.Second)
	}
	if lastBatchNum == nil {
		lastBatch, err = f.dbManager.GetLastBatch(ctx)
		if err != nil {
			return fmt.Errorf("failed to get last batch, err: %w", err)
		}
	} else {
		lastBatch, err = f.dbManager.GetBatchByNumber(ctx, *lastBatchNum, nil)
		if err != nil {
			return fmt.Errorf("failed to get batch %d, err: %w", *lastBatchNum, err)
		}
	}

	batchNum := lastBatch.BatchNumber
	lastBatchNum = &batchNum

	isClosed, err := f.dbManager.IsBatchClosed(ctx, *lastBatchNum)
	if err != nil {
		return fmt.Errorf("failed to check if batch is closed, err: %w", err)
	}
	log.Infof("Batch %d isClosed: %v", batchNum, isClosed)
	if isClosed {
		ger, _, err := f.dbManager.GetLatestGer(ctx, f.cfg.GERFinalityNumberOfBlocks)
		if err != nil {
			return fmt.Errorf("failed to get latest ger, err: %w", err)
		}

		oldStateRoot := lastBatch.StateRoot
		f.batch, err = f.openWIPBatch(ctx, *lastBatchNum+1, ger.GlobalExitRoot, oldStateRoot)
		if err != nil {
			return err
		}
	} else {
		f.batch, err = f.dbManager.GetWIPBatch(ctx)
		if err != nil {
			return fmt.Errorf("failed to get work-in-progress batch, err: %w", err)
		}
	}
	log.Infof("Initial Batch: %+v", f.batch)
	log.Infof("Initial Batch.StateRoot: %s", f.batch.stateRoot.String())
	log.Infof("Initial Batch.GER: %s", f.batch.globalExitRoot.String())
	log.Infof("Initial Batch.Coinbase: %s", f.batch.coinbase.String())
	log.Infof("Initial Batch.InitialStateRoot: %s", f.batch.initialStateRoot.String())
	log.Infof("Initial Batch.localExitRoot: %s", f.batch.localExitRoot.String())

	f.processRequest = state.ProcessRequest{
		BatchNumber:    *lastBatchNum,
		OldStateRoot:   f.batch.stateRoot,
		GlobalExitRoot: f.batch.globalExitRoot,
		Coinbase:       f.sequencerAddress,
		Timestamp:      f.batch.timestamp,
		Transactions:   make([]byte, 0, 1),
		Caller:         stateMetrics.SequencerCallerLabel,
	}

	log.Infof("synced with state, lastBatchNum: %d. State root: %s", *lastBatchNum, f.batch.initialStateRoot.Hex())

	return nil
}

// processForcedBatches processes all the forced batches that are pending to be processed
func (f *finalizer) processForcedBatches(ctx context.Context, lastBatchNumberInState uint64, stateRoot common.Hash) (uint64, common.Hash, error) {
	f.nextForcedBatchesMux.Lock()
	defer f.nextForcedBatchesMux.Unlock()
	f.nextForcedBatchDeadline = 0

	lastTrustedForcedBatchNumber, err := f.dbManager.GetLastTrustedForcedBatchNumber(ctx, nil)
	if err != nil {
		return 0, common.Hash{}, fmt.Errorf("failed to get last trusted forced batch number, err: %w", err)
	}
	nextForcedBatchNum := lastTrustedForcedBatchNumber + 1

	for _, forcedBatch := range f.nextForcedBatches {
		// Skip already processed forced batches
		if forcedBatch.ForcedBatchNumber < nextForcedBatchNum {
			continue
		}
		// Process in-between unprocessed forced batches
		for forcedBatch.ForcedBatchNumber > nextForcedBatchNum {
			lastBatchNumberInState, stateRoot = f.processForcedBatch(ctx, lastBatchNumberInState, stateRoot, forcedBatch)
			nextForcedBatchNum += 1
		}
		// Process the current forced batch from the channel queue
		lastBatchNumberInState, stateRoot = f.processForcedBatch(ctx, lastBatchNumberInState, stateRoot, forcedBatch)
		nextForcedBatchNum += 1
	}
	f.nextForcedBatches = make([]state.ForcedBatch, 0)

	return lastBatchNumberInState, stateRoot, nil
}

func (f *finalizer) processForcedBatch(ctx context.Context, lastBatchNumberInState uint64, stateRoot common.Hash, forcedBatch state.ForcedBatch) (uint64, common.Hash) {
	request := state.ProcessRequest{
		BatchNumber:    lastBatchNumberInState + 1,
		OldStateRoot:   stateRoot,
		GlobalExitRoot: forcedBatch.GlobalExitRoot,
		Transactions:   forcedBatch.RawTxsData,
		Coinbase:       f.sequencerAddress,
		Timestamp:      now(),
		Caller:         stateMetrics.SequencerCallerLabel,
	}
	response, err := f.dbManager.ProcessForcedBatch(forcedBatch.ForcedBatchNumber, request)
	if err != nil {
		// If there is EXECUTOR (Batch level) error, halt the finalizer.
		f.halt(ctx, fmt.Errorf("failed to process forced batch, Executor err: %w", err))
		return lastBatchNumberInState, stateRoot
	}

	stateRoot = response.NewStateRoot
	lastBatchNumberInState += 1
	f.nextGERMux.Lock()
	f.lastGERHash = forcedBatch.GlobalExitRoot
	f.nextGERMux.Unlock()
	if len(response.Responses) > 0 && !response.IsRomOOCError {
		f.handleForcedTxsProcessResp(request, response, stateRoot)
	}

	return lastBatchNumberInState, stateRoot
}

// openWIPBatch opens a new batch in the state and returns it as WipBatch
func (f *finalizer) openWIPBatch(ctx context.Context, batchNum uint64, ger, stateRoot common.Hash) (*WipBatch, error) {
	dbTx, err := f.dbManager.BeginStateTransaction(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to begin state transaction to open batch, err: %w", err)
	}

	// open next batch
	openBatchResp, err := f.openBatch(ctx, batchNum, ger, dbTx)
	if err != nil {
		if rollbackErr := dbTx.Rollback(ctx); rollbackErr != nil {
			return nil, fmt.Errorf(
				"failed to rollback dbTx: %s. Err: %w",
				rollbackErr.Error(), err,
			)
		}
		return nil, err
	}
	if err := dbTx.Commit(ctx); err != nil {
		return nil, fmt.Errorf("failed to commit database transaction for opening a batch, err: %w", err)
	}

	// Check if synchronizer is up-to-date
	for !f.isSynced(ctx) {
		log.Info("wait for synchronizer to sync last batch")
		time.Sleep(time.Second)
	}

	return &WipBatch{
		batchNumber:        batchNum,
		coinbase:           f.sequencerAddress,
		initialStateRoot:   stateRoot,
		stateRoot:          stateRoot,
		timestamp:          openBatchResp.Timestamp,
		globalExitRoot:     ger,
		remainingResources: getMaxRemainingResources(f.batchConstraints),
	}, err
}

// closeBatch closes the current batch in the state
func (f *finalizer) closeBatch(ctx context.Context) error {
	transactions, err := f.dbManager.GetTransactionsByBatchNumber(ctx, f.batch.batchNumber)
	if err != nil {
		return fmt.Errorf("failed to get transactions for batch %d, err: %w", f.batch.batchNumber, err)
	}
	for i, tx := range transactions {
		log.Infof("closeBatch: BatchNum: %d, Tx position: %d, txHash: %s", f.batch.batchNumber, i, tx.Hash().String())
	}
	usedResources := getUsedBatchResources(f.batchConstraints, f.batch.remainingResources)
	receipt := ClosingBatchParameters{
		BatchNumber:    f.batch.batchNumber,
		StateRoot:      f.batch.stateRoot,
		LocalExitRoot:  f.batch.localExitRoot,
		Txs:            transactions,
		BatchResources: usedResources,
		ClosingReason:  f.batch.closingReason,
	}
	return f.dbManager.CloseBatch(ctx, receipt)
}

// openBatch opens a new batch in the state
func (f *finalizer) openBatch(ctx context.Context, num uint64, ger common.Hash, dbTx pgx.Tx) (state.ProcessingContext, error) {
	processingCtx := state.ProcessingContext{
		BatchNumber:    num,
		Coinbase:       f.sequencerAddress,
		Timestamp:      now(),
		GlobalExitRoot: ger,
	}
	err := f.dbManager.OpenBatch(ctx, processingCtx, dbTx)
	if err != nil {
		return state.ProcessingContext{}, fmt.Errorf("failed to open new batch, err: %w", err)
	}

	return processingCtx, nil
}

// reprocessFullBatch reprocesses a batch as a sanity check
func (f *finalizer) reprocessFullBatch(ctx context.Context, batchNum uint64, expectedStateRoot common.Hash) (*state.ProcessBatchResponse, error) {
	batch, err := f.dbManager.GetBatchByNumber(ctx, batchNum, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to get batch by number, err: %v", err)
	}
	processRequest := state.ProcessRequest{
		BatchNumber:    batch.BatchNumber,
		GlobalExitRoot: batch.GlobalExitRoot,
		OldStateRoot:   f.batch.initialStateRoot,
		Transactions:   batch.BatchL2Data,
		Coinbase:       batch.Coinbase,
		Timestamp:      batch.Timestamp,
		Caller:         stateMetrics.DiscardCallerLabel,
	}
	log.Infof("reprocessFullBatch: BatchNumber: %d, OldStateRoot: %s, Ger: %s", batch.BatchNumber, f.batch.initialStateRoot.String(), batch.GlobalExitRoot.String())
	txs, _, err := state.DecodeTxs(batch.BatchL2Data)
	if err != nil {
		log.Errorf("reprocessFullBatch: error decoding BatchL2Data before reprocessing full batch: %d. Error: %v", batch.BatchNumber, err)
		return nil, fmt.Errorf("reprocessFullBatch: error decoding BatchL2Data before reprocessing full batch: %d. Error: %v", batch.BatchNumber, err)
	}
	for i, tx := range txs {
		log.Infof("reprocessFullBatch: Tx position %d. TxHash: %s", i, tx.Hash())
	}

	result, err := f.executor.ProcessBatch(ctx, processRequest, false)
	if err != nil {
		log.Errorf("failed to process batch, err: %s", err)
		return nil, err
	}

	if result.IsRomOOCError {
		log.Errorf("failed to process batch %v because OutOfCounters", batch.BatchNumber)
		payload, err := json.Marshal(processRequest)
		if err != nil {
			log.Errorf("error marshaling payload: %v", err)
		} else {
			event := &event.Event{
				ReceivedAt:  time.Now(),
				Source:      event.Source_Node,
				Component:   event.Component_Sequencer,
				Level:       event.Level_Critical,
				EventID:     event.EventID_ReprocessFullBatchOOC,
				Description: string(payload),
				Json:        processRequest,
			}
			err = f.eventLog.LogEvent(ctx, event)
			if err != nil {
				log.Errorf("error storing payload: %v", err)
			}
		}
		return nil, fmt.Errorf("failed to process batch because OutOfCounters error")
	}

	if result.NewStateRoot != expectedStateRoot {
		log.Errorf("batchNumber: %d, reprocessed batch has different state root, expected: %s, got: %s", batch.BatchNumber, expectedStateRoot.Hex(), result.NewStateRoot.Hex())
		return nil, fmt.Errorf("batchNumber: %d, reprocessed batch has different state root, expected: %s, got: %s", batch.BatchNumber, expectedStateRoot.Hex(), result.NewStateRoot.Hex())
	}

	return result, nil
}

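// getLastBatchNumAndOldStateRoot returns the latest batch number together with the state root that batch started from (taken from the previous batch when there is one).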
func (f *finalizer) getLastBatchNumAndOldStateRoot(ctx context.Context) (uint64, common.Hash, error) {
	const two = 2
	var oldStateRoot common.Hash
	batches, err := f.dbManager.GetLastNBatches(ctx, two)
	if err != nil {
		return 0, common.Hash{}, fmt.Errorf("failed to get last %d batches, err: %w", two, err)
	}
	lastBatch := batches[0]

	oldStateRoot = f.getOldStateRootFromBatches(batches)
	return lastBatch.BatchNumber, oldStateRoot, nil
}

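// getOldStateRootFromBatches returns the state root preceding the last batch: with a single batch it is that batch's root, with two it is the older one's.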
func (f *finalizer) getOldStateRootFromBatches(batches []*state.Batch) common.Hash {
	const one = 1
	const two = 2
	var oldStateRoot common.Hash
	if len(batches) == one {
		oldStateRoot = batches[0].StateRoot
	} else if len(batches) == two {
		oldStateRoot = batches[1].StateRoot
	}

	return oldStateRoot
}

// isDeadlineEncountered returns true if any closing signal deadline is encountered
func (f *finalizer) isDeadlineEncountered() bool {
	// Forced batch deadline
	if f.nextForcedBatchDeadline != 0 && now().Unix() >= f.nextForcedBatchDeadline {
		log.Infof("Closing batch: %d, forced batch deadline encountered.", f.batch.batchNumber)
		return true
	}
	// Global Exit Root deadline
	if f.nextGERDeadline != 0 && now().Unix() >= f.nextGERDeadline {
		log.Infof("Closing batch: %d, Global Exit Root deadline encountered.", f.batch.batchNumber)
		f.batch.closingReason = state.GlobalExitRootDeadlineClosingReason
		return true
	}
	// Timestamp resolution deadline
	if !f.batch.isEmpty() && f.batch.timestamp.Add(f.cfg.TimestampResolution.Duration).Before(time.Now()) {
		log.Infof("Closing batch: %d, because of timestamp resolution.", f.batch.batchNumber)
		f.batch.closingReason = state.TimeoutResolutionDeadlineClosingReason
		return true
	}
	return false
}

// checkRemainingResources checks if the transaction uses less resources than the remaining ones in the batch.
func (f *finalizer) checkRemainingResources(result *state.ProcessBatchResponse, tx *TxTracker) error {
	usedResources := state.BatchResources{
		ZKCounters: result.UsedZkCounters,
		Bytes:      uint64(len(tx.RawTx)),
	}

	err := f.batch.remainingResources.Sub(usedResources)
	if err != nil {
		log.Infof("current transaction exceeds the batch limit, updating metadata for tx in worker and continuing")
		start := time.Now()
		f.worker.UpdateTx(result.Responses[0].TxHash, tx.From, usedResources.ZKCounters)
		metrics.WorkerProcessingTime(time.Since(start))
		return err
	}

	return nil
}

// isBatchAlmostFull checks whether the current batch's remaining resources are below the constraint thresholds that mark the most efficient moment to close a batch
func (f *finalizer) isBatchAlmostFull() bool {
	resources := f.batch.remainingResources
	zkCounters := resources.ZKCounters
	result := false
	resourceDesc := ""
	if resources.Bytes <= f.getConstraintThresholdUint64(f.batchConstraints.MaxBatchBytesSize) {
		resourceDesc = "MaxBatchBytesSize"
		result = true
	} else if zkCounters.UsedSteps <= f.getConstraintThresholdUint32(f.batchConstraints.MaxSteps) {
		resourceDesc = "MaxSteps"
		result = true
	} else if zkCounters.UsedPoseidonPaddings <= f.getConstraintThresholdUint32(f.batchConstraints.MaxPoseidonPaddings) {
		resourceDesc = "MaxPoseidonPaddings"
		result = true
	} else if zkCounters.UsedBinaries <= f.getConstraintThresholdUint32(f.batchConstraints.MaxBinaries) {
		resourceDesc = "MaxBinaries"
		result = true
	} else if zkCounters.UsedKeccakHashes <= f.getConstraintThresholdUint32(f.batchConstraints.MaxKeccakHashes) {
		resourceDesc = "MaxKeccakHashes"
		result = true
	} else if zkCounters.UsedArithmetics <= f.getConstraintThresholdUint32(f.batchConstraints.MaxArithmetics) {
		resourceDesc = "MaxArithmetics"
		result = true
	} else if zkCounters.UsedMemAligns <= f.getConstraintThresholdUint32(f.batchConstraints.MaxMemAligns) {
		resourceDesc = "MaxMemAligns"
		result = true
	} else if zkCounters.CumulativeGasUsed <= f.getConstraintThresholdUint64(f.batchConstraints.MaxCumulativeGasUsed) {
		resourceDesc = "MaxCumulativeGasUsed"
		result = true
	}

	if result {
		log.Infof("Closing batch: %d, because it reached %s threshold limit", f.batch.batchNumber, resourceDesc)
		f.batch.closingReason = state.BatchAlmostFullClosingReason
	}

	return result
}

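// setNextForcedBatchDeadline sets the forced batch deadline to now + ForcedBatchDeadlineTimeout as a Unix timestamp.
// For example, with a (hypothetical) timeout of 60s and now().Unix() = 1_700_000_000, the deadline becomes 1_700_000_060.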
func (f *finalizer) setNextForcedBatchDeadline() {
	f.nextForcedBatchDeadline = now().Unix() + int64(f.cfg.ForcedBatchDeadlineTimeout.Duration.Seconds())
}

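// setNextGERDeadline sets the GER deadline to now + GERDeadlineTimeout as a Unix timestamp.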
func (f *finalizer) setNextGERDeadline() {
	f.nextGERDeadline = now().Unix() + int64(f.cfg.GERDeadlineTimeout.Duration.Seconds())
}

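// getConstraintThresholdUint64 returns the fraction of a constraint below which the batch is considered almost full.
// For example, with a (hypothetical) ResourcePercentageToCloseBatch of 10 and an input of 120_000, the threshold is 120_000 * 10 / 100 = 12_000.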
func (f *finalizer) getConstraintThresholdUint64(input uint64) uint64 {
	return input * uint64(f.cfg.ResourcePercentageToCloseBatch) / oneHundred
}

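// getConstraintThresholdUint32 is the uint32 counterpart of getConstraintThresholdUint64.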
func (f *finalizer) getConstraintThresholdUint32(input uint32) uint32 {
	// compute in uint64 so the multiplication cannot overflow uint32
	return uint32(uint64(input) * uint64(f.cfg.ResourcePercentageToCloseBatch) / oneHundred)
}

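// getUsedBatchResources derives the consumed resources by subtracting what remains from the configured maximums.
// For example, a (hypothetical) MaxSteps of 1_000_000 with 250_000 steps remaining yields 750_000 used steps.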
func getUsedBatchResources(constraints batchConstraints, remainingResources state.BatchResources) state.BatchResources {
	return state.BatchResources{
		ZKCounters: state.ZKCounters{
			CumulativeGasUsed:    constraints.MaxCumulativeGasUsed - remainingResources.ZKCounters.CumulativeGasUsed,
			UsedKeccakHashes:     constraints.MaxKeccakHashes - remainingResources.ZKCounters.UsedKeccakHashes,
			UsedPoseidonHashes:   constraints.MaxPoseidonHashes - remainingResources.ZKCounters.UsedPoseidonHashes,
			UsedPoseidonPaddings: constraints.MaxPoseidonPaddings - remainingResources.ZKCounters.UsedPoseidonPaddings,
			UsedMemAligns:        constraints.MaxMemAligns - remainingResources.ZKCounters.UsedMemAligns,
			UsedArithmetics:      constraints.MaxArithmetics - remainingResources.ZKCounters.UsedArithmetics,
			UsedBinaries:         constraints.MaxBinaries - remainingResources.ZKCounters.UsedBinaries,
			UsedSteps:            constraints.MaxSteps - remainingResources.ZKCounters.UsedSteps,
		},
		Bytes: constraints.MaxBatchBytesSize - remainingResources.Bytes,
	}
}