github.com/0xPolygon/supernets2-node@v0.0.0-20230711153321-2fe574524eaa/sequencesender/sequencesender.go

package sequencesender

import (
	"context"
	"crypto/ecdsa"
	"errors"
	"fmt"
	"time"

	"github.com/0xPolygon/supernets2-node/etherman/types"
	"github.com/0xPolygon/supernets2-node/ethtxmanager"
	"github.com/0xPolygon/supernets2-node/event"
	"github.com/0xPolygon/supernets2-node/log"
	"github.com/0xPolygon/supernets2-node/sequencer/metrics"
	"github.com/0xPolygon/supernets2-node/state"
	"github.com/ethereum/go-ethereum/common"
	"github.com/jackc/pgx/v4"
)

const (
	ethTxManagerOwner = "sequencer"
	monitoredIDFormat = "sequence-from-%v-to-%v"
)
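
// Illustration (not in the original source): given the format above,
// fmt.Sprintf(monitoredIDFormat, 100, 110) yields "sequence-from-100-to-110",
// the ID under which a sequencing tx covering batches 100 through 110 is
// tracked in the eth tx manager.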

var (
	// ErrOversizedData is returned if the input data of a transaction is greater
	// than some meaningful limit a user might use. This is not a consensus error
	// making the transaction invalid, rather a DoS protection.
	ErrOversizedData = errors.New("oversized data")
)

// SequenceSender sends batch sequences to L1, delegating tx submission and
// monitoring to the eth tx manager.
type SequenceSender struct {
	cfg          Config
	privKey      *ecdsa.PrivateKey
	state        stateInterface
	ethTxManager ethTxManager
	etherman     etherman
	eventLog     *event.EventLog
}

// New creates a new SequenceSender.
func New(cfg Config, state stateInterface, etherman etherman, manager ethTxManager, eventLog *event.EventLog, privKey *ecdsa.PrivateKey) (*SequenceSender, error) {
	return &SequenceSender{
		cfg:          cfg,
		state:        state,
		etherman:     etherman,
		ethTxManager: manager,
		eventLog:     eventLog,
		privKey:      privKey,
	}, nil
}
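
// Usage sketch (hypothetical wiring; identifiers such as cfg, st, ethTxMgr
// and ctx are assumptions for illustration, not part of this package):
//
//	ss, err := sequencesender.New(cfg, st, etherman, ethTxMgr, eventLog, privKey)
//	if err != nil {
//		log.Fatal(err)
//	}
//	go ss.Start(ctx)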

// Start runs the sequence sender loop, sending sequences until the context is cancelled.
func (s *SequenceSender) Start(ctx context.Context) {
	ticker := time.NewTicker(s.cfg.WaitPeriodSendSequence.Duration)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		default:
			s.tryToSendSequence(ctx, ticker)
		}
	}
}

// tryToSendSequence checks pending monitored txs and, if the node is synced
// and there are sequences worth sending, builds a sequenceBatches tx and
// hands it to the eth tx manager.
func (s *SequenceSender) tryToSendSequence(ctx context.Context, ticker *time.Ticker) {
	retry := false
	// Process monitored sequences before starting the next cycle
	s.ethTxManager.ProcessPendingMonitoredTxs(ctx, ethTxManagerOwner, func(result ethtxmanager.MonitoredTxResult, dbTx pgx.Tx) {
		if result.Status == ethtxmanager.MonitoredTxStatusFailed {
			retry = true
			resultLog := log.WithFields("owner", ethTxManagerOwner, "id", result.ID)
			resultLog.Error("failed to send sequence, TODO: review this failure case and define how it should be handled")
		}
	}, nil)

	if retry {
		return
	}

	// Check if synchronizer is up to date
	if !s.isSynced(ctx) {
		log.Info("waiting for the synchronizer to sync the last batch")
		waitTick(ctx, ticker)
		return
	}

	// Check whether there are sequences worth sending to L1
	log.Infof("getting sequences to send")
	sequences, err := s.getSequencesToSend(ctx)
	if err != nil || len(sequences) == 0 {
		if err != nil {
			log.Errorf("error getting sequences: %v", err)
		} else {
			log.Info("waiting for sequences to be worth sending to L1")
		}
		waitTick(ctx, ticker)
		return
	}

	lastVirtualBatchNum, err := s.state.GetLastVirtualBatchNum(ctx, nil)
	if err != nil {
		log.Errorf("failed to get last virtual batch num, err: %v", err)
		return
	}

	// Send sequences to L1
	sequenceCount := len(sequences)
	log.Infof(
		"sending sequences to L1. From batch %d to batch %d",
		lastVirtualBatchNum+1, lastVirtualBatchNum+uint64(sequenceCount),
	)
	metrics.SequencesSentToL1(float64(sequenceCount))

	// Add the sequence tx to be monitored
	sender := common.HexToAddress(s.cfg.SenderAddress)
	signaturesAndAddrs, err := s.getSignaturesAndAddrsFromDataCommittee(ctx, sequences)
	if err != nil {
		log.Error("error getting signatures and addresses from the data committee: ", err)
		return
	}
	to, data, err := s.etherman.BuildSequenceBatchesTxData(sender, sequences, signaturesAndAddrs)
	if err != nil {
		log.Error("error building the sequenceBatches tx data to add to the eth tx manager: ", err)
		return
	}
	firstSequence := sequences[0]
	lastSequence := sequences[len(sequences)-1]
	monitoredTxID := fmt.Sprintf(monitoredIDFormat, firstSequence.BatchNumber, lastSequence.BatchNumber)
	err = s.ethTxManager.Add(ctx, ethTxManagerOwner, monitoredTxID, sender, to, nil, data, nil)
	if err != nil {
		log.Error("error adding the sequences tx to the eth tx manager: ", err)
		return
	}
}

// getSequencesToSend generates an array of sequences to be sent to L1.
// If the array is empty, it doesn't necessarily mean that there are no
// sequences to be sent; it may simply not be worth sending them yet.
func (s *SequenceSender) getSequencesToSend(ctx context.Context) ([]types.Sequence, error) {
	lastVirtualBatchNum, err := s.state.GetLastVirtualBatchNum(ctx, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to get last virtual batch num, err: %w", err)
	}

	currentBatchNumToSequence := lastVirtualBatchNum + 1
	sequences := []types.Sequence{}
	// var estimatedGas uint64
	// var tx *ethTypes.Transaction

	// Add sequences until they are too big for a single L1 tx, or the last batch is reached
	for {
		// Check if batch is closed
		isClosed, err := s.state.IsBatchClosed(ctx, currentBatchNumToSequence, nil)
		if err != nil {
			return nil, err
		}
		if !isClosed {
			// Reached current (WIP) batch
			break
		}
		// Add new sequence
		batch, err := s.state.GetBatchByNumber(ctx, currentBatchNumToSequence, nil)
		if err != nil {
			return nil, err
		}

		seq := types.Sequence{
			GlobalExitRoot: batch.GlobalExitRoot,
			Timestamp:      batch.Timestamp.Unix(),
			BatchL2Data:    batch.BatchL2Data,
			BatchNumber:    batch.BatchNumber,
		}

		if batch.ForcedBatchNum != nil {
			forcedBatch, err := s.state.GetForcedBatch(ctx, *batch.ForcedBatchNum, nil)
			if err != nil {
				return nil, err
			}
			seq.ForcedBatchTimestamp = forcedBatch.ForcedAt.Unix()
		}

		sequences = append(sequences, seq)
		if len(sequences) == int(s.cfg.MaxBatchesForL1) {
			log.Infof(
				"sequence should be sent to L1, because MaxBatchesForL1 (%d) has been reached",
				s.cfg.MaxBatchesForL1,
			)
			return sequences, nil
		}

		// Increase batch num for next iteration
		currentBatchNumToSequence++
	}

	// Reached the latest batch. Decide whether it's worth sending the sequence, or waiting for new batches
	if len(sequences) == 0 {
		log.Info("no batches to be sequenced")
		return nil, nil
	}

	lastBatchVirtualizationTime, err := s.state.GetTimeForLatestBatchVirtualization(ctx, nil)
	if err != nil && !errors.Is(err, state.ErrNotFound) {
		log.Warnf("failed to get last l1 interaction time, err: %v. Sending sequences as a conservative approach", err)
		return sequences, nil
	}
	if lastBatchVirtualizationTime.Before(time.Now().Add(-s.cfg.LastBatchVirtualizationTimeMaxWaitPeriod.Duration)) {
		// TODO: implement check profitability
		// if s.checker.IsSendSequencesProfitable(new(big.Int).SetUint64(estimatedGas), sequences) {
		log.Info("sequence should be sent to L1, because it has been too long since the last one was sent")
		return sequences, nil
		//}
	}

	log.Info("not enough time has passed since the last batch was virtualized, and the sequence could still grow")
	return nil, nil
}
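
// shouldSendSequences distills the decision logic above into a pure helper.
// This is an illustrative sketch, not part of the original code: a sequence
// is worth sending once the MaxBatchesForL1 cap is reached, or once the last
// batch was virtualized longer ago than the configured max wait period.
func shouldSendSequences(count, maxBatches int, lastVirtualized time.Time, maxWait time.Duration) bool {
	if count == 0 {
		// nothing to send
		return false
	}
	if count >= maxBatches {
		// batch cap reached
		return true
	}
	// deadline passed since the last virtualization
	return lastVirtualized.Before(time.Now().Add(-maxWait))
}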

// waitTick blocks until the next tick, or until the context is cancelled.
func waitTick(ctx context.Context, ticker *time.Ticker) {
	select {
	case <-ticker.C:
		// nothing
	case <-ctx.Done():
		return
	}
}

// isSynced reports whether the local state has caught up with the latest
// batch virtualized on L1.
func (s *SequenceSender) isSynced(ctx context.Context) bool {
	lastSyncedBatchNum, err := s.state.GetLastVirtualBatchNum(ctx, nil)
	if err != nil && !errors.Is(err, state.ErrNotFound) {
		log.Errorf("failed to get last synced (virtual) batch, err: %v", err)
		return false
	}
	lastBatchNum, err := s.state.GetLastBatchNumber(ctx, nil)
	if err != nil && !errors.Is(err, state.ErrNotFound) {
		log.Errorf("failed to get last batch num, err: %v", err)
		return false
	}
	if lastBatchNum > lastSyncedBatchNum {
		return true
	}
	lastEthBatchNum, err := s.etherman.GetLatestBatchNumber()
	if err != nil {
		log.Errorf("failed to get last eth batch, err: %v", err)
		return false
	}
	if lastSyncedBatchNum < lastEthBatchNum {
		log.Infof("waiting for the state to be synced, lastSyncedBatchNum: %d, lastEthBatchNum: %d", lastSyncedBatchNum, lastEthBatchNum)
		return false
	}

	return true
}