github.com/0xPolygon/supernets2-node@v0.0.0-20230711153321-2fe574524eaa/sequencer/worker.go

package sequencer

import (
	"context"
	"fmt"
	"math/big"
	"runtime"
	"sync"
	"time"

	"github.com/0xPolygon/supernets2-node/log"
	"github.com/0xPolygon/supernets2-node/state"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// Worker represents the worker component of the sequencer
type Worker struct {
	cfg                  WorkerCfg
	pool                 map[string]*addrQueue
	efficiencyList       *efficiencyList
	workerMutex          sync.Mutex
	state                stateInterface
	batchConstraints     batchConstraintsFloat64
	batchResourceWeights batchResourceWeights
}

// NewWorker creates and initializes a Worker
func NewWorker(cfg WorkerCfg, state stateInterface, constraints batchConstraints, weights batchResourceWeights) *Worker {
	w := Worker{
		cfg:                  cfg,
		pool:                 make(map[string]*addrQueue),
		efficiencyList:       newEfficiencyList(),
		state:                state,
		batchConstraints:     convertBatchConstraintsToFloat64(constraints),
		batchResourceWeights: weights,
	}

	return &w
}

// NewTxTracker creates and initializes a TxTracker
func (w *Worker) NewTxTracker(tx types.Transaction, counters state.ZKCounters, ip string) (*TxTracker, error) {
	return newTxTracker(tx, counters, w.batchConstraints, w.batchResourceWeights, w.cfg.ResourceCostMultiplier, ip)
}

// AddTxTracker adds a new tx to the Worker
func (w *Worker) AddTxTracker(ctx context.Context, tx *TxTracker) (dropReason error, isWIP bool) {
	w.workerMutex.Lock()
	defer w.workerMutex.Unlock()

	addr, found := w.pool[tx.FromStr]

	if !found {
		// Unlock the worker so other worker functions can execute while the new addrQueue is created
		w.workerMutex.Unlock()

		root, err := w.state.GetLastStateRoot(ctx, nil)
		if err != nil {
			dropReason = fmt.Errorf("AddTx GetLastStateRoot error: %v", err)
			log.Error(dropReason)
			return dropReason, false
		}
		nonce, err := w.state.GetNonceByStateRoot(ctx, tx.From, root)
		if err != nil {
			dropReason = fmt.Errorf("AddTx GetNonceByStateRoot error: %v", err)
			log.Error(dropReason)
			return dropReason, false
		}
		balance, err := w.state.GetBalanceByStateRoot(ctx, tx.From, root)
		if err != nil {
			dropReason = fmt.Errorf("AddTx GetBalanceByStateRoot error: %v", err)
			log.Error(dropReason)
			return dropReason, false
		}

		addr = newAddrQueue(tx.From, nonce.Uint64(), balance)

		// Lock the worker again
		w.workerMutex.Lock()

		w.pool[tx.FromStr] = addr
		log.Infof("AddTx new addrQueue created for addr(%s) nonce(%d) balance(%s)", tx.FromStr, nonce.Uint64(), balance.String())
	}

	// Add the txTracker to the addrQueue and get the newReadyTx and prevReadyTx
	log.Infof("AddTx new tx(%s) nonce(%d) cost(%s) to addrQueue(%s)", tx.Hash.String(), tx.Nonce, tx.Cost.String(), tx.FromStr)
	var newReadyTx, prevReadyTx *TxTracker
	newReadyTx, prevReadyTx, dropReason = addr.addTx(tx)
	if dropReason != nil {
		log.Infof("AddTx tx(%s) dropped from addrQueue(%s)", tx.Hash.String(), tx.FromStr)
		return dropReason, false
	}

	// Update the EfficiencyList (if needed)
	if prevReadyTx != nil {
		log.Infof("AddTx prevReadyTx(%s) nonce(%d) cost(%s) deleted from EfficiencyList", prevReadyTx.Hash.String(), prevReadyTx.Nonce, prevReadyTx.Cost.String())
		w.efficiencyList.delete(prevReadyTx)
	}
	if newReadyTx != nil {
		log.Infof("AddTx newReadyTx(%s) nonce(%d) cost(%s) added to EfficiencyList",
			newReadyTx.Hash.String(), newReadyTx.Nonce, newReadyTx.Cost.String())
		w.efficiencyList.add(newReadyTx)
	}

	return nil, true
}

func (w *Worker) applyAddressUpdate(from common.Address, fromNonce *uint64, fromBalance *big.Int) (*TxTracker, *TxTracker, []*TxTracker) {
	addrQueue, found := w.pool[from.String()]

	if found {
		newReadyTx, prevReadyTx, txsToDelete := addrQueue.updateCurrentNonceBalance(fromNonce, fromBalance)

		// Update the EfficiencyList (if needed)
		if prevReadyTx != nil {
			log.Infof("applyAddressUpdate prevReadyTx(%s) nonce(%d) cost(%s) deleted from EfficiencyList", prevReadyTx.Hash.String(), prevReadyTx.Nonce, prevReadyTx.Cost.String())
			w.efficiencyList.delete(prevReadyTx)
		}
		if newReadyTx != nil {
			log.Infof("applyAddressUpdate newReadyTx(%s) nonce(%d) cost(%s) added to EfficiencyList", newReadyTx.Hash.String(), newReadyTx.Nonce, newReadyTx.Cost.String())
			w.efficiencyList.add(newReadyTx)
		}

		return newReadyTx, prevReadyTx, txsToDelete
	}

	return nil, nil, nil
}

// UpdateAfterSingleSuccessfulTxExecution updates the touched addresses after a tx is successfully executed on the Executor
func (w *Worker) UpdateAfterSingleSuccessfulTxExecution(from common.Address, touchedAddresses map[common.Address]*state.InfoReadWrite) []*TxTracker {
	w.workerMutex.Lock()
	defer w.workerMutex.Unlock()
	if len(touchedAddresses) == 0 {
		log.Errorf("UpdateAfterSingleSuccessfulTxExecution touchedAddresses is nil or empty")
	}
	txsToDelete := make([]*TxTracker, 0)
	touchedFrom, found := touchedAddresses[from]
	if found {
		fromNonce, fromBalance := touchedFrom.Nonce, touchedFrom.Balance
		_, _, txsToDelete = w.applyAddressUpdate(from, fromNonce, fromBalance)
	} else {
		log.Errorf("UpdateAfterSingleSuccessfulTxExecution from(%s) not found in touchedAddresses", from.String())
	}

	for addr, addressInfo := range touchedAddresses {
		if addr != from {
			_, _, txsToDeleteTemp := w.applyAddressUpdate(addr, nil, addressInfo.Balance)
			txsToDelete = append(txsToDelete, txsToDeleteTemp...)
		}
	}
	return txsToDelete
}

// MoveTxToNotReady moves a tx to not-ready after it fails to execute
func (w *Worker) MoveTxToNotReady(txHash common.Hash, from common.Address, actualNonce *uint64, actualBalance *big.Int) []*TxTracker {
	w.workerMutex.Lock()
	defer w.workerMutex.Unlock()
	log.Infof("MoveTxToNotReady tx(%s) from(%s) actualNonce(%d) actualBalance(%s)", txHash.String(), from.String(), actualNonce, actualBalance.String())

	addrQueue, found := w.pool[from.String()]
	if found {
		// Sanity check: the txHash must be the readyTx
		if addrQueue.readyTx == nil || txHash.String() != addrQueue.readyTx.HashStr {
			readyHashStr := ""
			if addrQueue.readyTx != nil {
				readyHashStr = addrQueue.readyTx.HashStr
			}
			log.Errorf("MoveTxToNotReady txHash(%s) is not the readyTx(%s)", txHash.String(), readyHashStr)
		}
	}
	_, _, txsToDelete := w.applyAddressUpdate(from, actualNonce, actualBalance)

	return txsToDelete
}

// DeleteTx deletes the tx after it fails to execute
func (w *Worker) DeleteTx(txHash common.Hash, addr common.Address) {
	w.workerMutex.Lock()
	defer w.workerMutex.Unlock()

	addrQueue, found := w.pool[addr.String()]
	if found {
		deletedReadyTx := addrQueue.deleteTx(txHash)
		if deletedReadyTx != nil {
			log.Infof("DeleteTx tx(%s) deleted from EfficiencyList", deletedReadyTx.Hash.String())
			w.efficiencyList.delete(deletedReadyTx)
		}
	} else {
		log.Errorf("DeleteTx addrQueue(%s) not found", addr.String())
	}
}

// UpdateTx updates the ZKCounters of a tx and resorts the tx in the efficiency list if needed
func (w *Worker) UpdateTx(txHash common.Hash, addr common.Address, counters state.ZKCounters) {
	w.workerMutex.Lock()
	defer w.workerMutex.Unlock()
	log.Infof("UpdateTx tx(%s) addr(%s)", txHash.String(), addr.String())
	log.Debugf("UpdateTx counters.CumulativeGasUsed: %d", counters.CumulativeGasUsed)
	log.Debugf("UpdateTx counters.UsedKeccakHashes: %d", counters.UsedKeccakHashes)
	log.Debugf("UpdateTx counters.UsedPoseidonHashes: %d", counters.UsedPoseidonHashes)
	log.Debugf("UpdateTx counters.UsedPoseidonPaddings: %d", counters.UsedPoseidonPaddings)
	log.Debugf("UpdateTx counters.UsedMemAligns: %d", counters.UsedMemAligns)
	log.Debugf("UpdateTx counters.UsedArithmetics: %d", counters.UsedArithmetics)
	log.Debugf("UpdateTx counters.UsedBinaries: %d", counters.UsedBinaries)
	log.Debugf("UpdateTx counters.UsedSteps: %d", counters.UsedSteps)

	addrQueue, found := w.pool[addr.String()]

	if found {
		newReadyTx, prevReadyTx := addrQueue.UpdateTxZKCounters(txHash, counters, w.batchConstraints, w.batchResourceWeights)

		// Resort the newReadyTx in the efficiencyList
		if prevReadyTx != nil {
			log.Infof("UpdateTx prevReadyTx(%s) nonce(%d) cost(%s) deleted from EfficiencyList", prevReadyTx.Hash.String(), prevReadyTx.Nonce, prevReadyTx.Cost.String())
			w.efficiencyList.delete(prevReadyTx)
		}
		if newReadyTx != nil {
			log.Infof("UpdateTx newReadyTx(%s) nonce(%d) cost(%s) added to EfficiencyList", newReadyTx.Hash.String(), newReadyTx.Nonce, newReadyTx.Cost.String())
			w.efficiencyList.add(newReadyTx)
		}
	} else {
		log.Errorf("UpdateTx addrQueue(%s) not found", addr.String())
	}
}

// GetBestFittingTx gets the most efficient tx that fits in the available batch resources
func (w *Worker) GetBestFittingTx(resources state.BatchResources) *TxTracker {
	w.workerMutex.Lock()
	defer w.workerMutex.Unlock()

	var (
		tx         *TxTracker
		foundMutex sync.RWMutex
	)

	nGoRoutines := runtime.NumCPU()
	foundAt := -1

	wg := sync.WaitGroup{}
	wg.Add(nGoRoutines)

	// Each goroutine looks for a fitting tx
	for i := 0; i < nGoRoutines; i++ {
		go func(n int, bresources state.BatchResources) {
			defer wg.Done()
			for i := n; i < w.efficiencyList.len(); i += nGoRoutines {
				foundMutex.RLock()
				if foundAt != -1 && i > foundAt {
					foundMutex.RUnlock()
					return
				}
				foundMutex.RUnlock()

				txCandidate := w.efficiencyList.getByIndex(i)
				err := bresources.Sub(txCandidate.BatchResources)
				if err != nil {
					// We don't add this Tx
					continue
				}

				foundMutex.Lock()
				if foundAt == -1 || foundAt > i {
					foundAt = i
					tx = txCandidate
					log.Infof("GetBestFittingTx found tx(%s) at index(%d) with efficiency(%f)", tx.Hash.String(), i, tx.Efficiency)
				}
				foundMutex.Unlock()

				return
			}
		}(i, resources)
	}
	wg.Wait()

	return tx
}

// ExpireTransactions deletes old txs
func (w *Worker) ExpireTransactions(maxTime time.Duration) []*TxTracker {
	w.workerMutex.Lock()
	defer w.workerMutex.Unlock()

	var txs []*TxTracker

	log.Info("ExpireTransactions start. addrQueue len: ", len(w.pool))
	for _, addrQueue := range w.pool {
		subTxs, prevReadyTx := addrQueue.ExpireTransactions(maxTime)
		txs = append(txs, subTxs...)

		if prevReadyTx != nil {
			w.efficiencyList.delete(prevReadyTx)
		}

		if addrQueue.IsEmpty() {
			delete(w.pool, addrQueue.fromStr)
		}
	}
	log.Info("ExpireTransactions end. addrQueue len: ", len(w.pool), " deleteCount: ", len(txs))

	return txs
}

// GetEfficiencyList returns the efficiency list
func (w *Worker) GetEfficiencyList() *efficiencyList {
	return w.efficiencyList
}

// HandleL2Reorg handles the L2 reorg signal
func (w *Worker) HandleL2Reorg(txHashes []common.Hash) {
	log.Fatal("L2 Reorg detected. Restarting to sync with the new L2 state...")
}

// convertBatchConstraintsToFloat64 converts the batch constraints to float64
func convertBatchConstraintsToFloat64(constraints batchConstraints) batchConstraintsFloat64 {
	return batchConstraintsFloat64{
		maxTxsPerBatch:       float64(constraints.MaxTxsPerBatch),
		maxBatchBytesSize:    float64(constraints.MaxBatchBytesSize),
		maxCumulativeGasUsed: float64(constraints.MaxCumulativeGasUsed),
		maxKeccakHashes:      float64(constraints.MaxKeccakHashes),
		maxPoseidonHashes:    float64(constraints.MaxPoseidonHashes),
		maxPoseidonPaddings:  float64(constraints.MaxPoseidonPaddings),
		maxMemAligns:         float64(constraints.MaxMemAligns),
		maxArithmetics:       float64(constraints.MaxArithmetics),
		maxBinaries:          float64(constraints.MaxBinaries),
		maxSteps:             float64(constraints.MaxSteps),
	}
}
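
// Usage sketch (illustrative only, not part of the original file): how a
// caller such as the sequencer's finalizer might drive the Worker. cfg, st,
// constraints, weights, rawTx, zkCounters, ip, ctx, remainingResources and
// touched are hypothetical stand-ins for values the caller already holds.
//
//	worker := NewWorker(cfg, st, constraints, weights)
//	txTracker, err := worker.NewTxTracker(rawTx, zkCounters, ip)
//	if err != nil {
//		return err
//	}
//	if dropReason, _ := worker.AddTxTracker(ctx, txTracker); dropReason != nil {
//		return dropReason
//	}
//	// Pick the most efficient ready tx that still fits in the open batch.
//	if bestTx := worker.GetBestFittingTx(remainingResources); bestTx != nil {
//		// ... execute bestTx, then feed the execution results back, e.g.:
//		// worker.UpdateAfterSingleSuccessfulTxExecution(bestTx.From, touched)
//	}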
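
// parallelFirstFit is a minimal, self-contained sketch (an illustration added
// here, not part of the original file) of the striped-scan pattern that
// GetBestFittingTx uses: goroutine n inspects indices n, n+k, n+2k, ... of a
// list ordered from best to worst candidate, and the lowest fitting index
// wins. items and fits are hypothetical stand-ins for the efficiency list and
// the batch-resources check.
func parallelFirstFit(items []*TxTracker, fits func(*TxTracker) bool, nGoRoutines int) *TxTracker {
	var (
		tx         *TxTracker
		foundMutex sync.RWMutex
	)
	foundAt := -1

	wg := sync.WaitGroup{}
	wg.Add(nGoRoutines)
	for n := 0; n < nGoRoutines; n++ {
		go func(n int) {
			defer wg.Done()
			for i := n; i < len(items); i += nGoRoutines {
				// Give up on this stripe once a better (lower) index has been found
				foundMutex.RLock()
				stop := foundAt != -1 && i > foundAt
				foundMutex.RUnlock()
				if stop {
					return
				}

				if !fits(items[i]) {
					continue
				}

				// Keep the lowest fitting index seen across all goroutines
				foundMutex.Lock()
				if foundAt == -1 || foundAt > i {
					foundAt = i
					tx = items[i]
				}
				foundMutex.Unlock()

				return
			}
		}(n)
	}
	wg.Wait()

	return tx
}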