github.com/theQRL/go-zond@v0.1.1/miner/worker.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package miner

import (
	"errors"
	"fmt"
	"math/big"
	"sync"
	"sync/atomic"
	"time"

	"github.com/theQRL/go-zond/common"
	"github.com/theQRL/go-zond/consensus"
	"github.com/theQRL/go-zond/consensus/misc/eip1559"
	"github.com/theQRL/go-zond/consensus/misc/eip4844"
	"github.com/theQRL/go-zond/core"
	"github.com/theQRL/go-zond/core/state"
	"github.com/theQRL/go-zond/core/txpool"
	"github.com/theQRL/go-zond/core/types"
	"github.com/theQRL/go-zond/core/vm"
	"github.com/theQRL/go-zond/event"
	"github.com/theQRL/go-zond/log"
	"github.com/theQRL/go-zond/params"
	"github.com/theQRL/go-zond/trie"
)

const (
	// resultQueueSize is the size of channel listening to sealing result.
	resultQueueSize = 10

	// txChanSize is the size of channel listening to NewTxsEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 4096

	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

	// resubmitAdjustChanSize is the size of resubmitting interval adjustment channel.
	resubmitAdjustChanSize = 10

	// minRecommitInterval is the minimal time interval to recreate the sealing block with
	// any newly arrived transactions.
	minRecommitInterval = 1 * time.Second

	// maxRecommitInterval is the maximum time interval to recreate the sealing block with
	// any newly arrived transactions.
	maxRecommitInterval = 15 * time.Second

	// intervalAdjustRatio is the impact a single interval adjustment has on sealing work
	// resubmitting interval.
	intervalAdjustRatio = 0.1

	// intervalAdjustBias is applied during the new resubmit interval calculation in favor of
	// increasing upper limit or decreasing lower limit so that the limit can be reachable.
	intervalAdjustBias = 200 * 1000.0 * 1000.0

	// staleThreshold is the maximum depth of the acceptable stale block.
	staleThreshold = 7
)

var (
	errBlockInterruptedByNewHead  = errors.New("new head arrived while building block")
	errBlockInterruptedByRecommit = errors.New("recommit interrupt while building block")
	errBlockInterruptedByTimeout  = errors.New("timeout while building block")
)

// environment is the worker's current environment and holds all
// information of the sealing block generation.
type environment struct {
	signer   types.Signer
	state    *state.StateDB // apply state changes here
	tcount   int            // tx count in cycle
	gasPool  *core.GasPool  // available gas used to pack transactions
	coinbase common.Address

	header   *types.Header
	txs      []*types.Transaction
	receipts []*types.Receipt
	sidecars []*types.BlobTxSidecar
	blobs    int
}

// copy creates a deep copy of environment.
func (env *environment) copy() *environment {
	cpy := &environment{
		signer:   env.signer,
		state:    env.state.Copy(),
		tcount:   env.tcount,
		coinbase: env.coinbase,
		header:   types.CopyHeader(env.header),
		receipts: copyReceipts(env.receipts),
	}
	if env.gasPool != nil {
		gasPool := *env.gasPool
		cpy.gasPool = &gasPool
	}
	cpy.txs = make([]*types.Transaction, len(env.txs))
	copy(cpy.txs, env.txs)

	cpy.sidecars = make([]*types.BlobTxSidecar, len(env.sidecars))
	copy(cpy.sidecars, env.sidecars)

	return cpy
}

// discard terminates the background prefetcher goroutine. It should
// always be called for all created environment instances, otherwise
// a goroutine leak can happen.
func (env *environment) discard() {
	if env.state == nil {
		return
	}
	env.state.StopPrefetcher()
}

// task contains all information for consensus engine sealing and result submitting.
type task struct {
	receipts  []*types.Receipt
	state     *state.StateDB
	block     *types.Block
	createdAt time.Time
}

const (
	commitInterruptNone int32 = iota
	commitInterruptNewHead
	commitInterruptResubmit
	commitInterruptTimeout
)

// newWorkReq represents a request for new sealing work submission with the
// related interrupt notifier.
type newWorkReq struct {
	interrupt *atomic.Int32
	timestamp int64
}

// newPayloadResult is the result of payload generation.
type newPayloadResult struct {
	err      error
	block    *types.Block
	fees     *big.Int               // total block fees
	sidecars []*types.BlobTxSidecar // collected blobs of blob transactions
}

// getWorkReq represents a request for getting a new sealing work with provided parameters.
type getWorkReq struct {
	params *generateParams
	result chan *newPayloadResult // non-blocking channel
}

// intervalAdjust represents a resubmitting interval adjustment.
type intervalAdjust struct {
	ratio float64
	inc   bool
}

// worker is the main object which takes care of submitting new work to the
// consensus engine and gathering the sealing result.
type worker struct {
	config      *Config
	chainConfig *params.ChainConfig
	engine      consensus.Engine
	eth         Backend
	chain       *core.BlockChain

	// Feeds
	pendingLogsFeed event.Feed

	// Subscriptions
	mux          *event.TypeMux
	txsCh        chan core.NewTxsEvent
	txsSub       event.Subscription
	chainHeadCh  chan core.ChainHeadEvent
	chainHeadSub event.Subscription

	// Channels
	newWorkCh          chan *newWorkReq
	getWorkCh          chan *getWorkReq
	taskCh             chan *task
	resultCh           chan *types.Block
	startCh            chan struct{}
	exitCh             chan struct{}
	resubmitIntervalCh chan time.Duration
	resubmitAdjustCh   chan *intervalAdjust

	wg sync.WaitGroup

	current *environment // An environment for the current running cycle.

	mu       sync.RWMutex // The lock used to protect the coinbase and extra fields
	coinbase common.Address
	extra    []byte

	pendingMu    sync.RWMutex
	pendingTasks map[common.Hash]*task

	snapshotMu       sync.RWMutex // The lock used to protect the snapshots below
	snapshotBlock    *types.Block
	snapshotReceipts types.Receipts
	snapshotState    *state.StateDB

	// atomic status counters
	running atomic.Bool  // The indicator whether the consensus engine is running or not.
	newTxs  atomic.Int32 // New arrival transaction count since the last sealing work submission.
	syncing atomic.Bool  // The indicator whether the node is still syncing.

	// newpayloadTimeout is the maximum timeout allowance for creating payload.
	// The default value is 2 seconds but the node operator can set it to an
	// arbitrarily large value. A large timeout allowance may cause Geth to fail
	// creating a non-empty payload within the specified time and eventually
	// miss the slot in case there are some computationally expensive
	// transactions in the txpool.
	newpayloadTimeout time.Duration

	// recommit is the time interval to re-create sealing work or to re-build
	// payload in proof-of-stake stage.
	recommit time.Duration

	// External functions
	isLocalBlock func(header *types.Header) bool // Function used to determine whether the specified block is mined by the local miner.

	// Test hooks
	newTaskHook  func(*task)                        // Method to call upon receiving a new sealing task.
	skipSealHook func(*task) bool                   // Method to decide whether to skip sealing.
	fullTaskHook func()                             // Method to call before pushing the full sealing task.
	resubmitHook func(time.Duration, time.Duration) // Method to call upon updating the resubmitting interval.
}

func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(header *types.Header) bool, init bool) *worker {
	worker := &worker{
		config:             config,
		chainConfig:        chainConfig,
		engine:             engine,
		eth:                eth,
		chain:              eth.BlockChain(),
		mux:                mux,
		isLocalBlock:       isLocalBlock,
		coinbase:           config.Etherbase,
		extra:              config.ExtraData,
		pendingTasks:       make(map[common.Hash]*task),
		txsCh:              make(chan core.NewTxsEvent, txChanSize),
		chainHeadCh:        make(chan core.ChainHeadEvent, chainHeadChanSize),
		newWorkCh:          make(chan *newWorkReq),
		getWorkCh:          make(chan *getWorkReq),
		taskCh:             make(chan *task),
		resultCh:           make(chan *types.Block, resultQueueSize),
		startCh:            make(chan struct{}, 1),
		exitCh:             make(chan struct{}),
		resubmitIntervalCh: make(chan time.Duration),
		resubmitAdjustCh:   make(chan *intervalAdjust, resubmitAdjustChanSize),
	}
	// Subscribe NewTxsEvent for tx pool
	worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh)
	// Subscribe events for blockchain
	worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh)

	// Sanitize recommit interval if the user-specified one is too short.
	recommit := worker.config.Recommit
	if recommit < minRecommitInterval {
		log.Warn("Sanitizing miner recommit interval", "provided", recommit, "updated", minRecommitInterval)
		recommit = minRecommitInterval
	}
	worker.recommit = recommit

	// Sanitize the timeout config for creating payload.
	newpayloadTimeout := worker.config.NewPayloadTimeout
	if newpayloadTimeout == 0 {
		log.Warn("Sanitizing new payload timeout to default", "provided", newpayloadTimeout, "updated", DefaultConfig.NewPayloadTimeout)
		newpayloadTimeout = DefaultConfig.NewPayloadTimeout
	}
	if newpayloadTimeout < time.Millisecond*100 {
		log.Warn("Low payload timeout may cause high amount of non-full blocks", "provided", newpayloadTimeout, "default", DefaultConfig.NewPayloadTimeout)
	}
	worker.newpayloadTimeout = newpayloadTimeout

	worker.wg.Add(4)
	go worker.mainLoop()
	go worker.newWorkLoop(recommit)
	go worker.resultLoop()
	go worker.taskLoop()

	// Submit first work to initialize pending state.
	if init {
		worker.startCh <- struct{}{}
	}
	return worker
}

// setEtherbase sets the etherbase used to initialize the block coinbase field.
func (w *worker) setEtherbase(addr common.Address) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.coinbase = addr
}

// etherbase retrieves the configured etherbase address.
func (w *worker) etherbase() common.Address {
	w.mu.RLock()
	defer w.mu.RUnlock()
	return w.coinbase
}

func (w *worker) setGasCeil(ceil uint64) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.config.GasCeil = ceil
}

// setExtra sets the content used to initialize the block extra field.
func (w *worker) setExtra(extra []byte) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.extra = extra
}

// setRecommitInterval updates the interval for miner sealing work recommitting.
func (w *worker) setRecommitInterval(interval time.Duration) {
	select {
	case w.resubmitIntervalCh <- interval:
	case <-w.exitCh:
	}
}

// pending returns the pending state and corresponding block. The returned
// values can be nil in case the pending block is not initialized.
func (w *worker) pending() (*types.Block, *state.StateDB) {
	w.snapshotMu.RLock()
	defer w.snapshotMu.RUnlock()
	if w.snapshotState == nil {
		return nil, nil
	}
	return w.snapshotBlock, w.snapshotState.Copy()
}

// pendingBlock returns the pending block. The returned block can be nil in case the
// pending block is not initialized.
func (w *worker) pendingBlock() *types.Block {
	w.snapshotMu.RLock()
	defer w.snapshotMu.RUnlock()
	return w.snapshotBlock
}

// pendingBlockAndReceipts returns the pending block and corresponding receipts.
// The returned values can be nil in case the pending block is not initialized.
func (w *worker) pendingBlockAndReceipts() (*types.Block, types.Receipts) {
	w.snapshotMu.RLock()
	defer w.snapshotMu.RUnlock()
	return w.snapshotBlock, w.snapshotReceipts
}

// start sets the running status to true and triggers new work submitting.
func (w *worker) start() {
	w.running.Store(true)
	w.startCh <- struct{}{}
}

// stop sets the running status to false.
func (w *worker) stop() {
	w.running.Store(false)
}

// isRunning returns an indicator whether the worker is running or not.
func (w *worker) isRunning() bool {
	return w.running.Load()
}

// close terminates all background threads maintained by the worker.
// Note the worker does not support being closed multiple times.
func (w *worker) close() {
	w.running.Store(false)
	close(w.exitCh)
	w.wg.Wait()
}

// recalcRecommit recalculates the resubmitting interval upon feedback.
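//
// Illustrative example (numbers chosen here for explanation only, not taken
// from this codebase): with intervalAdjustRatio = 0.1 and
// intervalAdjustBias = 200ms, increasing from prev = 3s towards a 5s target
// gives next = 0.9*3s + 0.1*(5s+0.2s) = 3.22s, capped at maxRecommitInterval.
// A decrease moves towards the supplied target minus the bias and is floored
// at minRecommit.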
func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) time.Duration {
	var (
		prevF = float64(prev.Nanoseconds())
		next  float64
	)
	if inc {
		next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias)
		max := float64(maxRecommitInterval.Nanoseconds())
		if next > max {
			next = max
		}
	} else {
		next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias)
		min := float64(minRecommit.Nanoseconds())
		if next < min {
			next = min
		}
	}
	return time.Duration(int64(next))
}

// newWorkLoop is a standalone goroutine to submit new sealing work upon received events.
func (w *worker) newWorkLoop(recommit time.Duration) {
	defer w.wg.Done()
	var (
		interrupt   *atomic.Int32
		minRecommit = recommit // minimal resubmit interval specified by user.
		timestamp   int64      // timestamp for each round of sealing.
	)

	timer := time.NewTimer(0)
	defer timer.Stop()
	<-timer.C // discard the initial tick

	// commit aborts in-flight transaction execution with the given signal and resubmits a new one.
	commit := func(s int32) {
		if interrupt != nil {
			interrupt.Store(s)
		}
		interrupt = new(atomic.Int32)
		select {
		case w.newWorkCh <- &newWorkReq{interrupt: interrupt, timestamp: timestamp}:
		case <-w.exitCh:
			return
		}
		timer.Reset(recommit)
		w.newTxs.Store(0)
	}
	// clearPending cleans the stale pending tasks.
	clearPending := func(number uint64) {
		w.pendingMu.Lock()
		for h, t := range w.pendingTasks {
			if t.block.NumberU64()+staleThreshold <= number {
				delete(w.pendingTasks, h)
			}
		}
		w.pendingMu.Unlock()
	}

	for {
		select {
		case <-w.startCh:
			clearPending(w.chain.CurrentBlock().Number.Uint64())
			timestamp = time.Now().Unix()
			commit(commitInterruptNewHead)

		case head := <-w.chainHeadCh:
			clearPending(head.Block.NumberU64())
			timestamp = time.Now().Unix()
			commit(commitInterruptNewHead)

		case <-timer.C:
			// If sealing is running, resubmit a new work cycle periodically to pull in
			// higher priced transactions. Disable this overhead for pending blocks.
			if w.isRunning() && (w.chainConfig.Clique == nil || w.chainConfig.Clique.Period > 0) {
				// Short circuit if no new transaction arrives.
				if w.newTxs.Load() == 0 {
					timer.Reset(recommit)
					continue
				}
				commit(commitInterruptResubmit)
			}

		case interval := <-w.resubmitIntervalCh:
			// Adjust resubmit interval explicitly by user.
			if interval < minRecommitInterval {
				log.Warn("Sanitizing miner recommit interval", "provided", interval, "updated", minRecommitInterval)
				interval = minRecommitInterval
			}
			log.Info("Miner recommit interval update", "from", minRecommit, "to", interval)
			minRecommit, recommit = interval, interval

			if w.resubmitHook != nil {
				w.resubmitHook(minRecommit, recommit)
			}

		case adjust := <-w.resubmitAdjustCh:
			// Adjust resubmit interval by feedback.
			if adjust.inc {
				before := recommit
				target := float64(recommit.Nanoseconds()) / adjust.ratio
				recommit = recalcRecommit(minRecommit, recommit, target, true)
				log.Trace("Increase miner recommit interval", "from", before, "to", recommit)
			} else {
				before := recommit
				recommit = recalcRecommit(minRecommit, recommit, float64(minRecommit.Nanoseconds()), false)
				log.Trace("Decrease miner recommit interval", "from", before, "to", recommit)
			}

			if w.resubmitHook != nil {
				w.resubmitHook(minRecommit, recommit)
			}

		case <-w.exitCh:
			return
		}
	}
}

// mainLoop is responsible for generating and submitting sealing work based on
// the received event. It supports two modes: either automatically generate and
// submit a task, or return a task according to the given parameters for
// various purposes.
func (w *worker) mainLoop() {
	defer w.wg.Done()
	defer w.txsSub.Unsubscribe()
	defer w.chainHeadSub.Unsubscribe()
	defer func() {
		if w.current != nil {
			w.current.discard()
		}
	}()

	for {
		select {
		case req := <-w.newWorkCh:
			w.commitWork(req.interrupt, req.timestamp)

		case req := <-w.getWorkCh:
			req.result <- w.generateWork(req.params)

		case ev := <-w.txsCh:
			// Apply transactions to the pending state if we're not sealing.
			//
			// Note all transactions received may not be continuous with transactions
			// already included in the current sealing block. These transactions will
			// be automatically eliminated.
			if !w.isRunning() && w.current != nil {
				// If the block is already full, abort
				if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas {
					continue
				}
				txs := make(map[common.Address][]*txpool.LazyTransaction, len(ev.Txs))
				for _, tx := range ev.Txs {
					acc, _ := types.Sender(w.current.signer, tx)
					txs[acc] = append(txs[acc], &txpool.LazyTransaction{
						Hash:      tx.Hash(),
						Tx:        tx.WithoutBlobTxSidecar(),
						Time:      tx.Time(),
						GasFeeCap: tx.GasFeeCap(),
						GasTipCap: tx.GasTipCap(),
					})
				}
				txset := newTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee)
				tcount := w.current.tcount
				w.commitTransactions(w.current, txset, nil)

				// Only update the snapshot if any new transactions were added
				// to the pending block
				if tcount != w.current.tcount {
					w.updateSnapshot(w.current)
				}
			} else {
				// Special case: if the consensus engine is 0-period clique (dev mode),
				// submit sealing work here since all empty submissions will be rejected
				// by clique. Of course the advance sealing (empty submission) is disabled.
				if w.chainConfig.Clique != nil && w.chainConfig.Clique.Period == 0 {
					w.commitWork(nil, time.Now().Unix())
				}
			}
			w.newTxs.Add(int32(len(ev.Txs)))

		// System stopped
		case <-w.exitCh:
			return
		case <-w.txsSub.Err():
			return
		case <-w.chainHeadSub.Err():
			return
		}
	}
}

// taskLoop is a standalone goroutine to fetch sealing tasks from the generator and
// push them to the consensus engine.
func (w *worker) taskLoop() {
	defer w.wg.Done()
	var (
		stopCh chan struct{}
		prev   common.Hash
	)

	// interrupt aborts the in-flight sealing task.
	interrupt := func() {
		if stopCh != nil {
			close(stopCh)
			stopCh = nil
		}
	}
	for {
		select {
		case task := <-w.taskCh:
			if w.newTaskHook != nil {
				w.newTaskHook(task)
			}
			// Reject duplicate sealing work due to resubmitting.
			sealHash := w.engine.SealHash(task.block.Header())
			if sealHash == prev {
				continue
			}
			// Interrupt previous sealing operation
			interrupt()
			stopCh, prev = make(chan struct{}), sealHash

			if w.skipSealHook != nil && w.skipSealHook(task) {
				continue
			}
			w.pendingMu.Lock()
			w.pendingTasks[sealHash] = task
			w.pendingMu.Unlock()

			if err := w.engine.Seal(w.chain, task.block, w.resultCh, stopCh); err != nil {
				log.Warn("Block sealing failed", "err", err)
				w.pendingMu.Lock()
				delete(w.pendingTasks, sealHash)
				w.pendingMu.Unlock()
			}
		case <-w.exitCh:
			interrupt()
			return
		}
	}
}

// resultLoop is a standalone goroutine to handle sealing result submitting
// and flush the relevant data to the database.
func (w *worker) resultLoop() {
	defer w.wg.Done()
	for {
		select {
		case block := <-w.resultCh:
			// Short circuit when receiving empty result.
			if block == nil {
				continue
			}
			// Short circuit when receiving duplicate result caused by resubmitting.
			if w.chain.HasBlock(block.Hash(), block.NumberU64()) {
				continue
			}
			var (
				sealhash = w.engine.SealHash(block.Header())
				hash     = block.Hash()
			)
			w.pendingMu.RLock()
			task, exist := w.pendingTasks[sealhash]
			w.pendingMu.RUnlock()
			if !exist {
				log.Error("Block found but no relative pending task", "number", block.Number(), "sealhash", sealhash, "hash", hash)
				continue
			}
			// Different blocks could share the same sealhash, deep copy here to prevent write-write conflict.
			var (
				receipts = make([]*types.Receipt, len(task.receipts))
				logs     []*types.Log
			)
			for i, taskReceipt := range task.receipts {
				receipt := new(types.Receipt)
				receipts[i] = receipt
				*receipt = *taskReceipt

				// add block location fields
				receipt.BlockHash = hash
				receipt.BlockNumber = block.Number()
				receipt.TransactionIndex = uint(i)

				// Update the block hash in all logs since it is now available and not when the
				// receipt/log of individual transactions were created.
				receipt.Logs = make([]*types.Log, len(taskReceipt.Logs))
				for i, taskLog := range taskReceipt.Logs {
					log := new(types.Log)
					receipt.Logs[i] = log
					*log = *taskLog
					log.BlockHash = hash
				}
				logs = append(logs, receipt.Logs...)
			}
			// Commit block and state to database.
			_, err := w.chain.WriteBlockAndSetHead(block, receipts, logs, task.state, true)
			if err != nil {
				log.Error("Failed writing block to chain", "err", err)
				continue
			}
			log.Info("Successfully sealed new block", "number", block.Number(), "sealhash", sealhash, "hash", hash,
				"elapsed", common.PrettyDuration(time.Since(task.createdAt)))

			// Broadcast the block and announce chain insertion event
			w.mux.Post(core.NewMinedBlockEvent{Block: block})

		case <-w.exitCh:
			return
		}
	}
}

// makeEnv creates a new environment for the sealing block.
func (w *worker) makeEnv(parent *types.Header, header *types.Header, coinbase common.Address) (*environment, error) {
	// Retrieve the parent state to execute on top and start a prefetcher for
	// the miner to speed block sealing up a bit.
	state, err := w.chain.StateAt(parent.Root)
	if err != nil {
		return nil, err
	}
	state.StartPrefetcher("miner")

	// Note the passed coinbase may differ from header.Coinbase.
	env := &environment{
		signer:   types.MakeSigner(w.chainConfig, header.Number, header.Time),
		state:    state,
		coinbase: coinbase,
		header:   header,
	}
	// Keep track of transactions which return errors so they can be removed
	env.tcount = 0
	return env, nil
}

// updateSnapshot updates pending snapshot block, receipts and state.
func (w *worker) updateSnapshot(env *environment) {
	w.snapshotMu.Lock()
	defer w.snapshotMu.Unlock()

	w.snapshotBlock = types.NewBlock(
		env.header,
		env.txs,
		nil,
		env.receipts,
		trie.NewStackTrie(nil),
	)
	w.snapshotReceipts = copyReceipts(env.receipts)
	w.snapshotState = env.state.Copy()
}

func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]*types.Log, error) {
	if tx.Type() == types.BlobTxType {
		return w.commitBlobTransaction(env, tx)
	}

	receipt, err := w.applyTransaction(env, tx)
	if err != nil {
		return nil, err
	}
	env.txs = append(env.txs, tx)
	env.receipts = append(env.receipts, receipt)
	return receipt.Logs, nil
}

func (w *worker) commitBlobTransaction(env *environment, tx *types.Transaction) ([]*types.Log, error) {
	sc := tx.BlobTxSidecar()
	if sc == nil {
		panic("blob transaction without blobs in miner")
	}
	// Checking against blob gas limit: It's kind of ugly to perform this check here, but there
	// isn't really a better place right now. The blob gas limit is checked at block validation time
	// and not during execution. This means core.ApplyTransaction will not return an error if the
	// tx has too many blobs. So we have to explicitly check it here.
	if (env.blobs+len(sc.Blobs))*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock {
		return nil, errors.New("max data blobs reached")
	}

	receipt, err := w.applyTransaction(env, tx)
	if err != nil {
		return nil, err
	}
	env.txs = append(env.txs, tx.WithoutBlobTxSidecar())
	env.receipts = append(env.receipts, receipt)
	env.sidecars = append(env.sidecars, sc)
	env.blobs += len(sc.Blobs)
	*env.header.BlobGasUsed += receipt.BlobGasUsed
	return receipt.Logs, nil
}

// applyTransaction runs the transaction. If execution fails, state and gas pool are reverted.
func (w *worker) applyTransaction(env *environment, tx *types.Transaction) (*types.Receipt, error) {
	var (
		snap = env.state.Snapshot()
		gp   = env.gasPool.Gas()
	)
	receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &env.coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig())
	if err != nil {
		env.state.RevertToSnapshot(snap)
		env.gasPool.SetGas(gp)
	}
	return receipt, err
}

func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAndNonce, interrupt *atomic.Int32) error {
	gasLimit := env.header.GasLimit
	if env.gasPool == nil {
		env.gasPool = new(core.GasPool).AddGas(gasLimit)
	}
	var coalescedLogs []*types.Log

	for {
		// Check interruption signal and abort building if it's fired.
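		// Non-none signals are converted into the corresponding
		// errBlockInterruptedBy* errors by signalToErr (bottom of this file),
		// which is how callers such as commitWork and generateWork learn why
		// the build was aborted.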
		if interrupt != nil {
			if signal := interrupt.Load(); signal != commitInterruptNone {
				return signalToErr(signal)
			}
		}
		// If we don't have enough gas for any further transactions then we're done.
		if env.gasPool.Gas() < params.TxGas {
			log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas)
			break
		}
		// Retrieve the next transaction and abort if all done.
		ltx := txs.Peek()
		if ltx == nil {
			break
		}
		tx := ltx.Resolve()
		if tx == nil {
			log.Warn("Ignoring evicted transaction")
			txs.Pop()
			continue
		}

		// Error may be ignored here. The error has already been checked
		// during transaction acceptance in the transaction pool.
		from, _ := types.Sender(env.signer, tx)

		// Check whether the tx is replay protected. If we're not in the EIP155 hf
		// phase, start ignoring the sender until we do.
		if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) {
			log.Trace("Ignoring replay protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block)
			txs.Pop()
			continue
		}

		// Start executing the transaction
		env.state.SetTxContext(tx.Hash(), env.tcount)

		logs, err := w.commitTransaction(env, tx)
		switch {
		case errors.Is(err, core.ErrNonceTooLow):
			// New head notification data race between the transaction pool and miner, shift
			log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
			txs.Shift()

		case errors.Is(err, nil):
			// Everything ok, collect the logs and shift in the next transaction from the same account
			coalescedLogs = append(coalescedLogs, logs...)
			env.tcount++
			txs.Shift()

		default:
			// Transaction is regarded as invalid, drop all consecutive transactions from
			// the same sender because of the `nonce-too-high` clause.
			log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
			txs.Pop()
		}
	}
	if !w.isRunning() && len(coalescedLogs) > 0 {
		// We don't push the pendingLogsEvent while we are sealing. The reason is that
		// when we are sealing, the worker will regenerate a sealing block every 3 seconds.
		// In order to avoid pushing the repeated pendingLog, we disable the pending log pushing.

		// make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
		// logs by filling in the block hash when the block was mined by the local miner. This can
		// cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
		cpy := make([]*types.Log, len(coalescedLogs))
		for i, l := range coalescedLogs {
			cpy[i] = new(types.Log)
			*cpy[i] = *l
		}
		w.pendingLogsFeed.Send(cpy)
	}
	return nil
}

// generateParams wraps the various settings for generating a sealing task.
type generateParams struct {
	timestamp   uint64            // The timestamp for the sealing task
	forceTime   bool              // Flag whether the given timestamp is immutable or not
	parentHash  common.Hash       // Parent block hash, empty means the latest chain head
	coinbase    common.Address    // The fee recipient address for the included transactions
	random      common.Hash       // The randomness generated by beacon chain, empty before the merge
	withdrawals types.Withdrawals // List of withdrawals to include in block.
	beaconRoot  *common.Hash      // The beacon root (cancun field).
	noTxs       bool              // Flag whether an empty block without any transaction is expected
}

// prepareWork constructs the sealing task according to the given parameters,
// either based on the last chain head or the specified parent. In this function
// the pending transactions are not filled yet; only an empty task is returned.
func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
	w.mu.RLock()
	defer w.mu.RUnlock()

	// Find the parent block for the sealing task
	parent := w.chain.CurrentBlock()
	if genParams.parentHash != (common.Hash{}) {
		block := w.chain.GetBlockByHash(genParams.parentHash)
		if block == nil {
			return nil, fmt.Errorf("missing parent")
		}
		parent = block.Header()
	}
	// Sanity check the timestamp correctness, recap the timestamp
	// to parent+1 if the mutation is allowed.
	timestamp := genParams.timestamp
	if parent.Time >= timestamp {
		if genParams.forceTime {
			return nil, fmt.Errorf("invalid timestamp, parent %d given %d", parent.Time, timestamp)
		}
		timestamp = parent.Time + 1
	}
	// Construct the sealing block header.
	header := &types.Header{
		ParentHash: parent.Hash(),
		Number:     new(big.Int).Add(parent.Number, common.Big1),
		GasLimit:   core.CalcGasLimit(parent.GasLimit, w.config.GasCeil),
		Time:       timestamp,
		Coinbase:   genParams.coinbase,
	}
	// Set the extra field.
	if len(w.extra) != 0 {
		header.Extra = w.extra
	}
	// Set the randomness field from the beacon chain if it's available.
	if genParams.random != (common.Hash{}) {
		header.MixDigest = genParams.random
	}
	// Set baseFee and GasLimit if we are on an EIP-1559 chain
	if w.chainConfig.IsLondon(header.Number) {
		header.BaseFee = eip1559.CalcBaseFee(w.chainConfig, parent)
		if !w.chainConfig.IsLondon(parent.Number) {
			parentGasLimit := parent.GasLimit * w.chainConfig.ElasticityMultiplier()
			header.GasLimit = core.CalcGasLimit(parentGasLimit, w.config.GasCeil)
		}
	}
	// Apply EIP-4844, EIP-4788.
	if w.chainConfig.IsCancun(header.Number, header.Time) {
		var excessBlobGas uint64
		if w.chainConfig.IsCancun(parent.Number, parent.Time) {
			excessBlobGas = eip4844.CalcExcessBlobGas(*parent.ExcessBlobGas, *parent.BlobGasUsed)
		} else {
			// For the first post-fork block, both parent.data_gas_used and parent.excess_data_gas are evaluated as 0
			excessBlobGas = eip4844.CalcExcessBlobGas(0, 0)
		}
		header.BlobGasUsed = new(uint64)
		header.ExcessBlobGas = &excessBlobGas
		header.ParentBeaconRoot = genParams.beaconRoot
	}
	// Run the consensus preparation with the default or customized consensus engine.
	if err := w.engine.Prepare(w.chain, header); err != nil {
		log.Error("Failed to prepare header for sealing", "err", err)
		return nil, err
	}
	// Could potentially happen if starting to mine in an odd state.
	// Note genParams.coinbase can differ from header.Coinbase
	// since the clique algorithm can modify the coinbase field in the header.
	env, err := w.makeEnv(parent, header, genParams.coinbase)
	if err != nil {
		log.Error("Failed to create sealing context", "err", err)
		return nil, err
	}
	if header.ParentBeaconRoot != nil {
		context := core.NewEVMBlockContext(header, w.chain, nil)
		vmenv := vm.NewEVM(context, vm.TxContext{}, env.state, w.chainConfig, vm.Config{})
		core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, vmenv, env.state)
	}
	return env, nil
}

// fillTransactions retrieves the pending transactions from the txpool and fills them
// into the given sealing block. The transaction selection and ordering strategy can
// be customized with the plugin in the future.
func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) error {
	pending := w.eth.TxPool().Pending(true)

	// Split the pending transactions into locals and remotes.
	localTxs, remoteTxs := make(map[common.Address][]*txpool.LazyTransaction), pending
	for _, account := range w.eth.TxPool().Locals() {
		if txs := remoteTxs[account]; len(txs) > 0 {
			delete(remoteTxs, account)
			localTxs[account] = txs
		}
	}

	// Fill the block with all available pending transactions.
	if len(localTxs) > 0 {
		txs := newTransactionsByPriceAndNonce(env.signer, localTxs, env.header.BaseFee)
		if err := w.commitTransactions(env, txs, interrupt); err != nil {
			return err
		}
	}
	if len(remoteTxs) > 0 {
		txs := newTransactionsByPriceAndNonce(env.signer, remoteTxs, env.header.BaseFee)
		if err := w.commitTransactions(env, txs, interrupt); err != nil {
			return err
		}
	}
	return nil
}

// generateWork generates a sealing block based on the given parameters.
func (w *worker) generateWork(params *generateParams) *newPayloadResult {
	work, err := w.prepareWork(params)
	if err != nil {
		return &newPayloadResult{err: err}
	}
	defer work.discard()

	if !params.noTxs {
		interrupt := new(atomic.Int32)
		timer := time.AfterFunc(w.newpayloadTimeout, func() {
			interrupt.Store(commitInterruptTimeout)
		})
		defer timer.Stop()

		err := w.fillTransactions(interrupt, work)
		if errors.Is(err, errBlockInterruptedByTimeout) {
			log.Warn("Block building is interrupted", "allowance", common.PrettyDuration(w.newpayloadTimeout))
		}
	}
	block, err := w.engine.FinalizeAndAssemble(w.chain, work.header, work.state, work.txs, nil, work.receipts, params.withdrawals)
	if err != nil {
		return &newPayloadResult{err: err}
	}
	return &newPayloadResult{
		block:    block,
		fees:     totalFees(block, work.receipts),
		sidecars: work.sidecars,
	}
}

// commitWork generates several new sealing tasks based on the parent block
// and submits them to the sealer.
func (w *worker) commitWork(interrupt *atomic.Int32, timestamp int64) {
	// Abort committing if the node is still syncing
	if w.syncing.Load() {
		return
	}
	start := time.Now()

	// Set the coinbase if the worker is running or it's required
	var coinbase common.Address
	if w.isRunning() {
		coinbase = w.etherbase()
		if coinbase == (common.Address{}) {
			log.Error("Refusing to mine without etherbase")
			return
		}
	}
	work, err := w.prepareWork(&generateParams{
		timestamp: uint64(timestamp),
		coinbase:  coinbase,
	})
	if err != nil {
		return
	}
	// Fill pending transactions from the txpool into the block.
	err = w.fillTransactions(interrupt, work)
	switch {
	case err == nil:
		// The entire block is filled, decrease resubmit interval in case
		// the current interval is larger than the user-specified one.
		w.resubmitAdjustCh <- &intervalAdjust{inc: false}

	case errors.Is(err, errBlockInterruptedByRecommit):
		// Notify resubmit loop to increase resubmitting interval if the
		// interruption is due to frequent commits.
		gaslimit := work.header.GasLimit
		ratio := float64(gaslimit-work.gasPool.Gas()) / float64(gaslimit)
		if ratio < 0.1 {
			ratio = 0.1
		}
		w.resubmitAdjustCh <- &intervalAdjust{
			ratio: ratio,
			inc:   true,
		}

	case errors.Is(err, errBlockInterruptedByNewHead):
		// If the block building is interrupted by the newhead event, discard it
		// totally. Committing the interrupted block introduces unnecessary
		// delay, and possibly causes the miner to mine on the previous head,
		// which could result in a higher uncle rate.
		work.discard()
		return
	}
	// Submit the generated block for consensus sealing.
	w.commit(work.copy(), w.fullTaskHook, true, start)

	// Swap out the old work with the new one, terminating any leftover
	// prefetcher processes in the meantime and starting a new one.
	if w.current != nil {
		w.current.discard()
	}
	w.current = work
}

// commit runs any post-transaction state modifications, assembles the final block
// and commits new work if the consensus engine is running.
// Note the assumption is held that mutation of the passed env is allowed; do
// the deep copy first.
func (w *worker) commit(env *environment, interval func(), update bool, start time.Time) error {
	if w.isRunning() {
		if interval != nil {
			interval()
		}
		// Create a local environment copy, avoid the data race with snapshot state.
		// https://github.com/theQRL/go-zond/issues/24299
		env := env.copy()
		// Withdrawals are set to nil here, because this is only called in PoW.
		block, err := w.engine.FinalizeAndAssemble(w.chain, env.header, env.state, env.txs, nil, env.receipts, nil)
		if err != nil {
			return err
		}
		// If we're post merge, just ignore
		if !w.isTTDReached(block.Header()) {
			select {
			case w.taskCh <- &task{receipts: env.receipts, state: env.state, block: block, createdAt: time.Now()}:
				fees := totalFees(block, env.receipts)
				feesInEther := new(big.Float).Quo(new(big.Float).SetInt(fees), big.NewFloat(params.Ether))
				log.Info("Commit new sealing work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
					"txs", env.tcount, "gas", block.GasUsed(), "fees", feesInEther,
					"elapsed", common.PrettyDuration(time.Since(start)))

			case <-w.exitCh:
				log.Info("Worker has exited")
			}
		}
	}
	if update {
		w.updateSnapshot(env)
	}
	return nil
}

// getSealingBlock generates the sealing block based on the given parameters.
// The generation result will be passed back via the given channel whether or
// not the generation itself succeeds.
func (w *worker) getSealingBlock(params *generateParams) *newPayloadResult {
	req := &getWorkReq{
		params: params,
		result: make(chan *newPayloadResult, 1),
	}
	select {
	case w.getWorkCh <- req:
		return <-req.result
	case <-w.exitCh:
		return &newPayloadResult{err: errors.New("miner closed")}
	}
}

// isTTDReached returns the indicator if the given block has reached the total
// terminal difficulty for The Merge transition.
func (w *worker) isTTDReached(header *types.Header) bool {
	td, ttd := w.chain.GetTd(header.ParentHash, header.Number.Uint64()-1), w.chain.Config().TerminalTotalDifficulty
	return td != nil && ttd != nil && td.Cmp(ttd) >= 0
}

// copyReceipts makes a deep copy of the given receipts.
func copyReceipts(receipts []*types.Receipt) []*types.Receipt {
	result := make([]*types.Receipt, len(receipts))
	for i, l := range receipts {
		cpy := *l
		result[i] = &cpy
	}
	return result
}

// totalFees computes the total consumed miner fees in wei. Block transactions and receipts must be in the same order.
func totalFees(block *types.Block, receipts []*types.Receipt) *big.Int {
	feesWei := new(big.Int)
	for i, tx := range block.Transactions() {
		minerFee, _ := tx.EffectiveGasTip(block.BaseFee())
		feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), minerFee))
	}
	return feesWei
}

// signalToErr converts the interruption signal to a concrete error type for return.
// The given signal must be a valid interruption signal.
func signalToErr(signal int32) error {
	switch signal {
	case commitInterruptNewHead:
		return errBlockInterruptedByNewHead
	case commitInterruptResubmit:
		return errBlockInterruptedByRecommit
	case commitInterruptTimeout:
		return errBlockInterruptedByTimeout
	default:
		panic(fmt.Errorf("undefined signal %d", signal))
	}
}