github.com/codysnider/go-ethereum@v1.10.18-0.20220420071915-14f4ae99222a/miner/worker.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package miner

import (
    "errors"
    "fmt"
    "math/big"
    "sync"
    "sync/atomic"
    "time"

    mapset "github.com/deckarep/golang-set"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/consensus"
    "github.com/ethereum/go-ethereum/consensus/misc"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/state"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/event"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/params"
    "github.com/ethereum/go-ethereum/trie"
)

const (
    // resultQueueSize is the size of channel listening to sealing result.
    resultQueueSize = 10

    // txChanSize is the size of channel listening to NewTxsEvent.
    // The number is referenced from the size of tx pool.
    txChanSize = 4096

    // chainHeadChanSize is the size of channel listening to ChainHeadEvent.
    chainHeadChanSize = 10

    // chainSideChanSize is the size of channel listening to ChainSideEvent.
    chainSideChanSize = 10

    // resubmitAdjustChanSize is the size of resubmitting interval adjustment channel.
    resubmitAdjustChanSize = 10

    // sealingLogAtDepth is the number of confirmations before logging successful sealing.
    sealingLogAtDepth = 7

    // minRecommitInterval is the minimal time interval to recreate the sealing block with
    // any newly arrived transactions.
    minRecommitInterval = 1 * time.Second

    // maxRecommitInterval is the maximum time interval to recreate the sealing block with
    // any newly arrived transactions.
    maxRecommitInterval = 15 * time.Second

    // intervalAdjustRatio is the impact a single interval adjustment has on sealing work
    // resubmitting interval.
    intervalAdjustRatio = 0.1

    // intervalAdjustBias is applied during the new resubmit interval calculation in favor of
    // increasing upper limit or decreasing lower limit so that the limit can be reachable.
    intervalAdjustBias = 200 * 1000.0 * 1000.0

    // staleThreshold is the maximum depth of the acceptable stale block.
    staleThreshold = 7
)
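
// Editor's note — an illustrative worked example, not part of the original source:
// the recommit interval is adjusted by recalcRecommit (defined further below) as an
// exponential moving average weighted by intervalAdjustRatio, with intervalAdjustBias
// (200ms expressed in nanoseconds) nudging the result toward the bound being approached.
// For example, an "increase" adjustment of a 1s interval toward a 10s target:
//
//    next = 0.9*1s    + 0.1*(10s + 0.2s) = 1.92s
//    next = 0.9*1.92s + 0.1*(10s + 0.2s) = 2.748s
//
// i.e. each adjustment closes roughly 10% of the remaining gap, and the result is
// clamped to the [minRecommit, maxRecommitInterval] range.
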
// environment is the worker's current environment and holds all
// information of the sealing block generation.
type environment struct {
    signer types.Signer

    state     *state.StateDB // apply state changes here
    ancestors mapset.Set     // ancestor set (used for checking uncle parent validity)
    family    mapset.Set     // family set (used for checking uncle invalidity)
    tcount    int            // tx count in cycle
    gasPool   *core.GasPool  // available gas used to pack transactions
    coinbase  common.Address

    header   *types.Header
    txs      []*types.Transaction
    receipts []*types.Receipt
    uncles   map[common.Hash]*types.Header
}

// copy creates a deep copy of environment.
func (env *environment) copy() *environment {
    cpy := &environment{
        signer:    env.signer,
        state:     env.state.Copy(),
        ancestors: env.ancestors.Clone(),
        family:    env.family.Clone(),
        tcount:    env.tcount,
        coinbase:  env.coinbase,
        header:    types.CopyHeader(env.header),
        receipts:  copyReceipts(env.receipts),
    }
    if env.gasPool != nil {
        gasPool := *env.gasPool
        cpy.gasPool = &gasPool
    }
    // The content of txs and uncles is immutable, so it is unnecessary
    // to do the expensive deep copy for them.
    cpy.txs = make([]*types.Transaction, len(env.txs))
    copy(cpy.txs, env.txs)
    cpy.uncles = make(map[common.Hash]*types.Header)
    for hash, uncle := range env.uncles {
        cpy.uncles[hash] = uncle
    }
    return cpy
}

// unclelist returns the contained uncles in list format.
func (env *environment) unclelist() []*types.Header {
    var uncles []*types.Header
    for _, uncle := range env.uncles {
        uncles = append(uncles, uncle)
    }
    return uncles
}

// discard terminates the background prefetcher go-routine. It should
// always be called for all created environment instances, otherwise
// a go-routine leak can happen.
func (env *environment) discard() {
    if env.state == nil {
        return
    }
    env.state.StopPrefetcher()
}

// task contains all information for consensus engine sealing and result submitting.
type task struct {
    receipts  []*types.Receipt
    state     *state.StateDB
    block     *types.Block
    createdAt time.Time
}

const (
    commitInterruptNone int32 = iota
    commitInterruptNewHead
    commitInterruptResubmit
)

// newWorkReq represents a request to submit new sealing work with a relative interrupt notifier.
type newWorkReq struct {
    interrupt *int32
    noempty   bool
    timestamp int64
}

// getWorkReq represents a request for getting new sealing work with the provided parameters.
type getWorkReq struct {
    params *generateParams
    err    error
    result chan *types.Block
}

// intervalAdjust represents a resubmitting interval adjustment.
type intervalAdjust struct {
    ratio float64
    inc   bool
}
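
// Editor's note — illustrative sketch, not part of the original source: the
// commitInterrupt* values above travel through a shared *int32. newWorkLoop stores a
// signal into the current pointer to abort the in-flight round, then allocates a fresh
// one for the next work request, while commitTransactions polls it between transactions:
//
//    atomic.StoreInt32(interrupt, commitInterruptNewHead)  // abort: a new head arrived
//    interrupt = new(int32)                                 // fresh notifier, handed out via newWorkReq
//    ...
//    if atomic.LoadInt32(interrupt) != commitInterruptNone {
//        // stop packing; discard or submit the half-built block
//    }
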
// worker is the main object which takes care of submitting new work to consensus engine
// and gathering the sealing result.
type worker struct {
    config      *Config
    chainConfig *params.ChainConfig
    engine      consensus.Engine
    eth         Backend
    chain       *core.BlockChain

    // Feeds
    pendingLogsFeed event.Feed

    // Subscriptions
    mux          *event.TypeMux
    txsCh        chan core.NewTxsEvent
    txsSub       event.Subscription
    chainHeadCh  chan core.ChainHeadEvent
    chainHeadSub event.Subscription
    chainSideCh  chan core.ChainSideEvent
    chainSideSub event.Subscription

    // Channels
    newWorkCh          chan *newWorkReq
    getWorkCh          chan *getWorkReq
    taskCh             chan *task
    resultCh           chan *types.Block
    startCh            chan struct{}
    exitCh             chan struct{}
    resubmitIntervalCh chan time.Duration
    resubmitAdjustCh   chan *intervalAdjust

    wg sync.WaitGroup

    current      *environment                 // An environment for current running cycle.
    localUncles  map[common.Hash]*types.Block // A set of side blocks generated locally as the possible uncle blocks.
    remoteUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks.
    unconfirmed  *unconfirmedBlocks           // A set of locally mined blocks pending canonicalness confirmations.

    mu       sync.RWMutex // The lock used to protect the coinbase and extra fields
    coinbase common.Address
    extra    []byte

    pendingMu    sync.RWMutex
    pendingTasks map[common.Hash]*task

    snapshotMu       sync.RWMutex // The lock used to protect the snapshots below
    snapshotBlock    *types.Block
    snapshotReceipts types.Receipts
    snapshotState    *state.StateDB

    // atomic status counters
    running int32 // The indicator whether the consensus engine is running or not.
    newTxs  int32 // New arrival transaction count since last sealing work submitting.

    // noempty is the flag used to control whether the pre-seal empty block
    // feature is enabled. The default value is false (pre-sealing is enabled
    // by default). But in some special scenarios the consensus engine will
    // seal blocks instantaneously; in that case this feature would add empty
    // blocks into the canonical chain non-stop and no real transaction would
    // be included.
    noempty uint32

    // External functions
    isLocalBlock func(header *types.Header) bool // Function used to determine whether the specified block is mined by local miner.

    // Test hooks
    newTaskHook  func(*task)                        // Method to call upon receiving a new sealing task.
    skipSealHook func(*task) bool                   // Method to decide whether skipping the sealing.
    fullTaskHook func()                             // Method to call before pushing the full sealing task.
    resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
}

func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(header *types.Header) bool, init bool) *worker {
    worker := &worker{
        config:             config,
        chainConfig:        chainConfig,
        engine:             engine,
        eth:                eth,
        mux:                mux,
        chain:              eth.BlockChain(),
        isLocalBlock:       isLocalBlock,
        localUncles:        make(map[common.Hash]*types.Block),
        remoteUncles:       make(map[common.Hash]*types.Block),
        unconfirmed:        newUnconfirmedBlocks(eth.BlockChain(), sealingLogAtDepth),
        pendingTasks:       make(map[common.Hash]*task),
        txsCh:              make(chan core.NewTxsEvent, txChanSize),
        chainHeadCh:        make(chan core.ChainHeadEvent, chainHeadChanSize),
        chainSideCh:        make(chan core.ChainSideEvent, chainSideChanSize),
        newWorkCh:          make(chan *newWorkReq),
        getWorkCh:          make(chan *getWorkReq),
        taskCh:             make(chan *task),
        resultCh:           make(chan *types.Block, resultQueueSize),
        exitCh:             make(chan struct{}),
        startCh:            make(chan struct{}, 1),
        resubmitIntervalCh: make(chan time.Duration),
        resubmitAdjustCh:   make(chan *intervalAdjust, resubmitAdjustChanSize),
    }
    // Subscribe NewTxsEvent for tx pool
    worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh)
    // Subscribe events for blockchain
    worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh)
    worker.chainSideSub = eth.BlockChain().SubscribeChainSideEvent(worker.chainSideCh)

    // Sanitize recommit interval if the user-specified one is too short.
    recommit := worker.config.Recommit
    if recommit < minRecommitInterval {
        log.Warn("Sanitizing miner recommit interval", "provided", recommit, "updated", minRecommitInterval)
        recommit = minRecommitInterval
    }

    worker.wg.Add(4)
    go worker.mainLoop()
    go worker.newWorkLoop(recommit)
    go worker.resultLoop()
    go worker.taskLoop()

    // Submit first work to initialize pending state.
    if init {
        worker.startCh <- struct{}{}
    }
    return worker
}

// setEtherbase sets the etherbase used to initialize the block coinbase field.
func (w *worker) setEtherbase(addr common.Address) {
    w.mu.Lock()
    defer w.mu.Unlock()
    w.coinbase = addr
}

func (w *worker) setGasCeil(ceil uint64) {
    w.mu.Lock()
    defer w.mu.Unlock()
    w.config.GasCeil = ceil
}

// setExtra sets the content used to initialize the block extra field.
func (w *worker) setExtra(extra []byte) {
    w.mu.Lock()
    defer w.mu.Unlock()
    w.extra = extra
}

// setRecommitInterval updates the interval for miner sealing work recommitting.
func (w *worker) setRecommitInterval(interval time.Duration) {
    select {
    case w.resubmitIntervalCh <- interval:
    case <-w.exitCh:
    }
}

// disablePreseal disables pre-sealing feature
func (w *worker) disablePreseal() {
    atomic.StoreUint32(&w.noempty, 1)
}

// enablePreseal enables pre-sealing feature
func (w *worker) enablePreseal() {
    atomic.StoreUint32(&w.noempty, 0)
}

// pending returns the pending state and corresponding block.
func (w *worker) pending() (*types.Block, *state.StateDB) {
    // return a snapshot to avoid contention on currentMu mutex
    w.snapshotMu.RLock()
    defer w.snapshotMu.RUnlock()
    if w.snapshotState == nil {
        return nil, nil
    }
    return w.snapshotBlock, w.snapshotState.Copy()
}
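
// Editor's note — illustrative usage sketch, not part of the original source. It
// assumes a *worker w returned by newWorker above; coinbaseAddr is a hypothetical
// common.Address, and only methods defined in this file are used:
//
//    w.setEtherbase(coinbaseAddr)           // fee recipient for sealed blocks
//    w.setRecommitInterval(3 * time.Second) // how often sealing work is refreshed
//    w.start()                              // begin sealing
//    block, stateDB := w.pending()          // snapshot of the block being built
//    w.stop()                               // pause sealing; background loops stay alive
//    w.close()                              // terminate all loops; w is not reusable
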
// pendingBlock returns pending block.
func (w *worker) pendingBlock() *types.Block {
    // return a snapshot to avoid contention on currentMu mutex
    w.snapshotMu.RLock()
    defer w.snapshotMu.RUnlock()
    return w.snapshotBlock
}

// pendingBlockAndReceipts returns pending block and corresponding receipts.
func (w *worker) pendingBlockAndReceipts() (*types.Block, types.Receipts) {
    // return a snapshot to avoid contention on currentMu mutex
    w.snapshotMu.RLock()
    defer w.snapshotMu.RUnlock()
    return w.snapshotBlock, w.snapshotReceipts
}

// start sets the running status as 1 and triggers new work submitting.
func (w *worker) start() {
    atomic.StoreInt32(&w.running, 1)
    w.startCh <- struct{}{}
}

// stop sets the running status as 0.
func (w *worker) stop() {
    atomic.StoreInt32(&w.running, 0)
}

// isRunning returns an indicator whether worker is running or not.
func (w *worker) isRunning() bool {
    return atomic.LoadInt32(&w.running) == 1
}

// close terminates all background threads maintained by the worker.
// Note the worker does not support being closed multiple times.
func (w *worker) close() {
    atomic.StoreInt32(&w.running, 0)
    close(w.exitCh)
    w.wg.Wait()
}

// recalcRecommit recalculates the resubmitting interval upon feedback.
func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) time.Duration {
    var (
        prevF = float64(prev.Nanoseconds())
        next  float64
    )
    if inc {
        next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias)
        max := float64(maxRecommitInterval.Nanoseconds())
        if next > max {
            next = max
        }
    } else {
        next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias)
        min := float64(minRecommit.Nanoseconds())
        if next < min {
            next = min
        }
    }
    return time.Duration(int64(next))
}

// newWorkLoop is a standalone goroutine to submit new sealing work upon received events.
func (w *worker) newWorkLoop(recommit time.Duration) {
    defer w.wg.Done()
    var (
        interrupt   *int32
        minRecommit = recommit // minimal resubmit interval specified by user.
        timestamp   int64      // timestamp for each round of sealing.
    )

    timer := time.NewTimer(0)
    defer timer.Stop()
    <-timer.C // discard the initial tick

    // commit aborts in-flight transaction execution with given signal and resubmits a new one.
    commit := func(noempty bool, s int32) {
        if interrupt != nil {
            atomic.StoreInt32(interrupt, s)
        }
        interrupt = new(int32)
        select {
        case w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty, timestamp: timestamp}:
        case <-w.exitCh:
            return
        }
        timer.Reset(recommit)
        atomic.StoreInt32(&w.newTxs, 0)
    }
    // clearPending cleans the stale pending tasks.
    clearPending := func(number uint64) {
        w.pendingMu.Lock()
        for h, t := range w.pendingTasks {
            if t.block.NumberU64()+staleThreshold <= number {
                delete(w.pendingTasks, h)
            }
        }
        w.pendingMu.Unlock()
    }

    for {
        select {
        case <-w.startCh:
            clearPending(w.chain.CurrentBlock().NumberU64())
            timestamp = time.Now().Unix()
            commit(false, commitInterruptNewHead)

        case head := <-w.chainHeadCh:
            clearPending(head.Block.NumberU64())
            timestamp = time.Now().Unix()
            commit(false, commitInterruptNewHead)

        case <-timer.C:
            // If sealing is running, resubmit a new work cycle periodically to pull in
            // higher priced transactions. Disable this overhead for pending blocks.
            if w.isRunning() && (w.chainConfig.Clique == nil || w.chainConfig.Clique.Period > 0) {
                // Short circuit if no new transaction arrives.
                if atomic.LoadInt32(&w.newTxs) == 0 {
                    timer.Reset(recommit)
                    continue
                }
                commit(true, commitInterruptResubmit)
            }

        case interval := <-w.resubmitIntervalCh:
            // Adjust resubmit interval explicitly by user.
            if interval < minRecommitInterval {
                log.Warn("Sanitizing miner recommit interval", "provided", interval, "updated", minRecommitInterval)
                interval = minRecommitInterval
            }
            log.Info("Miner recommit interval update", "from", minRecommit, "to", interval)
            minRecommit, recommit = interval, interval

            if w.resubmitHook != nil {
                w.resubmitHook(minRecommit, recommit)
            }

        case adjust := <-w.resubmitAdjustCh:
            // Adjust resubmit interval by feedback.
            if adjust.inc {
                before := recommit
                target := float64(recommit.Nanoseconds()) / adjust.ratio
                recommit = recalcRecommit(minRecommit, recommit, target, true)
                log.Trace("Increase miner recommit interval", "from", before, "to", recommit)
            } else {
                before := recommit
                recommit = recalcRecommit(minRecommit, recommit, float64(minRecommit.Nanoseconds()), false)
                log.Trace("Decrease miner recommit interval", "from", before, "to", recommit)
            }

            if w.resubmitHook != nil {
                w.resubmitHook(minRecommit, recommit)
            }

        case <-w.exitCh:
            return
        }
    }
}

// mainLoop is responsible for generating and submitting sealing work based on
// the received event. It can support two modes: automatically generate task and
// submit it or return task according to given parameters for various purposes.
func (w *worker) mainLoop() {
    defer w.wg.Done()
    defer w.txsSub.Unsubscribe()
    defer w.chainHeadSub.Unsubscribe()
    defer w.chainSideSub.Unsubscribe()
    defer func() {
        if w.current != nil {
            w.current.discard()
        }
    }()

    cleanTicker := time.NewTicker(time.Second * 10)
    defer cleanTicker.Stop()

    for {
        select {
        case req := <-w.newWorkCh:
            w.commitWork(req.interrupt, req.noempty, req.timestamp)

        case req := <-w.getWorkCh:
            block, err := w.generateWork(req.params)
            if err != nil {
                req.err = err
                req.result <- nil
            } else {
                req.result <- block
            }

        case ev := <-w.chainSideCh:
            // Short circuit for duplicate side blocks
            if _, exist := w.localUncles[ev.Block.Hash()]; exist {
                continue
            }
            if _, exist := w.remoteUncles[ev.Block.Hash()]; exist {
                continue
            }
            // Add side block to possible uncle block set depending on the author.
            if w.isLocalBlock != nil && w.isLocalBlock(ev.Block.Header()) {
                w.localUncles[ev.Block.Hash()] = ev.Block
            } else {
                w.remoteUncles[ev.Block.Hash()] = ev.Block
            }
            // If our sealing block contains fewer than 2 uncle blocks,
            // add the new uncle block if valid and regenerate a new
            // sealing block for higher profit.
            if w.isRunning() && w.current != nil && len(w.current.uncles) < 2 {
                start := time.Now()
                if err := w.commitUncle(w.current, ev.Block.Header()); err == nil {
                    w.commit(w.current.copy(), nil, true, start)
                }
            }

        case <-cleanTicker.C:
            chainHead := w.chain.CurrentBlock()
            for hash, uncle := range w.localUncles {
                if uncle.NumberU64()+staleThreshold <= chainHead.NumberU64() {
                    delete(w.localUncles, hash)
                }
            }
            for hash, uncle := range w.remoteUncles {
                if uncle.NumberU64()+staleThreshold <= chainHead.NumberU64() {
                    delete(w.remoteUncles, hash)
                }
            }

        case ev := <-w.txsCh:
            // Apply transactions to the pending state if we're not sealing
            //
            // Note all transactions received may not be continuous with transactions
            // already included in the current sealing block. These transactions will
            // be automatically eliminated.
            if !w.isRunning() && w.current != nil {
                // If block is already full, abort
                if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas {
                    continue
                }
                txs := make(map[common.Address]types.Transactions)
                for _, tx := range ev.Txs {
                    acc, _ := types.Sender(w.current.signer, tx)
                    txs[acc] = append(txs[acc], tx)
                }
                txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee)
                tcount := w.current.tcount
                w.commitTransactions(w.current, txset, nil)

                // Only update the snapshot if any new transactions were added
                // to the pending block
                if tcount != w.current.tcount {
                    w.updateSnapshot(w.current)
                }
            } else {
                // Special case: if the consensus engine is 0-period clique (dev mode),
                // submit sealing work here since all empty submissions will be rejected
                // by clique. Of course the advance sealing (empty submission) is disabled.
                if w.chainConfig.Clique != nil && w.chainConfig.Clique.Period == 0 {
                    w.commitWork(nil, true, time.Now().Unix())
                }
            }
            atomic.AddInt32(&w.newTxs, int32(len(ev.Txs)))

        // System stopped
        case <-w.exitCh:
            return
        case <-w.txsSub.Err():
            return
        case <-w.chainHeadSub.Err():
            return
        case <-w.chainSideSub.Err():
            return
        }
    }
}

// taskLoop is a standalone goroutine to fetch sealing tasks from the generator and
// push them to the consensus engine.
func (w *worker) taskLoop() {
    defer w.wg.Done()
    var (
        stopCh chan struct{}
        prev   common.Hash
    )

    // interrupt aborts the in-flight sealing task.
    interrupt := func() {
        if stopCh != nil {
            close(stopCh)
            stopCh = nil
        }
    }
    for {
        select {
        case task := <-w.taskCh:
            if w.newTaskHook != nil {
                w.newTaskHook(task)
            }
            // Reject duplicate sealing work due to resubmitting.
            sealHash := w.engine.SealHash(task.block.Header())
            if sealHash == prev {
                continue
            }
            // Interrupt previous sealing operation
            interrupt()
            stopCh, prev = make(chan struct{}), sealHash

            if w.skipSealHook != nil && w.skipSealHook(task) {
                continue
            }
            w.pendingMu.Lock()
            w.pendingTasks[sealHash] = task
            w.pendingMu.Unlock()

            if err := w.engine.Seal(w.chain, task.block, w.resultCh, stopCh); err != nil {
                log.Warn("Block sealing failed", "err", err)
                w.pendingMu.Lock()
                delete(w.pendingTasks, sealHash)
                w.pendingMu.Unlock()
            }
        case <-w.exitCh:
            interrupt()
            return
        }
    }
}

// resultLoop is a standalone goroutine to handle sealing result submitting
// and flush relative data to the database.
func (w *worker) resultLoop() {
    defer w.wg.Done()
    for {
        select {
        case block := <-w.resultCh:
            // Short circuit when receiving empty result.
            if block == nil {
                continue
            }
            // Short circuit when receiving duplicate result caused by resubmitting.
            if w.chain.HasBlock(block.Hash(), block.NumberU64()) {
                continue
            }
            var (
                sealhash = w.engine.SealHash(block.Header())
                hash     = block.Hash()
            )
            w.pendingMu.RLock()
            task, exist := w.pendingTasks[sealhash]
            w.pendingMu.RUnlock()
            if !exist {
                log.Error("Block found but no relative pending task", "number", block.Number(), "sealhash", sealhash, "hash", hash)
                continue
            }
            // Different blocks could share the same sealhash, deep copy here to prevent write-write conflict.
            var (
                receipts = make([]*types.Receipt, len(task.receipts))
                logs     []*types.Log
            )
            for i, taskReceipt := range task.receipts {
                receipt := new(types.Receipt)
                receipts[i] = receipt
                *receipt = *taskReceipt

                // add block location fields
                receipt.BlockHash = hash
                receipt.BlockNumber = block.Number()
                receipt.TransactionIndex = uint(i)

                // Update the block hash in all logs since it is now available and not when the
                // receipt/log of individual transactions were created.
                receipt.Logs = make([]*types.Log, len(taskReceipt.Logs))
                for i, taskLog := range taskReceipt.Logs {
                    log := new(types.Log)
                    receipt.Logs[i] = log
                    *log = *taskLog
                    log.BlockHash = hash
                }
                logs = append(logs, receipt.Logs...)
            }
            // Commit block and state to database.
            _, err := w.chain.WriteBlockAndSetHead(block, receipts, logs, task.state, true)
            if err != nil {
                log.Error("Failed writing block to chain", "err", err)
                continue
            }
            log.Info("Successfully sealed new block", "number", block.Number(), "sealhash", sealhash, "hash", hash,
                "elapsed", common.PrettyDuration(time.Since(task.createdAt)))

            // Broadcast the block and announce chain insertion event
            w.mux.Post(core.NewMinedBlockEvent{Block: block})

            // Insert the block into the set of pending ones to resultLoop for confirmations
            w.unconfirmed.Insert(block.NumberU64(), block.Hash())

        case <-w.exitCh:
            return
        }
    }
}

// makeEnv creates a new environment for the sealing block.
func (w *worker) makeEnv(parent *types.Block, header *types.Header, coinbase common.Address) (*environment, error) {
    // Retrieve the parent state to execute on top and start a prefetcher for
    // the miner to speed block sealing up a bit.
    state, err := w.chain.StateAt(parent.Root())
    if err != nil {
        // Note: since the sealing block can be created upon an arbitrary parent
        // block, the state of the parent block may already be pruned, so the
        // necessary state recovery is needed here in the future.
        //
        // The maximum acceptable reorg depth can be limited by the finalised block
        // somehow. TODO(rjl493456442) fix the hard-coded number here later.
        state, err = w.eth.StateAtBlock(parent, 1024, nil, false, false)
        log.Warn("Recovered mining state", "root", parent.Root(), "err", err)
    }
    if err != nil {
        return nil, err
    }
    state.StartPrefetcher("miner")

    // Note the passed coinbase may be different from header.Coinbase.
    env := &environment{
        signer:    types.MakeSigner(w.chainConfig, header.Number),
        state:     state,
        coinbase:  coinbase,
        ancestors: mapset.NewSet(),
        family:    mapset.NewSet(),
        header:    header,
        uncles:    make(map[common.Hash]*types.Header),
    }
    // when 08 is processed ancestors contain 07 (quick block)
    for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) {
        for _, uncle := range ancestor.Uncles() {
            env.family.Add(uncle.Hash())
        }
        env.family.Add(ancestor.Hash())
        env.ancestors.Add(ancestor.Hash())
    }
    // Keep track of transactions which return errors so they can be removed
    env.tcount = 0
    return env, nil
}

// commitUncle adds the given block to the uncle block set, returning an error if it fails to add.
func (w *worker) commitUncle(env *environment, uncle *types.Header) error {
    if w.isTTDReached(env.header) {
        return errors.New("ignore uncle for beacon block")
    }
    hash := uncle.Hash()
    if _, exist := env.uncles[hash]; exist {
        return errors.New("uncle not unique")
    }
    if env.header.ParentHash == uncle.ParentHash {
        return errors.New("uncle is sibling")
    }
    if !env.ancestors.Contains(uncle.ParentHash) {
        return errors.New("uncle's parent unknown")
    }
    if env.family.Contains(hash) {
        return errors.New("uncle already included")
    }
    env.uncles[hash] = uncle
    return nil
}

// updateSnapshot updates pending snapshot block, receipts and state.
func (w *worker) updateSnapshot(env *environment) {
    w.snapshotMu.Lock()
    defer w.snapshotMu.Unlock()

    w.snapshotBlock = types.NewBlock(
        env.header,
        env.txs,
        env.unclelist(),
        env.receipts,
        trie.NewStackTrie(nil),
    )
    w.snapshotReceipts = copyReceipts(env.receipts)
    w.snapshotState = env.state.Copy()
}

func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]*types.Log, error) {
    snap := env.state.Snapshot()

    receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &env.coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig())
    if err != nil {
        env.state.RevertToSnapshot(snap)
        return nil, err
    }
    env.txs = append(env.txs, tx)
    env.receipts = append(env.receipts, receipt)

    return receipt.Logs, nil
}
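
// Editor's note — sketch, not part of the original source: commitTransaction above
// leans on the StateDB journal so that one failing transaction never poisons the block
// being built. The pattern is:
//
//    snap := env.state.Snapshot()                // mark a revision
//    receipt, err := core.ApplyTransaction(...)  // try the transaction
//    if err != nil {
//        env.state.RevertToSnapshot(snap)        // roll back its state changes
//    }
//
// Only on success are the transaction and receipt appended to the environment.
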
func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByPriceAndNonce, interrupt *int32) bool {
    gasLimit := env.header.GasLimit
    if env.gasPool == nil {
        env.gasPool = new(core.GasPool).AddGas(gasLimit)
    }
    var coalescedLogs []*types.Log

    for {
        // In the following three cases, we will interrupt the execution of the transaction.
        // (1) new head block event arrival, the interrupt signal is 1
        // (2) worker start or restart, the interrupt signal is 1
        // (3) worker recreates the sealing block with any newly arrived transactions, the interrupt signal is 2.
        // For the first two cases, the semi-finished work will be discarded.
        // For the third case, the semi-finished work will be submitted to the consensus engine.
        if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone {
            // Notify resubmit loop to increase resubmitting interval due to too frequent commits.
            if atomic.LoadInt32(interrupt) == commitInterruptResubmit {
                ratio := float64(gasLimit-env.gasPool.Gas()) / float64(gasLimit)
                if ratio < 0.1 {
                    ratio = 0.1
                }
                w.resubmitAdjustCh <- &intervalAdjust{
                    ratio: ratio,
                    inc:   true,
                }
            }
            return atomic.LoadInt32(interrupt) == commitInterruptNewHead
        }
        // If we don't have enough gas for any further transactions then we're done
        if env.gasPool.Gas() < params.TxGas {
            log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas)
            break
        }
        // Retrieve the next transaction and abort if all done
        tx := txs.Peek()
        if tx == nil {
            break
        }
        // Error may be ignored here. The error has already been checked
        // during transaction acceptance in the transaction pool.
        //
        // We use the eip155 signer regardless of the current hf.
        from, _ := types.Sender(env.signer, tx)
        // Check whether the tx is replay protected. If we're not in the EIP155 hf
        // phase, start ignoring the sender until we do.
        if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) {
            log.Trace("Ignoring replay protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block)

            txs.Pop()
            continue
        }
        // Start executing the transaction
        env.state.Prepare(tx.Hash(), env.tcount)

        logs, err := w.commitTransaction(env, tx)
        switch {
        case errors.Is(err, core.ErrGasLimitReached):
            // Pop the current out-of-gas transaction without shifting in the next from the account
            log.Trace("Gas limit exceeded for current block", "sender", from)
            txs.Pop()

        case errors.Is(err, core.ErrNonceTooLow):
            // New head notification data race between the transaction pool and miner, shift
            log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
            txs.Shift()

        case errors.Is(err, core.ErrNonceTooHigh):
            // Reorg notification data race between the transaction pool and miner, skip account
            log.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce())
            txs.Pop()

        case errors.Is(err, nil):
            // Everything ok, collect the logs and shift in the next transaction from the same account
            coalescedLogs = append(coalescedLogs, logs...)
            env.tcount++
            txs.Shift()

        case errors.Is(err, core.ErrTxTypeNotSupported):
            // Pop the unsupported transaction without shifting in the next from the account
            log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type())
            txs.Pop()

        default:
            // Strange error, discard the transaction and get the next in line (note, the
            // nonce-too-high clause will prevent us from executing in vain).
            log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
            txs.Shift()
        }
    }

    if !w.isRunning() && len(coalescedLogs) > 0 {
        // We don't push the pendingLogsEvent while we are sealing. The reason is that
        // when we are sealing, the worker will regenerate a sealing block every 3 seconds.
        // In order to avoid pushing the repeated pendingLog, we disable the pending log pushing.

        // make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
        // logs by filling in the block hash when the block was mined by the local miner. This can
        // cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
        cpy := make([]*types.Log, len(coalescedLogs))
        for i, l := range coalescedLogs {
            cpy[i] = new(types.Log)
            *cpy[i] = *l
        }
        w.pendingLogsFeed.Send(cpy)
    }
    // Notify resubmit loop to decrease resubmitting interval if current interval is larger
    // than the user-specified one.
    if interrupt != nil {
        w.resubmitAdjustCh <- &intervalAdjust{inc: false}
    }
    return false
}

// generateParams wraps various settings for generating the sealing task.
type generateParams struct {
    timestamp  uint64         // The timestamp for the sealing task
    forceTime  bool           // Flag whether the given timestamp is immutable or not
    parentHash common.Hash    // Parent block hash, empty means the latest chain head
    coinbase   common.Address // The fee recipient address for including transaction
    random     common.Hash    // The randomness generated by beacon chain, empty before the merge
    noUncle    bool           // Flag whether uncle block inclusion is disallowed
    noExtra    bool           // Flag whether the extra field assignment is disallowed
}
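
// Editor's note — illustrative sketch, not part of the original source: this is roughly
// how getSealingBlock (defined near the end of this file) fills these parameters when a
// payload is requested on top of an explicit parent; the right-hand values here are
// hypothetical placeholders:
//
//    params := &generateParams{
//        timestamp:  uint64(time.Now().Unix()),
//        forceTime:  true,         // reject the request if the timestamp is unusable
//        parentHash: parentHash,   // build on an explicit parent, not the chain head
//        coinbase:   feeRecipient,
//        random:     prevRandao,   // becomes header.MixDigest
//        noUncle:    true,
//        noExtra:    true,
//    }
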
// prepareWork constructs the sealing task according to the given parameters,
// either based on the last chain head or specified parent. In this function
// the pending transactions are not filled yet, only the empty task is returned.
func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
    w.mu.RLock()
    defer w.mu.RUnlock()

    // Find the parent block for sealing task
    parent := w.chain.CurrentBlock()
    if genParams.parentHash != (common.Hash{}) {
        parent = w.chain.GetBlockByHash(genParams.parentHash)
    }
    if parent == nil {
        return nil, fmt.Errorf("missing parent")
    }
    // Sanity check the timestamp correctness, recap the timestamp
    // to parent+1 if the mutation is allowed.
    timestamp := genParams.timestamp
    if parent.Time() >= timestamp {
        if genParams.forceTime {
            return nil, fmt.Errorf("invalid timestamp, parent %d given %d", parent.Time(), timestamp)
        }
        timestamp = parent.Time() + 1
    }
    // Construct the sealing block header, set the extra field if it's allowed
    num := parent.Number()
    header := &types.Header{
        ParentHash: parent.Hash(),
        Number:     num.Add(num, common.Big1),
        GasLimit:   core.CalcGasLimit(parent.GasLimit(), w.config.GasCeil),
        Time:       timestamp,
        Coinbase:   genParams.coinbase,
    }
    if !genParams.noExtra && len(w.extra) != 0 {
        header.Extra = w.extra
    }
    // Set the randomness field from the beacon chain if it's available.
    if genParams.random != (common.Hash{}) {
        header.MixDigest = genParams.random
    }
    // Set baseFee and GasLimit if we are on an EIP-1559 chain
    if w.chainConfig.IsLondon(header.Number) {
        header.BaseFee = misc.CalcBaseFee(w.chainConfig, parent.Header())
        if !w.chainConfig.IsLondon(parent.Number()) {
            parentGasLimit := parent.GasLimit() * params.ElasticityMultiplier
            header.GasLimit = core.CalcGasLimit(parentGasLimit, w.config.GasCeil)
        }
    }
    // Run the consensus preparation with the default or customized consensus engine.
    if err := w.engine.Prepare(w.chain, header); err != nil {
        log.Error("Failed to prepare header for sealing", "err", err)
        return nil, err
    }
    // Could potentially happen if starting to mine in an odd state.
    // Note genParams.coinbase can be different from header.Coinbase
    // since the clique algorithm can modify the coinbase field in the header.
    env, err := w.makeEnv(parent, header, genParams.coinbase)
    if err != nil {
        log.Error("Failed to create sealing context", "err", err)
        return nil, err
    }
    // Accumulate the uncles for the sealing work only if it's allowed.
    if !genParams.noUncle {
        commitUncles := func(blocks map[common.Hash]*types.Block) {
            for hash, uncle := range blocks {
                if len(env.uncles) == 2 {
                    break
                }
                if err := w.commitUncle(env, uncle.Header()); err != nil {
                    log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
                } else {
                    log.Debug("Committing new uncle to block", "hash", hash)
                }
            }
        }
        // Prefer locally generated uncles
        commitUncles(w.localUncles)
        commitUncles(w.remoteUncles)
    }
    return env, nil
}

// fillTransactions retrieves the pending transactions from the txpool and fills them
// into the given sealing block. The transaction selection and ordering strategy can
// be customized with the plugin in the future.
func (w *worker) fillTransactions(interrupt *int32, env *environment) {
    // Split the pending transactions into locals and remotes.
    // Fill the block with all available pending transactions.
    pending := w.eth.TxPool().Pending(true)
    localTxs, remoteTxs := make(map[common.Address]types.Transactions), pending
    for _, account := range w.eth.TxPool().Locals() {
        if txs := remoteTxs[account]; len(txs) > 0 {
            delete(remoteTxs, account)
            localTxs[account] = txs
        }
    }
    if len(localTxs) > 0 {
        txs := types.NewTransactionsByPriceAndNonce(env.signer, localTxs, env.header.BaseFee)
        if w.commitTransactions(env, txs, interrupt) {
            return
        }
    }
    if len(remoteTxs) > 0 {
        txs := types.NewTransactionsByPriceAndNonce(env.signer, remoteTxs, env.header.BaseFee)
        if w.commitTransactions(env, txs, interrupt) {
            return
        }
    }
}

// generateWork generates a sealing block based on the given parameters.
func (w *worker) generateWork(params *generateParams) (*types.Block, error) {
    work, err := w.prepareWork(params)
    if err != nil {
        return nil, err
    }
    defer work.discard()

    w.fillTransactions(nil, work)
    return w.engine.FinalizeAndAssemble(w.chain, work.header, work.state, work.txs, work.unclelist(), work.receipts)
}
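
// Editor's note, not part of the original source: fillTransactions above gives locally
// submitted transactions strict priority. The two price-and-nonce heaps are drained one
// after the other, so a remote transaction can never displace a local one; remote
// transactions only fill whatever gas the locals leave unused.
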
// commitWork generates several new sealing tasks based on the parent block
// and submits them to the sealer.
func (w *worker) commitWork(interrupt *int32, noempty bool, timestamp int64) {
    start := time.Now()

    // Set the coinbase if the worker is running or it's required
    var coinbase common.Address
    if w.isRunning() {
        if w.coinbase == (common.Address{}) {
            log.Error("Refusing to mine without etherbase")
            return
        }
        coinbase = w.coinbase // Use the preset address as the fee recipient
    }
    work, err := w.prepareWork(&generateParams{
        timestamp: uint64(timestamp),
        coinbase:  coinbase,
    })
    if err != nil {
        return
    }
    // Create an empty block based on temporarily copied state for
    // sealing in advance without waiting for block execution to finish.
    if !noempty && atomic.LoadUint32(&w.noempty) == 0 {
        w.commit(work.copy(), nil, false, start)
    }
    // Fill pending transactions from the txpool
    w.fillTransactions(interrupt, work)
    w.commit(work.copy(), w.fullTaskHook, true, start)

    // Swap out the old work with the new one, terminating any leftover
    // prefetcher processes in the meantime and starting a new one.
    if w.current != nil {
        w.current.discard()
    }
    w.current = work
}

// commit runs any post-transaction state modifications, assembles the final block
// and commits new work if the consensus engine is running.
// Note the assumption is held that mutation of the passed env is allowed, so do
// the deep copy first.
func (w *worker) commit(env *environment, interval func(), update bool, start time.Time) error {
    if w.isRunning() {
        if interval != nil {
            interval()
        }
        // Create a local environment copy, avoid the data race with snapshot state.
        // https://github.com/ethereum/go-ethereum/issues/24299
        env := env.copy()
        block, err := w.engine.FinalizeAndAssemble(w.chain, env.header, env.state, env.txs, env.unclelist(), env.receipts)
        if err != nil {
            return err
        }
        // If we're post merge, just ignore
        if !w.isTTDReached(block.Header()) {
            select {
            case w.taskCh <- &task{receipts: env.receipts, state: env.state, block: block, createdAt: time.Now()}:
                w.unconfirmed.Shift(block.NumberU64() - 1)
                log.Info("Commit new sealing work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
                    "uncles", len(env.uncles), "txs", env.tcount,
                    "gas", block.GasUsed(), "fees", totalFees(block, env.receipts),
                    "elapsed", common.PrettyDuration(time.Since(start)))

            case <-w.exitCh:
                log.Info("Worker has exited")
            }
        }
    }
    if update {
        w.updateSnapshot(env)
    }
    return nil
}
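
// Editor's note — sketch, not part of the original source: commitWork above is
// effectively two-phase when pre-sealing is enabled. The same cycle first ships an
// empty block so the sealer is never idle, then replaces it once transactions are packed:
//
//    w.commit(work.copy(), nil, false, start)           // phase 1: empty block, sealed immediately
//    w.fillTransactions(interrupt, work)                // pack pending transactions meanwhile
//    w.commit(work.copy(), w.fullTaskHook, true, start) // phase 2: full block supersedes the empty one
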
// getSealingBlock generates the sealing block based on the given parameters.
func (w *worker) getSealingBlock(parent common.Hash, timestamp uint64, coinbase common.Address, random common.Hash) (*types.Block, error) {
    req := &getWorkReq{
        params: &generateParams{
            timestamp:  timestamp,
            forceTime:  true,
            parentHash: parent,
            coinbase:   coinbase,
            random:     random,
            noUncle:    true,
            noExtra:    true,
        },
        result: make(chan *types.Block, 1),
    }
    select {
    case w.getWorkCh <- req:
        block := <-req.result
        if block == nil {
            return nil, req.err
        }
        return block, nil
    case <-w.exitCh:
        return nil, errors.New("miner closed")
    }
}

// isTTDReached returns the indicator if the given block has reached the terminal
// total difficulty for The Merge transition.
func (w *worker) isTTDReached(header *types.Header) bool {
    td, ttd := w.chain.GetTd(header.ParentHash, header.Number.Uint64()-1), w.chain.Config().TerminalTotalDifficulty
    return td != nil && ttd != nil && td.Cmp(ttd) >= 0
}

// copyReceipts makes a deep copy of the given receipts.
func copyReceipts(receipts []*types.Receipt) []*types.Receipt {
    result := make([]*types.Receipt, len(receipts))
    for i, l := range receipts {
        cpy := *l
        result[i] = &cpy
    }
    return result
}

// postSideBlock fires a side chain event, only use it for testing.
func (w *worker) postSideBlock(event core.ChainSideEvent) {
    select {
    case w.chainSideCh <- event:
    case <-w.exitCh:
    }
}

// totalFees computes total consumed miner fees in ETH. Block transactions and receipts have to have the same order.
func totalFees(block *types.Block, receipts []*types.Receipt) *big.Float {
    feesWei := new(big.Int)
    for i, tx := range block.Transactions() {
        minerFee, _ := tx.EffectiveGasTip(block.BaseFee())
        feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), minerFee))
    }
    return new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether)))
}
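
// Editor's note — worked example, not part of the original source: for a block
// containing a single transfer that used 21000 gas with an effective miner tip of
// 2 gwei, totalFees reports
//
//    21000 * 2e9 wei = 4.2e13 wei = 0.000042 ETH
//
// since params.Ether is 1e18 wei.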