// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package miner

import (
	"errors"
	"fmt"
	"math/big"
	"sync"
	"sync/atomic"
	"time"

	mapset "github.com/deckarep/golang-set"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/consensus/misc"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/trie"
)

const (
	// resultQueueSize is the size of channel listening to sealing result.
	resultQueueSize = 10

	// txChanSize is the size of channel listening to NewTxsEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 4096

	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

	// chainSideChanSize is the size of channel listening to ChainSideEvent.
	chainSideChanSize = 10

	// resubmitAdjustChanSize is the size of resubmitting interval adjustment channel.
	resubmitAdjustChanSize = 10

	// sealingLogAtDepth is the number of confirmations before logging successful sealing.
	sealingLogAtDepth = 7

	// minRecommitInterval is the minimal time interval to recreate the sealing block with
	// any newly arrived transactions.
	minRecommitInterval = 1 * time.Second

	// maxRecommitInterval is the maximum time interval to recreate the sealing block with
	// any newly arrived transactions.
	maxRecommitInterval = 15 * time.Second

	// intervalAdjustRatio is the impact a single interval adjustment has on sealing work
	// resubmitting interval.
	intervalAdjustRatio = 0.1

	// intervalAdjustBias is applied during the new resubmit interval calculation in favor of
	// increasing upper limit or decreasing lower limit so that the limit can be reachable.
	intervalAdjustBias = 200 * 1000.0 * 1000.0

	// staleThreshold is the maximum depth of the acceptable stale block.
	staleThreshold = 7
)

var (
	// errBlockInterruptedByNewHead is returned by transaction committing when a
	// new chain head arrives and the semi-finished work must be discarded.
	errBlockInterruptedByNewHead = errors.New("new head arrived while building block")

	// errBlockInterruptedByRecommit is returned by transaction committing when a
	// resubmit interrupt fires; the semi-finished work is still usable.
	errBlockInterruptedByRecommit = errors.New("recommit interrupt while building block")
)

// environment is the worker's current environment and holds all
// information of the sealing block generation.
type environment struct {
	signer types.Signer // signer used to recover transaction senders for this chain/fork

	state     *state.StateDB // apply state changes here
	ancestors mapset.Set     // ancestor set (used for checking uncle parent validity)
	family    mapset.Set     // family set (used for checking uncle invalidity)
	tcount    int            // tx count in cycle
	gasPool   *core.GasPool  // available gas used to pack transactions
	coinbase  common.Address // fee recipient for the block under construction

	header   *types.Header
	txs      []*types.Transaction
	receipts []*types.Receipt
	uncles   map[common.Hash]*types.Header // candidate uncles keyed by header hash
}

// copy creates a deep copy of environment.
104 func (env *environment) copy() *environment { 105 cpy := &environment{ 106 signer: env.signer, 107 state: env.state.Copy(), 108 ancestors: env.ancestors.Clone(), 109 family: env.family.Clone(), 110 tcount: env.tcount, 111 coinbase: env.coinbase, 112 header: types.CopyHeader(env.header), 113 receipts: copyReceipts(env.receipts), 114 } 115 if env.gasPool != nil { 116 gasPool := *env.gasPool 117 cpy.gasPool = &gasPool 118 } 119 // The content of txs and uncles are immutable, unnecessary 120 // to do the expensive deep copy for them. 121 cpy.txs = make([]*types.Transaction, len(env.txs)) 122 copy(cpy.txs, env.txs) 123 cpy.uncles = make(map[common.Hash]*types.Header) 124 for hash, uncle := range env.uncles { 125 cpy.uncles[hash] = uncle 126 } 127 return cpy 128 } 129 130 // unclelist returns the contained uncles as the list format. 131 func (env *environment) unclelist() []*types.Header { 132 var uncles []*types.Header 133 for _, uncle := range env.uncles { 134 uncles = append(uncles, uncle) 135 } 136 return uncles 137 } 138 139 // discard terminates the background prefetcher go-routine. It should 140 // always be called for all created environment instances otherwise 141 // the go-routine leak can happen. 142 func (env *environment) discard() { 143 if env.state == nil { 144 return 145 } 146 env.state.StopPrefetcher() 147 } 148 149 // task contains all information for consensus engine sealing and result submitting. 150 type task struct { 151 receipts []*types.Receipt 152 state *state.StateDB 153 block *types.Block 154 createdAt time.Time 155 } 156 157 const ( 158 commitInterruptNone int32 = iota 159 commitInterruptNewHead 160 commitInterruptResubmit 161 ) 162 163 // newWorkReq represents a request for new sealing work submitting with relative interrupt notifier. 164 type newWorkReq struct { 165 interrupt *int32 166 noempty bool 167 timestamp int64 168 } 169 170 // getWorkReq represents a request for getting a new sealing work with provided parameters. 
type getWorkReq struct {
	params *generateParams
	result chan *types.Block // non-blocking channel
	err    chan error
}

// intervalAdjust represents a resubmitting interval adjustment.
type intervalAdjust struct {
	ratio float64
	inc   bool
}

// worker is the main object which takes care of submitting new work to consensus engine
// and gathering the sealing result.
type worker struct {
	config      *Config
	chainConfig *params.ChainConfig
	engine      consensus.Engine
	eth         Backend
	chain       *core.BlockChain

	// Feeds
	pendingLogsFeed event.Feed

	// Subscriptions
	mux          *event.TypeMux
	txsCh        chan core.NewTxsEvent
	txsSub       event.Subscription
	chainHeadCh  chan core.ChainHeadEvent
	chainHeadSub event.Subscription
	chainSideCh  chan core.ChainSideEvent
	chainSideSub event.Subscription

	// Channels used to communicate between the event loops below.
	newWorkCh          chan *newWorkReq
	getWorkCh          chan *getWorkReq
	taskCh             chan *task
	resultCh           chan *types.Block
	startCh            chan struct{}
	exitCh             chan struct{}
	resubmitIntervalCh chan time.Duration
	resubmitAdjustCh   chan *intervalAdjust

	wg sync.WaitGroup // tracks the four background loops started in newWorker

	current      *environment                 // An environment for current running cycle.
	localUncles  map[common.Hash]*types.Block // A set of side blocks generated locally as the possible uncle blocks.
	remoteUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks.
	unconfirmed  *unconfirmedBlocks           // A set of locally mined blocks pending canonicalness confirmations.

	mu       sync.RWMutex // The lock used to protect the coinbase and extra fields
	coinbase common.Address
	extra    []byte

	pendingMu    sync.RWMutex         // guards pendingTasks
	pendingTasks map[common.Hash]*task // in-flight sealing tasks keyed by seal hash

	snapshotMu       sync.RWMutex // The lock used to protect the snapshots below
	snapshotBlock    *types.Block
	snapshotReceipts types.Receipts
	snapshotState    *state.StateDB

	// atomic status counters
	running int32 // The indicator whether the consensus engine is running or not.
	newTxs  int32 // New arrival transaction count since last sealing work submitting.

	// noempty is the flag used to control whether the feature of pre-seal empty
	// block is enabled. The default value is false(pre-seal is enabled by default).
	// But in some special scenario the consensus engine will seal blocks instantaneously,
	// in this case this feature will add all empty blocks into canonical chain
	// non-stop and no real transaction will be included.
	noempty uint32

	// External functions
	isLocalBlock func(header *types.Header) bool // Function used to determine whether the specified block is mined by local miner.

	// Test hooks
	newTaskHook  func(*task)                        // Method to call upon receiving a new sealing task.
	skipSealHook func(*task) bool                   // Method to decide whether skipping the sealing.
	fullTaskHook func()                             // Method to call before pushing the full sealing task.
	resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
}

// newWorker constructs a worker, subscribes it to txpool and chain events,
// spins up the four background loops and, if init is set, triggers an initial
// round of work so the pending state is populated immediately.
func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(header *types.Header) bool, init bool) *worker {
	worker := &worker{
		config:             config,
		chainConfig:        chainConfig,
		engine:             engine,
		eth:                eth,
		mux:                mux,
		chain:              eth.BlockChain(),
		isLocalBlock:       isLocalBlock,
		localUncles:        make(map[common.Hash]*types.Block),
		remoteUncles:       make(map[common.Hash]*types.Block),
		unconfirmed:        newUnconfirmedBlocks(eth.BlockChain(), sealingLogAtDepth),
		pendingTasks:       make(map[common.Hash]*task),
		txsCh:              make(chan core.NewTxsEvent, txChanSize),
		chainHeadCh:        make(chan core.ChainHeadEvent, chainHeadChanSize),
		chainSideCh:        make(chan core.ChainSideEvent, chainSideChanSize),
		newWorkCh:          make(chan *newWorkReq),
		getWorkCh:          make(chan *getWorkReq),
		taskCh:             make(chan *task),
		resultCh:           make(chan *types.Block, resultQueueSize),
		exitCh:             make(chan struct{}),
		startCh:            make(chan struct{}, 1),
		resubmitIntervalCh: make(chan time.Duration),
		resubmitAdjustCh:   make(chan *intervalAdjust, resubmitAdjustChanSize),
	}
	// Subscribe NewTxsEvent for tx pool
	worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh)
	// Subscribe events for blockchain
	worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh)
	worker.chainSideSub = eth.BlockChain().SubscribeChainSideEvent(worker.chainSideCh)

	// Sanitize recommit interval if the user-specified one is too short.
	recommit := worker.config.Recommit
	if recommit < minRecommitInterval {
		log.Warn("Sanitizing miner recommit interval", "provided", recommit, "updated", minRecommitInterval)
		recommit = minRecommitInterval
	}

	worker.wg.Add(4)
	go worker.mainLoop()
	go worker.newWorkLoop(recommit)
	go worker.resultLoop()
	go worker.taskLoop()

	// Submit first work to initialize pending state.
	if init {
		worker.startCh <- struct{}{}
	}
	return worker
}

// setEtherbase sets the etherbase used to initialize the block coinbase field.
func (w *worker) setEtherbase(addr common.Address) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.coinbase = addr
}

// setGasCeil updates the gas ceiling used when computing the block gas limit.
func (w *worker) setGasCeil(ceil uint64) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.config.GasCeil = ceil
}

// setExtra sets the content used to initialize the block extra field.
func (w *worker) setExtra(extra []byte) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.extra = extra
}

// setRecommitInterval updates the interval for miner sealing work recommitting.
func (w *worker) setRecommitInterval(interval time.Duration) {
	select {
	case w.resubmitIntervalCh <- interval:
	case <-w.exitCh:
	}
}

// disablePreseal disables pre-sealing feature
func (w *worker) disablePreseal() {
	atomic.StoreUint32(&w.noempty, 1)
}

// enablePreseal enables pre-sealing feature
func (w *worker) enablePreseal() {
	atomic.StoreUint32(&w.noempty, 0)
}

// pending returns the pending state and corresponding block.
func (w *worker) pending() (*types.Block, *state.StateDB) {
	// return a snapshot to avoid contention on currentMu mutex
	w.snapshotMu.RLock()
	defer w.snapshotMu.RUnlock()
	if w.snapshotState == nil {
		return nil, nil
	}
	return w.snapshotBlock, w.snapshotState.Copy()
}

// pendingBlock returns pending block.
func (w *worker) pendingBlock() *types.Block {
	// return a snapshot to avoid contention on currentMu mutex
	w.snapshotMu.RLock()
	defer w.snapshotMu.RUnlock()
	return w.snapshotBlock
}

// pendingBlockAndReceipts returns pending block and corresponding receipts.
func (w *worker) pendingBlockAndReceipts() (*types.Block, types.Receipts) {
	// return a snapshot to avoid contention on currentMu mutex
	w.snapshotMu.RLock()
	defer w.snapshotMu.RUnlock()
	return w.snapshotBlock, w.snapshotReceipts
}

// start sets the running status as 1 and triggers new work submitting.
func (w *worker) start() {
	atomic.StoreInt32(&w.running, 1)
	w.startCh <- struct{}{}
}

// stop sets the running status as 0.
func (w *worker) stop() {
	atomic.StoreInt32(&w.running, 0)
}

// isRunning returns an indicator whether worker is running or not.
func (w *worker) isRunning() bool {
	return atomic.LoadInt32(&w.running) == 1
}

// close terminates all background threads maintained by the worker.
// Note the worker does not support being closed multiple times.
func (w *worker) close() {
	atomic.StoreInt32(&w.running, 0)
	close(w.exitCh)
	w.wg.Wait()
}

// recalcRecommit recalculates the resubmitting interval upon feedback.
// The new interval is an exponential moving average of the previous one and
// the target, nudged by intervalAdjustBias so the clamped bounds are reachable.
func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) time.Duration {
	var (
		prevF = float64(prev.Nanoseconds())
		next  float64
	)
	if inc {
		next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias)
		max := float64(maxRecommitInterval.Nanoseconds())
		if next > max {
			next = max
		}
	} else {
		next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias)
		min := float64(minRecommit.Nanoseconds())
		if next < min {
			next = min
		}
	}
	return time.Duration(int64(next))
}

// newWorkLoop is a standalone goroutine to submit new sealing work upon received events.
func (w *worker) newWorkLoop(recommit time.Duration) {
	defer w.wg.Done()
	var (
		interrupt   *int32        // interrupt flag shared with the in-flight commit, if any
		minRecommit = recommit    // minimal resubmit interval specified by user.
		timestamp   int64         // timestamp for each round of sealing.
	)

	timer := time.NewTimer(0)
	defer timer.Stop()
	<-timer.C // discard the initial tick

	// commit aborts in-flight transaction execution with given signal and resubmits a new one.
	commit := func(noempty bool, s int32) {
		if interrupt != nil {
			atomic.StoreInt32(interrupt, s)
		}
		interrupt = new(int32)
		select {
		case w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty, timestamp: timestamp}:
		case <-w.exitCh:
			return
		}
		timer.Reset(recommit)
		atomic.StoreInt32(&w.newTxs, 0)
	}
	// clearPending cleans the stale pending tasks.
	clearPending := func(number uint64) {
		w.pendingMu.Lock()
		for h, t := range w.pendingTasks {
			if t.block.NumberU64()+staleThreshold <= number {
				delete(w.pendingTasks, h)
			}
		}
		w.pendingMu.Unlock()
	}

	for {
		select {
		case <-w.startCh:
			clearPending(w.chain.CurrentBlock().NumberU64())
			timestamp = time.Now().Unix()
			commit(false, commitInterruptNewHead)

		case head := <-w.chainHeadCh:
			clearPending(head.Block.NumberU64())
			timestamp = time.Now().Unix()
			commit(false, commitInterruptNewHead)

		case <-timer.C:
			// If sealing is running resubmit a new work cycle periodically to pull in
			// higher priced transactions. Disable this overhead for pending blocks.
			if w.isRunning() && (w.chainConfig.Clique == nil || w.chainConfig.Clique.Period > 0) {
				// Short circuit if no new transaction arrives.
				if atomic.LoadInt32(&w.newTxs) == 0 {
					timer.Reset(recommit)
					continue
				}
				commit(true, commitInterruptResubmit)
			}

		case interval := <-w.resubmitIntervalCh:
			// Adjust resubmit interval explicitly by user.
			if interval < minRecommitInterval {
				log.Warn("Sanitizing miner recommit interval", "provided", interval, "updated", minRecommitInterval)
				interval = minRecommitInterval
			}
			log.Info("Miner recommit interval update", "from", minRecommit, "to", interval)
			minRecommit, recommit = interval, interval

			if w.resubmitHook != nil {
				w.resubmitHook(minRecommit, recommit)
			}

		case adjust := <-w.resubmitAdjustCh:
			// Adjust resubmit interval by feedback.
			if adjust.inc {
				before := recommit
				target := float64(recommit.Nanoseconds()) / adjust.ratio
				recommit = recalcRecommit(minRecommit, recommit, target, true)
				log.Trace("Increase miner recommit interval", "from", before, "to", recommit)
			} else {
				before := recommit
				recommit = recalcRecommit(minRecommit, recommit, float64(minRecommit.Nanoseconds()), false)
				log.Trace("Decrease miner recommit interval", "from", before, "to", recommit)
			}

			if w.resubmitHook != nil {
				w.resubmitHook(minRecommit, recommit)
			}

		case <-w.exitCh:
			return
		}
	}
}

// mainLoop is responsible for generating and submitting sealing work based on
// the received event. It can support two modes: automatically generate task and
// submit it or return task according to given parameters for various purposes.
func (w *worker) mainLoop() {
	defer w.wg.Done()
	defer w.txsSub.Unsubscribe()
	defer w.chainHeadSub.Unsubscribe()
	defer w.chainSideSub.Unsubscribe()
	defer func() {
		// Stop the state prefetcher of the last environment, otherwise the
		// prefetcher goroutine would leak (see environment.discard).
		if w.current != nil {
			w.current.discard()
		}
	}()

	cleanTicker := time.NewTicker(time.Second * 10)
	defer cleanTicker.Stop()

	for {
		select {
		case req := <-w.newWorkCh:
			w.commitWork(req.interrupt, req.noempty, req.timestamp)

		case req := <-w.getWorkCh:
			// Both channels are written so the requester always receives a
			// matched (result, err) pair.
			block, err := w.generateWork(req.params)
			if err != nil {
				req.err <- err
				req.result <- nil
			} else {
				req.err <- nil
				req.result <- block
			}
		case ev := <-w.chainSideCh:
			// Short circuit for duplicate side blocks
			if _, exist := w.localUncles[ev.Block.Hash()]; exist {
				continue
			}
			if _, exist := w.remoteUncles[ev.Block.Hash()]; exist {
				continue
			}
			// Add side block to possible uncle block set depending on the author.
			if w.isLocalBlock != nil && w.isLocalBlock(ev.Block.Header()) {
				w.localUncles[ev.Block.Hash()] = ev.Block
			} else {
				w.remoteUncles[ev.Block.Hash()] = ev.Block
			}
			// If our sealing block contains less than 2 uncle blocks,
			// add the new uncle block if valid and regenerate a new
			// sealing block for higher profit.
			if w.isRunning() && w.current != nil && len(w.current.uncles) < 2 {
				start := time.Now()
				if err := w.commitUncle(w.current, ev.Block.Header()); err == nil {
					w.commit(w.current.copy(), nil, true, start)
				}
			}

		case <-cleanTicker.C:
			// Periodically drop uncle candidates that are too deep to be
			// includable any more (beyond staleThreshold behind the head).
			chainHead := w.chain.CurrentBlock()
			for hash, uncle := range w.localUncles {
				if uncle.NumberU64()+staleThreshold <= chainHead.NumberU64() {
					delete(w.localUncles, hash)
				}
			}
			for hash, uncle := range w.remoteUncles {
				if uncle.NumberU64()+staleThreshold <= chainHead.NumberU64() {
					delete(w.remoteUncles, hash)
				}
			}

		case ev := <-w.txsCh:
			// Apply transactions to the pending state if we're not sealing
			//
			// Note all transactions received may not be continuous with transactions
			// already included in the current sealing block. These transactions will
			// be automatically eliminated.
			if !w.isRunning() && w.current != nil {
				// If block is already full, abort
				if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas {
					continue
				}
				txs := make(map[common.Address]types.Transactions)
				for _, tx := range ev.Txs {
					acc, _ := types.Sender(w.current.signer, tx)
					txs[acc] = append(txs[acc], tx)
				}
				txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee)
				tcount := w.current.tcount
				w.commitTransactions(w.current, txset, nil)

				// Only update the snapshot if any new transactions were added
				// to the pending block
				if tcount != w.current.tcount {
					w.updateSnapshot(w.current)
				}
			} else {
				// Special case, if the consensus engine is 0 period clique(dev mode),
				// submit sealing work here since all empty submission will be rejected
				// by clique. Of course the advance sealing(empty submission) is disabled.
				if w.chainConfig.Clique != nil && w.chainConfig.Clique.Period == 0 {
					w.commitWork(nil, true, time.Now().Unix())
				}
			}
			atomic.AddInt32(&w.newTxs, int32(len(ev.Txs)))

		// System stopped
		case <-w.exitCh:
			return
		case <-w.txsSub.Err():
			return
		case <-w.chainHeadSub.Err():
			return
		case <-w.chainSideSub.Err():
			return
		}
	}
}

// taskLoop is a standalone goroutine to fetch sealing task from the generator and
// push them to consensus engine.
func (w *worker) taskLoop() {
	defer w.wg.Done()
	var (
		stopCh chan struct{} // abort channel of the in-flight seal, nil when idle
		prev   common.Hash   // seal hash of the previously submitted task
	)

	// interrupt aborts the in-flight sealing task.
	interrupt := func() {
		if stopCh != nil {
			close(stopCh)
			stopCh = nil
		}
	}
	for {
		select {
		case task := <-w.taskCh:
			if w.newTaskHook != nil {
				w.newTaskHook(task)
			}
			// Reject duplicate sealing work due to resubmitting.
			sealHash := w.engine.SealHash(task.block.Header())
			if sealHash == prev {
				continue
			}
			// Interrupt previous sealing operation
			interrupt()
			stopCh, prev = make(chan struct{}), sealHash

			if w.skipSealHook != nil && w.skipSealHook(task) {
				continue
			}
			w.pendingMu.Lock()
			w.pendingTasks[sealHash] = task
			w.pendingMu.Unlock()

			if err := w.engine.Seal(w.chain, task.block, w.resultCh, stopCh); err != nil {
				log.Warn("Block sealing failed", "err", err)
				w.pendingMu.Lock()
				delete(w.pendingTasks, sealHash)
				w.pendingMu.Unlock()
			}
		case <-w.exitCh:
			interrupt()
			return
		}
	}
}

// resultLoop is a standalone goroutine to handle sealing result submitting
// and flush relative data to the database.
func (w *worker) resultLoop() {
	defer w.wg.Done()
	for {
		select {
		case block := <-w.resultCh:
			// Short circuit when receiving empty result.
			if block == nil {
				continue
			}
			// Short circuit when receiving duplicate result caused by resubmitting.
			if w.chain.HasBlock(block.Hash(), block.NumberU64()) {
				continue
			}
			var (
				sealhash = w.engine.SealHash(block.Header())
				hash     = block.Hash()
			)
			w.pendingMu.RLock()
			task, exist := w.pendingTasks[sealhash]
			w.pendingMu.RUnlock()
			if !exist {
				log.Error("Block found but no relative pending task", "number", block.Number(), "sealhash", sealhash, "hash", hash)
				continue
			}
			// Different block could share same sealhash, deep copy here to prevent write-write conflict.
			var (
				receipts = make([]*types.Receipt, len(task.receipts))
				logs     []*types.Log
			)
			for i, taskReceipt := range task.receipts {
				receipt := new(types.Receipt)
				receipts[i] = receipt
				*receipt = *taskReceipt

				// add block location fields
				receipt.BlockHash = hash
				receipt.BlockNumber = block.Number()
				receipt.TransactionIndex = uint(i)

				// Update the block hash in all logs since it is now available and not when the
				// receipt/log of individual transactions were created.
				receipt.Logs = make([]*types.Log, len(taskReceipt.Logs))
				for i, taskLog := range taskReceipt.Logs {
					log := new(types.Log)
					receipt.Logs[i] = log
					*log = *taskLog
					log.BlockHash = hash
				}
				logs = append(logs, receipt.Logs...)
			}
			// Commit block and state to database.
			_, err := w.chain.WriteBlockAndSetHead(block, receipts, logs, task.state, true)
			if err != nil {
				log.Error("Failed writing block to chain", "err", err)
				continue
			}
			log.Info("Successfully sealed new block", "number", block.Number(), "sealhash", sealhash, "hash", hash,
				"elapsed", common.PrettyDuration(time.Since(task.createdAt)))

			// Broadcast the block and announce chain insertion event
			w.mux.Post(core.NewMinedBlockEvent{Block: block})

			// Insert the block into the set of pending ones to resultLoop for confirmations
			w.unconfirmed.Insert(block.NumberU64(), block.Hash())

		case <-w.exitCh:
			return
		}
	}
}

// makeEnv creates a new environment for the sealing block.
func (w *worker) makeEnv(parent *types.Block, header *types.Header, coinbase common.Address) (*environment, error) {
	// Retrieve the parent state to execute on top and start a prefetcher for
	// the miner to speed block sealing up a bit.
	state, err := w.chain.StateAt(parent.Root())
	if err != nil {
		// Note since the sealing block can be created upon the arbitrary parent
		// block, but the state of parent block may already be pruned, so the necessary
		// state recovery is needed here in the future.
		//
		// The maximum acceptable reorg depth can be limited by the finalised block
		// somehow. TODO(rjl493456442) fix the hard-coded number here later.
		state, err = w.eth.StateAtBlock(parent, 1024, nil, false, false)
		log.Warn("Recovered mining state", "root", parent.Root(), "err", err)
	}
	if err != nil {
		return nil, err
	}
	state.StartPrefetcher("miner")

	// Note the passed coinbase may be different with header.Coinbase.
	env := &environment{
		signer:    types.MakeSigner(w.chainConfig, header.Number),
		state:     state,
		coinbase:  coinbase,
		ancestors: mapset.NewSet(),
		family:    mapset.NewSet(),
		header:    header,
		uncles:    make(map[common.Hash]*types.Header),
	}
	// when 08 is processed ancestors contain 07 (quick block)
	for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) {
		for _, uncle := range ancestor.Uncles() {
			env.family.Add(uncle.Hash())
		}
		env.family.Add(ancestor.Hash())
		env.ancestors.Add(ancestor.Hash())
	}
	// Keep track of transactions which return errors so they can be removed
	env.tcount = 0
	return env, nil
}

// commitUncle adds the given block to uncle block set, returns error if failed to add.
func (w *worker) commitUncle(env *environment, uncle *types.Header) error {
	if w.isTTDReached(env.header) {
		return errors.New("ignore uncle for beacon block")
	}
	hash := uncle.Hash()
	if _, exist := env.uncles[hash]; exist {
		return errors.New("uncle not unique")
	}
	if env.header.ParentHash == uncle.ParentHash {
		return errors.New("uncle is sibling")
	}
	if !env.ancestors.Contains(uncle.ParentHash) {
		return errors.New("uncle's parent unknown")
	}
	if env.family.Contains(hash) {
		return errors.New("uncle already included")
	}
	env.uncles[hash] = uncle
	return nil
}

// updateSnapshot updates pending snapshot block, receipts and state.
820 func (w *worker) updateSnapshot(env *environment) { 821 w.snapshotMu.Lock() 822 defer w.snapshotMu.Unlock() 823 824 w.snapshotBlock = types.NewBlock( 825 env.header, 826 env.txs, 827 env.unclelist(), 828 env.receipts, 829 trie.NewStackTrie(nil), 830 ) 831 w.snapshotReceipts = copyReceipts(env.receipts) 832 w.snapshotState = env.state.Copy() 833 } 834 835 func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]*types.Log, error) { 836 snap := env.state.Snapshot() 837 838 receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &env.coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig()) 839 if err != nil { 840 env.state.RevertToSnapshot(snap) 841 return nil, err 842 } 843 env.txs = append(env.txs, tx) 844 env.receipts = append(env.receipts, receipt) 845 846 return receipt.Logs, nil 847 } 848 849 func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByPriceAndNonce, interrupt *int32) error { 850 gasLimit := env.header.GasLimit 851 if env.gasPool == nil { 852 env.gasPool = new(core.GasPool).AddGas(gasLimit) 853 } 854 var coalescedLogs []*types.Log 855 856 for { 857 // In the following three cases, we will interrupt the execution of the transaction. 858 // (1) new head block event arrival, the interrupt signal is 1 859 // (2) worker start or restart, the interrupt signal is 1 860 // (3) worker recreate the sealing block with any newly arrived transactions, the interrupt signal is 2. 861 // For the first two cases, the semi-finished work will be discarded. 862 // For the third case, the semi-finished work will be submitted to the consensus engine. 863 if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone { 864 // Notify resubmit loop to increase resubmitting interval due to too frequent commits. 
			// A resubmit interrupt: the work is being rebuilt with fresher
			// transactions. Tell the resubmit loop to back off, scaled by how
			// much of the block's gas the aborted round had already packed.
			if atomic.LoadInt32(interrupt) == commitInterruptResubmit {
				ratio := float64(gasLimit-env.gasPool.Gas()) / float64(gasLimit)
				if ratio < 0.1 {
					// Clamp so a nearly-empty block still nudges the interval.
					ratio = 0.1
				}
				w.resubmitAdjustCh <- &intervalAdjust{
					ratio: ratio,
					inc:   true,
				}
				return errBlockInterruptedByRecommit
			}
			// Any other non-none interrupt means a new chain head arrived.
			return errBlockInterruptedByNewHead
		}
		// If we don't have enough gas for any further transactions then we're done.
		if env.gasPool.Gas() < params.TxGas {
			log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas)
			break
		}
		// Retrieve the next transaction and abort if all done.
		tx := txs.Peek()
		if tx == nil {
			break
		}
		// Error may be ignored here. The error has already been checked
		// during transaction acceptance in the transaction pool.
		//
		// We use the eip155 signer regardless of the current hf.
		from, _ := types.Sender(env.signer, tx)
		// Check whether the tx is replay protected. If we're not in the EIP155 hf
		// phase, start ignoring the sender until we do.
		// NOTE(review): the log message below says "reply" — upstream typo for "replay".
		if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) {
			log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block)

			txs.Pop()
			continue
		}
		// Start executing the transaction.
		env.state.Prepare(tx.Hash(), env.tcount)

		logs, err := w.commitTransaction(env, tx)
		switch {
		case errors.Is(err, core.ErrGasLimitReached):
			// Pop the current out-of-gas transaction without shifting in the next from the account.
			log.Trace("Gas limit exceeded for current block", "sender", from)
			txs.Pop()

		case errors.Is(err, core.ErrNonceTooLow):
			// New head notification data race between the transaction pool and miner, shift.
			log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
			txs.Shift()

		case errors.Is(err, core.ErrNonceTooHigh):
			// Reorg notification data race between the transaction pool and miner, skip account.
			log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce())
			txs.Pop()

		case errors.Is(err, nil):
			// Everything ok, collect the logs and shift in the next transaction from the same account.
			coalescedLogs = append(coalescedLogs, logs...)
			env.tcount++
			txs.Shift()

		case errors.Is(err, core.ErrTxTypeNotSupported):
			// Pop the unsupported transaction without shifting in the next from the account.
			log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type())
			txs.Pop()

		default:
			// Strange error, discard the transaction and get the next in line (note, the
			// nonce-too-high clause will prevent us from executing in vain).
			log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
			txs.Shift()
		}
	}

	if !w.isRunning() && len(coalescedLogs) > 0 {
		// We don't push the pendingLogsEvent while we are sealing. The reason is that
		// when we are sealing, the worker will regenerate a sealing block every 3 seconds.
		// In order to avoid pushing the repeated pendingLog, we disable the pending log pushing.

		// Make a copy: the state caches the logs and these logs get "upgraded" from pending to mined
		// logs by filling in the block hash when the block was mined by the local miner. This can
		// cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
		cpy := make([]*types.Log, len(coalescedLogs))
		for i, l := range coalescedLogs {
			cpy[i] = new(types.Log)
			*cpy[i] = *l
		}
		w.pendingLogsFeed.Send(cpy)
	}
	// Notify resubmit loop to decrease resubmitting interval if current interval is larger
	// than the user-specified one.
	if interrupt != nil {
		w.resubmitAdjustCh <- &intervalAdjust{inc: false}
	}
	return nil
}

// generateParams wraps the various settings for generating a sealing task.
type generateParams struct {
	timestamp  uint64         // The timestamp for the sealing task
	forceTime  bool           // Flag whether the given timestamp is immutable or not
	parentHash common.Hash    // Parent block hash, empty means the latest chain head
	coinbase   common.Address // The fee recipient address for including transaction
	random     common.Hash    // The randomness generated by beacon chain, empty before the merge
	noUncle    bool           // Flag whether uncle block inclusion is disallowed
	noExtra    bool           // Flag whether the extra field assignment is disallowed
	noTxs      bool           // Flag whether an empty block without any transaction is expected
}

// prepareWork constructs the sealing task according to the given parameters,
// either based on the last chain head or specified parent. In this function
// the pending transactions are not filled yet, only the empty task returned.
func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
	w.mu.RLock()
	defer w.mu.RUnlock()

	// Find the parent block for the sealing task: either the explicitly
	// requested parent or, by default, the current chain head.
	parent := w.chain.CurrentBlock()
	if genParams.parentHash != (common.Hash{}) {
		parent = w.chain.GetBlockByHash(genParams.parentHash)
	}
	if parent == nil {
		return nil, fmt.Errorf("missing parent")
	}
	// Sanity check the timestamp correctness, recap the timestamp
	// to parent+1 if the mutation is allowed.
	timestamp := genParams.timestamp
	if parent.Time() >= timestamp {
		if genParams.forceTime {
			return nil, fmt.Errorf("invalid timestamp, parent %d given %d", parent.Time(), timestamp)
		}
		timestamp = parent.Time() + 1
	}
	// Construct the sealing block header, set the extra field if it's allowed.
	// NOTE(review): num.Add mutates num in place to produce parent+1; assumes
	// parent.Number() hands back a private copy — confirm against types.Block.
	num := parent.Number()
	header := &types.Header{
		ParentHash: parent.Hash(),
		Number:     num.Add(num, common.Big1),
		GasLimit:   core.CalcGasLimit(parent.GasLimit(), w.config.GasCeil),
		Time:       timestamp,
		Coinbase:   genParams.coinbase,
	}
	if !genParams.noExtra && len(w.extra) != 0 {
		header.Extra = w.extra
	}
	// Set the randomness field from the beacon chain if it's available.
	if genParams.random != (common.Hash{}) {
		header.MixDigest = genParams.random
	}
	// Set baseFee and GasLimit if we are on an EIP-1559 chain.
	if w.chainConfig.IsLondon(header.Number) {
		header.BaseFee = misc.CalcBaseFee(w.chainConfig, parent.Header())
		// On the London transition block the gas limit target doubles, so
		// recompute it from the elasticity-scaled parent limit.
		if !w.chainConfig.IsLondon(parent.Number()) {
			parentGasLimit := parent.GasLimit() * params.ElasticityMultiplier
			header.GasLimit = core.CalcGasLimit(parentGasLimit, w.config.GasCeil)
		}
	}
	// Run the consensus preparation with the default or customized consensus engine.
	if err := w.engine.Prepare(w.chain, header); err != nil {
		log.Error("Failed to prepare header for sealing", "err", err)
		return nil, err
	}
	// Could potentially happen if starting to mine in an odd state.
	// Note genParams.coinbase can differ from header.Coinbase
	// since the clique algorithm can modify the coinbase field in the header.
	env, err := w.makeEnv(parent, header, genParams.coinbase)
	if err != nil {
		log.Error("Failed to create sealing context", "err", err)
		return nil, err
	}
	// Accumulate the uncles for the sealing work only if it's allowed.
	if !genParams.noUncle {
		// Include at most two uncles per block, trying each candidate and
		// letting commitUncle reject invalid ones.
		commitUncles := func(blocks map[common.Hash]*types.Block) {
			for hash, uncle := range blocks {
				if len(env.uncles) == 2 {
					break
				}
				if err := w.commitUncle(env, uncle.Header()); err != nil {
					log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
				} else {
					log.Debug("Committing new uncle to block", "hash", hash)
				}
			}
		}
		// Prefer locally generated uncles over remote ones.
		commitUncles(w.localUncles)
		commitUncles(w.remoteUncles)
	}
	return env, nil
}

// fillTransactions retrieves the pending transactions from the txpool and fills them
// into the given sealing block. The transaction selection and ordering strategy can
// be customized with the plugin in the future.
func (w *worker) fillTransactions(interrupt *int32, env *environment) error {
	// Split the pending transactions into locals and remotes
	// Fill the block with all available pending transactions.
	// NOTE(review): the boolean passed to Pending looks like a tip-enforcement
	// flag — confirm against the txpool API of this fork.
	pending := w.eth.TxPool().Pending(true)
	// Start with everything classed as remote, then carve out the accounts the
	// pool considers local so they can be committed first.
	localTxs, remoteTxs := make(map[common.Address]types.Transactions), pending
	for _, account := range w.eth.TxPool().Locals() {
		if txs := remoteTxs[account]; len(txs) > 0 {
			delete(remoteTxs, account)
			localTxs[account] = txs
		}
	}
	// Local transactions get priority: commit them before any remote ones.
	if len(localTxs) > 0 {
		txs := types.NewTransactionsByPriceAndNonce(env.signer, localTxs, env.header.BaseFee)
		if err := w.commitTransactions(env, txs, interrupt); err != nil {
			return err
		}
	}
	if len(remoteTxs) > 0 {
		txs := types.NewTransactionsByPriceAndNonce(env.signer, remoteTxs, env.header.BaseFee)
		if err := w.commitTransactions(env, txs, interrupt); err != nil {
			return err
		}
	}
	return nil
}

// generateWork generates a sealing block based on the given parameters.
func (w *worker) generateWork(params *generateParams) (*types.Block, error) {
	work, err := w.prepareWork(params)
	if err != nil {
		return nil, err
	}
	// The environment is only needed to assemble this one block; release its
	// resources when done.
	defer work.discard()

	if !params.noTxs {
		// Best-effort fill with a nil interrupt; presumably fillTransactions
		// can only fail with an interruption error, which cannot happen here —
		// that would explain the ignored return. TODO confirm.
		w.fillTransactions(nil, work)
	}
	return w.engine.FinalizeAndAssemble(w.chain, work.header, work.state, work.txs, work.unclelist(), work.receipts)
}

// commitWork generates several new sealing tasks based on the parent block
// and submit them to the sealer.
func (w *worker) commitWork(interrupt *int32, noempty bool, timestamp int64) {
	start := time.Now()

	// Set the coinbase if the worker is running or it's required.
	var coinbase common.Address
	if w.isRunning() {
		if w.coinbase == (common.Address{}) {
			log.Error("Refusing to mine without etherbase")
			return
		}
		coinbase = w.coinbase // Use the preset address as the fee recipient
	}
	work, err := w.prepareWork(&generateParams{
		timestamp: uint64(timestamp),
		coinbase:  coinbase,
	})
	if err != nil {
		return
	}
	// Create an empty block based on temporary copied state for
	// sealing in advance without waiting block execution finished.
	if !noempty && atomic.LoadUint32(&w.noempty) == 0 {
		w.commit(work.copy(), nil, false, start)
	}

	// Fill pending transactions from the txpool into the sealing block.
	err = w.fillTransactions(interrupt, work)
	// Only a new-head interruption aborts outright; a recommit interruption
	// (or nil) still commits whatever has been packed so far.
	if errors.Is(err, errBlockInterruptedByNewHead) {
		work.discard()
		return
	}
	w.commit(work.copy(), w.fullTaskHook, true, start)

	// Swap out the old work with the new one, terminating any leftover
	// prefetcher processes in the mean time and starting a new one.
	if w.current != nil {
		w.current.discard()
	}
	w.current = work
}

// commit runs any post-transaction state modifications, assembles the final block
// and commits new work if consensus engine is running.
// Note the assumption is held that the mutation is allowed to the passed env, do
// the deep copy first.
func (w *worker) commit(env *environment, interval func(), update bool, start time.Time) error {
	if w.isRunning() {
		if interval != nil {
			interval()
		}
		// Create a local environment copy, avoid the data race with snapshot state.
		// https://github.com/ethereum/go-ethereum/issues/24299
		// (the shadowing of env below is deliberate: the outer env is kept
		// unmodified for the updateSnapshot call at the bottom).
		env := env.copy()
		block, err := w.engine.FinalizeAndAssemble(w.chain, env.header, env.state, env.txs, env.unclelist(), env.receipts)
		if err != nil {
			return err
		}
		// If we're post merge, just ignore: sealing tasks are only dispatched
		// while the terminal total difficulty has not been reached.
		if !w.isTTDReached(block.Header()) {
			select {
			case w.taskCh <- &task{receipts: env.receipts, state: env.state, block: block, createdAt: time.Now()}:
				w.unconfirmed.Shift(block.NumberU64() - 1)
				log.Info("Commit new sealing work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
					"uncles", len(env.uncles), "txs", env.tcount,
					"gas", block.GasUsed(), "fees", totalFees(block, env.receipts),
					"elapsed", common.PrettyDuration(time.Since(start)))

			case <-w.exitCh:
				log.Info("Worker has exited")
			}
		}
	}
	if update {
		w.updateSnapshot(env)
	}
	return nil
}

// getSealingBlock generates the sealing block based on the given parameters.
// The generation result will be passed back via the given channel no matter
// the generation itself succeeds or not.
1183 func (w *worker) getSealingBlock(parent common.Hash, timestamp uint64, coinbase common.Address, random common.Hash, noTxs bool) (chan *types.Block, chan error, error) { 1184 var ( 1185 resCh = make(chan *types.Block, 1) 1186 errCh = make(chan error, 1) 1187 ) 1188 req := &getWorkReq{ 1189 params: &generateParams{ 1190 timestamp: timestamp, 1191 forceTime: true, 1192 parentHash: parent, 1193 coinbase: coinbase, 1194 random: random, 1195 noUncle: true, 1196 noExtra: true, 1197 noTxs: noTxs, 1198 }, 1199 result: resCh, 1200 err: errCh, 1201 } 1202 select { 1203 case w.getWorkCh <- req: 1204 return resCh, errCh, nil 1205 case <-w.exitCh: 1206 return nil, nil, errors.New("miner closed") 1207 } 1208 } 1209 1210 // isTTDReached returns the indicator if the given block has reached the total 1211 // terminal difficulty for The Merge transition. 1212 func (w *worker) isTTDReached(header *types.Header) bool { 1213 td, ttd := w.chain.GetTd(header.ParentHash, header.Number.Uint64()-1), w.chain.Config().TerminalTotalDifficulty 1214 return td != nil && ttd != nil && td.Cmp(ttd) >= 0 1215 } 1216 1217 // copyReceipts makes a deep copy of the given receipts. 1218 func copyReceipts(receipts []*types.Receipt) []*types.Receipt { 1219 result := make([]*types.Receipt, len(receipts)) 1220 for i, l := range receipts { 1221 cpy := *l 1222 result[i] = &cpy 1223 } 1224 return result 1225 } 1226 1227 // postSideBlock fires a side chain event, only use it for testing. 1228 func (w *worker) postSideBlock(event core.ChainSideEvent) { 1229 select { 1230 case w.chainSideCh <- event: 1231 case <-w.exitCh: 1232 } 1233 } 1234 1235 // totalFees computes total consumed miner fees in ETH. Block transactions and receipts have to have the same order. 
1236 func totalFees(block *types.Block, receipts []*types.Receipt) *big.Float { 1237 feesWei := new(big.Int) 1238 for i, tx := range block.Transactions() { 1239 minerFee, _ := tx.EffectiveGasTip(block.BaseFee()) 1240 feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), minerFee)) 1241 } 1242 return new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether))) 1243 }