github.com/cryptogateway/go-paymex@v0.0.0-20210204174735-96277fb1e602/miner/worker.go (about)

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package miner

import (
	"bytes"
	"errors"
	"math/big"
	"sync"
	"sync/atomic"
	"time"

	mapset "github.com/deckarep/golang-set"
	"github.com/cryptogateway/go-paymex/common"
	"github.com/cryptogateway/go-paymex/consensus"
	"github.com/cryptogateway/go-paymex/consensus/misc"
	"github.com/cryptogateway/go-paymex/core"
	"github.com/cryptogateway/go-paymex/core/state"
	"github.com/cryptogateway/go-paymex/core/types"
	"github.com/cryptogateway/go-paymex/event"
	"github.com/cryptogateway/go-paymex/log"
	"github.com/cryptogateway/go-paymex/params"
	"github.com/cryptogateway/go-paymex/trie"
)

const (
	// resultQueueSize is the size of channel listening to sealing result.
	resultQueueSize = 10

	// txChanSize is the size of channel listening to NewTxsEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 4096

	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

	// chainSideChanSize is the size of channel listening to ChainSideEvent.
	chainSideChanSize = 10

	// resubmitAdjustChanSize is the size of resubmitting interval adjustment channel.
	resubmitAdjustChanSize = 10

	// miningLogAtDepth is the number of confirmations before logging successful mining.
	miningLogAtDepth = 7

	// minRecommitInterval is the minimal time interval to recreate the mining block with
	// any newly arrived transactions.
	minRecommitInterval = 1 * time.Second

	// maxRecommitInterval is the maximum time interval to recreate the mining block with
	// any newly arrived transactions.
	maxRecommitInterval = 15 * time.Second

	// intervalAdjustRatio is the impact a single interval adjustment has on sealing work
	// resubmitting interval.
	intervalAdjustRatio = 0.1

	// intervalAdjustBias is applied during the new resubmit interval calculation in favor of
	// increasing upper limit or decreasing lower limit so that the limit can be reachable.
	intervalAdjustBias = 200 * 1000.0 * 1000.0

	// staleThreshold is the maximum depth of the acceptable stale block.
	staleThreshold = 7
)

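// Illustrative example (assumed numbers, not taken from this file): the resubmit
// interval is adjusted as an exponential moving average by recalcRecommit further
// below. Assuming a current interval of 3s and a feedback target of 6s, an
// increase step yields
//
//	next = 3s*(1-0.1) + 0.1*(6s + 0.2s) = 2.7s + 0.62s = 3.32s
//
// using intervalAdjustRatio = 0.1 and intervalAdjustBias = 200ms (200*1000*1000 ns);
// the result is capped at maxRecommitInterval on increase and floored at the
// user-specified minimum on decrease.
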
// environment is the worker's current environment and holds all of the current state information.
type environment struct {
	signer types.Signer

	state     *state.StateDB // apply state changes here
	ancestors mapset.Set     // ancestor set (used for checking uncle parent validity)
	family    mapset.Set     // family set (used for checking uncle invalidity)
	uncles    mapset.Set     // uncle set
	tcount    int            // tx count in cycle
	gasPool   *core.GasPool  // available gas used to pack transactions

	header   *types.Header
	txs      []*types.Transaction
	receipts []*types.Receipt
}

// task contains all information for consensus engine sealing and result submitting.
type task struct {
	receipts  []*types.Receipt
	state     *state.StateDB
	block     *types.Block
	createdAt time.Time
}

const (
	commitInterruptNone int32 = iota
	commitInterruptNewHead
	commitInterruptResubmit
)

// newWorkReq represents a request for new sealing work submitting with relative interrupt notifier.
type newWorkReq struct {
	interrupt *int32
	noempty   bool
	timestamp int64
}

// intervalAdjust represents a resubmitting interval adjustment.
type intervalAdjust struct {
	ratio float64
	inc   bool
}

// worker is the main object which takes care of submitting new work to the consensus engine
// and gathering the sealing result.
type worker struct {
	config      *Config
	chainConfig *params.ChainConfig
	engine      consensus.Engine
	eth         Backend
	chain       *core.BlockChain

	// Feeds
	pendingLogsFeed event.Feed

	// Subscriptions
	mux          *event.TypeMux
	txsCh        chan core.NewTxsEvent
	txsSub       event.Subscription
	chainHeadCh  chan core.ChainHeadEvent
	chainHeadSub event.Subscription
	chainSideCh  chan core.ChainSideEvent
	chainSideSub event.Subscription

	// Channels
	newWorkCh          chan *newWorkReq
	taskCh             chan *task
	resultCh           chan *types.Block
	startCh            chan struct{}
	exitCh             chan struct{}
	resubmitIntervalCh chan time.Duration
	resubmitAdjustCh   chan *intervalAdjust

	current      *environment                 // An environment for the current running cycle.
	localUncles  map[common.Hash]*types.Block // A set of side blocks generated locally as possible uncle blocks.
	remoteUncles map[common.Hash]*types.Block // A set of side blocks as possible uncle blocks.
	unconfirmed  *unconfirmedBlocks           // A set of locally mined blocks pending canonicalness confirmations.

	mu       sync.RWMutex // The lock used to protect the coinbase and extra fields
	coinbase common.Address
	extra    []byte

	pendingMu    sync.RWMutex
	pendingTasks map[common.Hash]*task

	snapshotMu    sync.RWMutex // The lock used to protect the block snapshot and state snapshot
	snapshotBlock *types.Block
	snapshotState *state.StateDB

	// atomic status counters
	running int32 // The indicator whether the consensus engine is running or not.
	newTxs  int32 // New arrival transaction count since last sealing work submitting.

	// noempty is the flag used to control whether the feature of pre-seal empty
	// block is enabled. The default value is false (pre-seal is enabled by default).
	// But in some special scenarios the consensus engine will seal blocks instantaneously;
	// in this case this feature will add all empty blocks into the canonical chain
	// non-stop and no real transaction will be included.
	noempty uint32

	// External functions
	isLocalBlock func(block *types.Block) bool // Function used to determine whether the specified block is mined by the local miner.

	// Test hooks
	newTaskHook  func(*task)                        // Method to call upon receiving a new sealing task.
	skipSealHook func(*task) bool                   // Method to decide whether skipping the sealing.
	fullTaskHook func()                             // Method to call before pushing the full sealing task.
	resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
}

func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(*types.Block) bool, init bool) *worker {
	worker := &worker{
		config:             config,
		chainConfig:        chainConfig,
		engine:             engine,
		eth:                eth,
		mux:                mux,
		chain:              eth.BlockChain(),
		isLocalBlock:       isLocalBlock,
		localUncles:        make(map[common.Hash]*types.Block),
		remoteUncles:       make(map[common.Hash]*types.Block),
		unconfirmed:        newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth),
		pendingTasks:       make(map[common.Hash]*task),
		txsCh:              make(chan core.NewTxsEvent, txChanSize),
		chainHeadCh:        make(chan core.ChainHeadEvent, chainHeadChanSize),
		chainSideCh:        make(chan core.ChainSideEvent, chainSideChanSize),
		newWorkCh:          make(chan *newWorkReq),
		taskCh:             make(chan *task),
		resultCh:           make(chan *types.Block, resultQueueSize),
		exitCh:             make(chan struct{}),
		startCh:            make(chan struct{}, 1),
		resubmitIntervalCh: make(chan time.Duration),
		resubmitAdjustCh:   make(chan *intervalAdjust, resubmitAdjustChanSize),
	}
	// Subscribe NewTxsEvent for tx pool
	worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh)
	// Subscribe events for blockchain
	worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh)
	worker.chainSideSub = eth.BlockChain().SubscribeChainSideEvent(worker.chainSideCh)

	// Sanitize recommit interval if the user-specified one is too short.
	recommit := worker.config.Recommit
	if recommit < minRecommitInterval {
		log.Warn("Sanitizing miner recommit interval", "provided", recommit, "updated", minRecommitInterval)
		recommit = minRecommitInterval
	}

	go worker.mainLoop()
	go worker.newWorkLoop(recommit)
	go worker.resultLoop()
	go worker.taskLoop()

	// Submit first work to initialize pending state.
	if init {
		worker.startCh <- struct{}{}
	}
	return worker
}

// setEtherbase sets the etherbase used to initialize the block coinbase field.
func (w *worker) setEtherbase(addr common.Address) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.coinbase = addr
}

// setExtra sets the content used to initialize the block extra field.
func (w *worker) setExtra(extra []byte) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.extra = extra
}

// setRecommitInterval updates the interval for miner sealing work recommitting.
func (w *worker) setRecommitInterval(interval time.Duration) {
	w.resubmitIntervalCh <- interval
}

// disablePreseal disables pre-sealing mining feature
func (w *worker) disablePreseal() {
	atomic.StoreUint32(&w.noempty, 1)
}

// enablePreseal enables pre-sealing mining feature
func (w *worker) enablePreseal() {
	atomic.StoreUint32(&w.noempty, 0)
}

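// Illustrative note on the pre-seal flag (describing typical usage, not additional
// logic from this file): disablePreseal and enablePreseal toggle the noempty flag
// consulted by commitNewWork. With pre-sealing enabled (noempty == 0) the worker
// first commits an empty block so sealing can start before transaction execution
// finishes; for engines that seal instantly, such as a 0-period Clique dev chain,
// pre-sealing is normally disabled so that empty blocks are not produced in a
// tight loop.
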
// pending returns the pending state and corresponding block.
func (w *worker) pending() (*types.Block, *state.StateDB) {
	// return a snapshot to avoid contention on currentMu mutex
	w.snapshotMu.RLock()
	defer w.snapshotMu.RUnlock()
	if w.snapshotState == nil {
		return nil, nil
	}
	return w.snapshotBlock, w.snapshotState.Copy()
}

// pendingBlock returns pending block.
func (w *worker) pendingBlock() *types.Block {
	// return a snapshot to avoid contention on currentMu mutex
	w.snapshotMu.RLock()
	defer w.snapshotMu.RUnlock()
	return w.snapshotBlock
}

// start sets the running status as 1 and triggers new work submitting.
func (w *worker) start() {
	atomic.StoreInt32(&w.running, 1)
	w.startCh <- struct{}{}
}

// stop sets the running status as 0.
func (w *worker) stop() {
	atomic.StoreInt32(&w.running, 0)
}

// isRunning returns an indicator whether worker is running or not.
func (w *worker) isRunning() bool {
	return atomic.LoadInt32(&w.running) == 1
}

// close terminates all background threads maintained by the worker.
// Note the worker does not support being closed multiple times.
func (w *worker) close() {
	if w.current != nil && w.current.state != nil {
		w.current.state.StopPrefetcher()
	}
	atomic.StoreInt32(&w.running, 0)
	close(w.exitCh)
}

// recalcRecommit recalculates the resubmitting interval upon feedback.
func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) time.Duration {
	var (
		prevF = float64(prev.Nanoseconds())
		next  float64
	)
	if inc {
		next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias)
		max := float64(maxRecommitInterval.Nanoseconds())
		if next > max {
			next = max
		}
	} else {
		next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias)
		min := float64(minRecommit.Nanoseconds())
		if next < min {
			next = min
		}
	}
	return time.Duration(int64(next))
}

// newWorkLoop is a standalone goroutine to submit new mining work upon received events.
func (w *worker) newWorkLoop(recommit time.Duration) {
	var (
		interrupt   *int32
		minRecommit = recommit // minimal resubmit interval specified by user.
		timestamp   int64      // timestamp for each round of mining.
	)

	timer := time.NewTimer(0)
	defer timer.Stop()
	<-timer.C // discard the initial tick

	// commit aborts in-flight transaction execution with given signal and resubmits a new one.
	commit := func(noempty bool, s int32) {
		if interrupt != nil {
			atomic.StoreInt32(interrupt, s)
		}
		interrupt = new(int32)
		select {
		case w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty, timestamp: timestamp}:
		case <-w.exitCh:
			return
		}
		timer.Reset(recommit)
		atomic.StoreInt32(&w.newTxs, 0)
	}
	// clearPending cleans the stale pending tasks.
	clearPending := func(number uint64) {
		w.pendingMu.Lock()
		for h, t := range w.pendingTasks {
			if t.block.NumberU64()+staleThreshold <= number {
				delete(w.pendingTasks, h)
			}
		}
		w.pendingMu.Unlock()
	}

	for {
		select {
		case <-w.startCh:
			clearPending(w.chain.CurrentBlock().NumberU64())
			timestamp = time.Now().Unix()
			commit(false, commitInterruptNewHead)

		case head := <-w.chainHeadCh:
			clearPending(head.Block.NumberU64())
			timestamp = time.Now().Unix()
			commit(false, commitInterruptNewHead)

		case <-timer.C:
			// If mining is running resubmit a new work cycle periodically to pull in
			// higher priced transactions. Disable this overhead for pending blocks.
			if w.isRunning() && (w.chainConfig.Clique == nil || w.chainConfig.Clique.Period > 0) {
				// Short circuit if no new transaction arrives.
				if atomic.LoadInt32(&w.newTxs) == 0 {
					timer.Reset(recommit)
					continue
				}
				commit(true, commitInterruptResubmit)
			}

		case interval := <-w.resubmitIntervalCh:
			// Adjust resubmit interval explicitly by user.
			if interval < minRecommitInterval {
				log.Warn("Sanitizing miner recommit interval", "provided", interval, "updated", minRecommitInterval)
				interval = minRecommitInterval
			}
			log.Info("Miner recommit interval update", "from", minRecommit, "to", interval)
			minRecommit, recommit = interval, interval

			if w.resubmitHook != nil {
				w.resubmitHook(minRecommit, recommit)
			}

		case adjust := <-w.resubmitAdjustCh:
			// Adjust resubmit interval by feedback.
			if adjust.inc {
				before := recommit
				target := float64(recommit.Nanoseconds()) / adjust.ratio
				recommit = recalcRecommit(minRecommit, recommit, target, true)
				log.Trace("Increase miner recommit interval", "from", before, "to", recommit)
			} else {
				before := recommit
				recommit = recalcRecommit(minRecommit, recommit, float64(minRecommit.Nanoseconds()), false)
				log.Trace("Decrease miner recommit interval", "from", before, "to", recommit)
			}

			if w.resubmitHook != nil {
				w.resubmitHook(minRecommit, recommit)
			}

		case <-w.exitCh:
			return
		}
	}
}

// mainLoop is a standalone goroutine to regenerate the sealing task based on the received event.
func (w *worker) mainLoop() {
	defer w.txsSub.Unsubscribe()
	defer w.chainHeadSub.Unsubscribe()
	defer w.chainSideSub.Unsubscribe()

	for {
		select {
		case req := <-w.newWorkCh:
			w.commitNewWork(req.interrupt, req.noempty, req.timestamp)

		case ev := <-w.chainSideCh:
			// Short circuit for duplicate side blocks
			if _, exist := w.localUncles[ev.Block.Hash()]; exist {
				continue
			}
			if _, exist := w.remoteUncles[ev.Block.Hash()]; exist {
				continue
			}
			// Add side block to possible uncle block set depending on the author.
			if w.isLocalBlock != nil && w.isLocalBlock(ev.Block) {
				w.localUncles[ev.Block.Hash()] = ev.Block
			} else {
				w.remoteUncles[ev.Block.Hash()] = ev.Block
			}
			// If our mining block contains less than 2 uncle blocks,
			// add the new uncle block if valid and regenerate a mining block.
			if w.isRunning() && w.current != nil && w.current.uncles.Cardinality() < 2 {
				start := time.Now()
				if err := w.commitUncle(w.current, ev.Block.Header()); err == nil {
					var uncles []*types.Header
					w.current.uncles.Each(func(item interface{}) bool {
						hash, ok := item.(common.Hash)
						if !ok {
							return false
						}
						uncle, exist := w.localUncles[hash]
						if !exist {
							uncle, exist = w.remoteUncles[hash]
						}
						if !exist {
							return false
						}
						uncles = append(uncles, uncle.Header())
						return false
					})
					w.commit(uncles, nil, true, start)
				}
			}

		case ev := <-w.txsCh:
			// Apply transactions to the pending state if we're not mining.
			//
			// Note: the received transactions may not be continuous with transactions
			// already included in the current mining block. These transactions will
			// be automatically eliminated.
			if !w.isRunning() && w.current != nil {
				// If block is already full, abort
				if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas {
					continue
				}
				w.mu.RLock()
				coinbase := w.coinbase
				w.mu.RUnlock()

				txs := make(map[common.Address]types.Transactions)
				for _, tx := range ev.Txs {
					acc, _ := types.Sender(w.current.signer, tx)
					txs[acc] = append(txs[acc], tx)
				}
				txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs)
				tcount := w.current.tcount
				w.commitTransactions(txset, coinbase, nil)
				// Only update the snapshot if any new transactions were added
				// to the pending block
				if tcount != w.current.tcount {
					w.updateSnapshot()
				}
			} else {
				// Special case: if the consensus engine is 0-period clique (dev mode),
				// submit mining work here since all empty submissions will be rejected
				// by clique. Of course the advance sealing (empty submission) is disabled.
				if w.chainConfig.Clique != nil && w.chainConfig.Clique.Period == 0 {
					w.commitNewWork(nil, true, time.Now().Unix())
				}
			}
			atomic.AddInt32(&w.newTxs, int32(len(ev.Txs)))

		// System stopped
		case <-w.exitCh:
			return
		case <-w.txsSub.Err():
			return
		case <-w.chainHeadSub.Err():
			return
		case <-w.chainSideSub.Err():
			return
		}
	}
}

// taskLoop is a standalone goroutine to fetch sealing tasks from the generator and
// push them to the consensus engine.
func (w *worker) taskLoop() {
	var (
		stopCh chan struct{}
		prev   common.Hash
	)

	// interrupt aborts the in-flight sealing task.
	interrupt := func() {
		if stopCh != nil {
			close(stopCh)
			stopCh = nil
		}
	}
	for {
		select {
		case task := <-w.taskCh:
			if w.newTaskHook != nil {
				w.newTaskHook(task)
			}
			// Reject duplicate sealing work due to resubmitting.
			sealHash := w.engine.SealHash(task.block.Header())
			if sealHash == prev {
				continue
			}
			// Interrupt previous sealing operation
			interrupt()
			stopCh, prev = make(chan struct{}), sealHash

			if w.skipSealHook != nil && w.skipSealHook(task) {
				continue
			}
			w.pendingMu.Lock()
			w.pendingTasks[sealHash] = task
			w.pendingMu.Unlock()

			if err := w.engine.Seal(w.chain, task.block, w.resultCh, stopCh); err != nil {
				log.Warn("Block sealing failed", "err", err)
			}
		case <-w.exitCh:
			interrupt()
			return
		}
	}
}

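// Illustrative note (describing the behaviour above, no additional logic):
// resubmitted work with identical content maps to the same SealHash, so taskLoop
// drops exact duplicates, and closing the previous stopCh aborts the in-flight
// sealing attempt before a new one is started, so at most one sealing operation
// runs at a time.
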
// resultLoop is a standalone goroutine to handle sealing result submitting
// and flush relative data to the database.
func (w *worker) resultLoop() {
	for {
		select {
		case block := <-w.resultCh:
			// Short circuit when receiving empty result.
			if block == nil {
				continue
			}
			// Short circuit when receiving duplicate result caused by resubmitting.
			if w.chain.HasBlock(block.Hash(), block.NumberU64()) {
				continue
			}
			var (
				sealhash = w.engine.SealHash(block.Header())
				hash     = block.Hash()
			)
			w.pendingMu.RLock()
			task, exist := w.pendingTasks[sealhash]
			w.pendingMu.RUnlock()
			if !exist {
				log.Error("Block found but no relative pending task", "number", block.Number(), "sealhash", sealhash, "hash", hash)
				continue
			}
			// Different blocks could share the same sealhash, deep copy here to prevent write-write conflict.
			var (
				receipts = make([]*types.Receipt, len(task.receipts))
				logs     []*types.Log
			)
			for i, receipt := range task.receipts {
				// add block location fields
				receipt.BlockHash = hash
				receipt.BlockNumber = block.Number()
				receipt.TransactionIndex = uint(i)

				receipts[i] = new(types.Receipt)
				*receipts[i] = *receipt
				// Update the block hash in all logs since it is now available and not when the
				// receipt/log of individual transactions were created.
				for _, log := range receipt.Logs {
					log.BlockHash = hash
				}
				logs = append(logs, receipt.Logs...)
			}
			// Commit block and state to database.
			_, err := w.chain.WriteBlockWithState(block, receipts, logs, task.state, true)
			if err != nil {
				log.Error("Failed writing block to chain", "err", err)
				continue
			}
			log.Info("Successfully sealed new block", "number", block.Number(), "sealhash", sealhash, "hash", hash,
				"elapsed", common.PrettyDuration(time.Since(task.createdAt)))

			// Broadcast the block and announce chain insertion event
			w.mux.Post(core.NewMinedBlockEvent{Block: block})

			// Insert the block into the set of pending ones to wait for confirmations
			w.unconfirmed.Insert(block.NumberU64(), block.Hash())

		case <-w.exitCh:
			return
		}
	}
}

// makeCurrent creates a new environment for the current cycle.
func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error {
	// Retrieve the parent state to execute on top and start a prefetcher for
	// the miner to speed block sealing up a bit
	state, err := w.chain.StateAt(parent.Root())
	if err != nil {
		return err
	}
	state.StartPrefetcher("miner")

	env := &environment{
		signer:    types.NewEIP155Signer(w.chainConfig.ChainID),
		state:     state,
		ancestors: mapset.NewSet(),
		family:    mapset.NewSet(),
		uncles:    mapset.NewSet(),
		header:    header,
	}
	// when 08 is processed ancestors contain 07 (quick block)
	for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) {
		for _, uncle := range ancestor.Uncles() {
			env.family.Add(uncle.Hash())
		}
		env.family.Add(ancestor.Hash())
		env.ancestors.Add(ancestor.Hash())
	}
	// Keep track of transactions which return errors so they can be removed
	env.tcount = 0

	// Swap out the old work with the new one, terminating any leftover prefetcher
	// processes in the meantime and starting a new one.
	if w.current != nil && w.current.state != nil {
		w.current.state.StopPrefetcher()
	}
	w.current = env
	return nil
}

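// Illustrative note (restating the rules above with an example, no additional
// logic): makeCurrent loads the last 7 canonical blocks into env.ancestors and
// those blocks plus their uncles into env.family; commitUncle below accepts a
// header only if it is not already in env.uncles or env.family, its parent is in
// env.ancestors, and it is not a sibling of the block being mined. For example, a
// side block at height N-1 whose parent is the canonical block at N-2 qualifies,
// while a second block sharing the current header's parent is rejected as a
// sibling.
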
// commitUncle adds the given block to uncle block set, returns error if failed to add.
func (w *worker) commitUncle(env *environment, uncle *types.Header) error {
	hash := uncle.Hash()
	if env.uncles.Contains(hash) {
		return errors.New("uncle not unique")
	}
	if env.header.ParentHash == uncle.ParentHash {
		return errors.New("uncle is sibling")
	}
	if !env.ancestors.Contains(uncle.ParentHash) {
		return errors.New("uncle's parent unknown")
	}
	if env.family.Contains(hash) {
		return errors.New("uncle already included")
	}
	env.uncles.Add(uncle.Hash())
	return nil
}

// updateSnapshot updates pending snapshot block and state.
// Note this function assumes the current variable is thread safe.
func (w *worker) updateSnapshot() {
	w.snapshotMu.Lock()
	defer w.snapshotMu.Unlock()

	var uncles []*types.Header
	w.current.uncles.Each(func(item interface{}) bool {
		hash, ok := item.(common.Hash)
		if !ok {
			return false
		}
		uncle, exist := w.localUncles[hash]
		if !exist {
			uncle, exist = w.remoteUncles[hash]
		}
		if !exist {
			return false
		}
		uncles = append(uncles, uncle.Header())
		return false
	})

	w.snapshotBlock = types.NewBlock(
		w.current.header,
		w.current.txs,
		uncles,
		w.current.receipts,
		new(trie.Trie),
	)
	w.snapshotState = w.current.state.Copy()
}

func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) {
	snap := w.current.state.Snapshot()

	receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, *w.chain.GetVMConfig())
	if err != nil {
		w.current.state.RevertToSnapshot(snap)
		return nil, err
	}
	w.current.txs = append(w.current.txs, tx)
	w.current.receipts = append(w.current.receipts, receipt)

	return receipt.Logs, nil
}

func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coinbase common.Address, interrupt *int32) bool {
	// Short circuit if current is nil
	if w.current == nil {
		return true
	}

	if w.current.gasPool == nil {
		w.current.gasPool = new(core.GasPool).AddGas(w.current.header.GasLimit)
	}

	var coalescedLogs []*types.Log

	for {
		// In the following three cases, we will interrupt the execution of the transaction.
		// (1) new head block event arrival, the interrupt signal is 1
		// (2) worker start or restart, the interrupt signal is 1
		// (3) worker recreates the mining block with any newly arrived transactions, the interrupt signal is 2.
		// For the first two cases, the semi-finished work will be discarded.
		// For the third case, the semi-finished work will be submitted to the consensus engine.
		if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone {
			// Notify resubmit loop to increase resubmitting interval due to too frequent commits.
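			// Illustrative example (assumed numbers, not taken from this file): the
			// ratio below is the fraction of the block gas limit already consumed when
			// the interrupt fired. With a gas limit of 8,000,000 and 6,000,000 gas still
			// left in the pool, ratio = (8,000,000-6,000,000)/8,000,000 = 0.25, so
			// newWorkLoop targets roughly recommit/0.25 = 4x the current interval
			// (before the moving-average step and clamping).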
			if atomic.LoadInt32(interrupt) == commitInterruptResubmit {
				ratio := float64(w.current.header.GasLimit-w.current.gasPool.Gas()) / float64(w.current.header.GasLimit)
				if ratio < 0.1 {
					ratio = 0.1
				}
				w.resubmitAdjustCh <- &intervalAdjust{
					ratio: ratio,
					inc:   true,
				}
			}
			return atomic.LoadInt32(interrupt) == commitInterruptNewHead
		}
		// If we don't have enough gas for any further transactions then we're done
		if w.current.gasPool.Gas() < params.TxGas {
			log.Trace("Not enough gas for further transactions", "have", w.current.gasPool, "want", params.TxGas)
			break
		}
		// Retrieve the next transaction and abort if all done
		tx := txs.Peek()
		if tx == nil {
			break
		}
		// Error may be ignored here. The error has already been checked
		// during transaction acceptance in the transaction pool.
		//
		// We use the eip155 signer regardless of the current hf.
		from, _ := types.Sender(w.current.signer, tx)
		// Check whether the tx is replay protected. If we're not in the EIP155 hf
		// phase, start ignoring the sender until we do.
		if tx.Protected() && !w.chainConfig.IsEIP155(w.current.header.Number) {
			log.Trace("Ignoring replay protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block)

			txs.Pop()
			continue
		}
		// Start executing the transaction
		w.current.state.Prepare(tx.Hash(), common.Hash{}, w.current.tcount)

		logs, err := w.commitTransaction(tx, coinbase)
		switch {
		case errors.Is(err, core.ErrGasLimitReached):
			// Pop the current out-of-gas transaction without shifting in the next from the account
			log.Trace("Gas limit exceeded for current block", "sender", from)
			txs.Pop()

		case errors.Is(err, core.ErrNonceTooLow):
			// New head notification data race between the transaction pool and miner, shift
			log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
			txs.Shift()

		case errors.Is(err, core.ErrNonceTooHigh):
			// Reorg notification data race between the transaction pool and miner, skip account
			log.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce())
			txs.Pop()

		case errors.Is(err, nil):
			// Everything ok, collect the logs and shift in the next transaction from the same account
			coalescedLogs = append(coalescedLogs, logs...)
			w.current.tcount++
			txs.Shift()

		default:
			// Strange error, discard the transaction and get the next in line (note, the
			// nonce-too-high clause will prevent us from executing in vain).
			log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
			txs.Shift()
		}
	}

	if !w.isRunning() && len(coalescedLogs) > 0 {
		// We don't push the pendingLogsEvent while we are mining. The reason is that
		// when we are mining, the worker will regenerate a mining block every 3 seconds.
		// In order to avoid pushing the repeated pendingLog, we disable the pending log pushing.

		// make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
		// logs by filling in the block hash when the block was mined by the local miner. This can
		// cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
		cpy := make([]*types.Log, len(coalescedLogs))
		for i, l := range coalescedLogs {
			cpy[i] = new(types.Log)
			*cpy[i] = *l
		}
		w.pendingLogsFeed.Send(cpy)
	}
	// Notify resubmit loop to decrease resubmitting interval if current interval is larger
	// than the user-specified one.
	if interrupt != nil {
		w.resubmitAdjustCh <- &intervalAdjust{inc: false}
	}
	return false
}

// commitNewWork generates several new sealing tasks based on the parent block.
func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) {
	w.mu.RLock()
	defer w.mu.RUnlock()

	tstart := time.Now()
	parent := w.chain.CurrentBlock()

	if parent.Time() >= uint64(timestamp) {
		timestamp = int64(parent.Time() + 1)
	}
	num := parent.Number()
	header := &types.Header{
		ParentHash: parent.Hash(),
		Number:     num.Add(num, common.Big1),
		GasLimit:   core.CalcGasLimit(parent, w.config.GasFloor, w.config.GasCeil),
		Extra:      w.extra,
		Time:       uint64(timestamp),
	}
	// Only set the coinbase if our consensus engine is running (avoid spurious block rewards)
	if w.isRunning() {
		if w.coinbase == (common.Address{}) {
			log.Error("Refusing to mine without etherbase")
			return
		}
		header.Coinbase = w.coinbase
	}
	if err := w.engine.Prepare(w.chain, header); err != nil {
		log.Error("Failed to prepare header for mining", "err", err)
		return
	}
	// If we care about TheDAO hard-fork, check whether to override the extra-data or not
	if daoBlock := w.chainConfig.DAOForkBlock; daoBlock != nil {
		// Check whether the block is among the fork extra-override range
		limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange)
		if header.Number.Cmp(daoBlock) >= 0 && header.Number.Cmp(limit) < 0 {
			// Depending on whether we support or oppose the fork, override differently
			if w.chainConfig.DAOForkSupport {
				header.Extra = common.CopyBytes(params.DAOForkBlockExtra)
			} else if bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
				header.Extra = []byte{} // If miner opposes, don't let it use the reserved extra-data
			}
		}
	}
	// Could potentially happen if starting to mine in an odd state.
	err := w.makeCurrent(parent, header)
	if err != nil {
		log.Error("Failed to create mining context", "err", err)
		return
	}
	// Create the current work task and check any fork transitions needed
	env := w.current
	if w.chainConfig.DAOForkSupport && w.chainConfig.DAOForkBlock != nil && w.chainConfig.DAOForkBlock.Cmp(header.Number) == 0 {
		misc.ApplyDAOHardFork(env.state)
	}
	// Accumulate the uncles for the current block
	uncles := make([]*types.Header, 0, 2)
	commitUncles := func(blocks map[common.Hash]*types.Block) {
		// Clean up stale uncle blocks first
		for hash, uncle := range blocks {
			if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() {
				delete(blocks, hash)
			}
		}
		for hash, uncle := range blocks {
			if len(uncles) == 2 {
				break
			}
			if err := w.commitUncle(env, uncle.Header()); err != nil {
				log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
			} else {
				log.Debug("Committing new uncle to block", "hash", hash)
				uncles = append(uncles, uncle.Header())
			}
		}
	}
	// Prefer locally generated uncles
	commitUncles(w.localUncles)
	commitUncles(w.remoteUncles)

	// Create an empty block based on temporary copied state for
	// sealing in advance without waiting for block execution to finish.
	if !noempty && atomic.LoadUint32(&w.noempty) == 0 {
		w.commit(uncles, nil, false, tstart)
	}

	// Fill the block with all available pending transactions.
	pending, err := w.eth.TxPool().Pending()
	if err != nil {
		log.Error("Failed to fetch pending transactions", "err", err)
		return
	}
	// Short circuit if there are no available pending transactions.
	// But if the empty precommit is already disabled, ignore it, since
	// an empty block is necessary to keep the liveness of the network.
	if len(pending) == 0 && atomic.LoadUint32(&w.noempty) == 0 {
		w.updateSnapshot()
		return
	}
	// Split the pending transactions into locals and remotes
	localTxs, remoteTxs := make(map[common.Address]types.Transactions), pending
	for _, account := range w.eth.TxPool().Locals() {
		if txs := remoteTxs[account]; len(txs) > 0 {
			delete(remoteTxs, account)
			localTxs[account] = txs
		}
	}
	if len(localTxs) > 0 {
		txs := types.NewTransactionsByPriceAndNonce(w.current.signer, localTxs)
		if w.commitTransactions(txs, w.coinbase, interrupt) {
			return
		}
	}
	if len(remoteTxs) > 0 {
		txs := types.NewTransactionsByPriceAndNonce(w.current.signer, remoteTxs)
		if w.commitTransactions(txs, w.coinbase, interrupt) {
			return
		}
	}
	w.commit(uncles, w.fullTaskHook, true, tstart)
}

// commit runs any post-transaction state modifications, assembles the final block
// and commits new work if consensus engine is running.
func (w *worker) commit(uncles []*types.Header, interval func(), update bool, start time.Time) error {
	// Deep copy receipts here to avoid interaction between different tasks.
	receipts := copyReceipts(w.current.receipts)
	s := w.current.state.Copy()
	block, err := w.engine.FinalizeAndAssemble(w.chain, w.current.header, s, w.current.txs, uncles, receipts)
	if err != nil {
		return err
	}
	if w.isRunning() {
		if interval != nil {
			interval()
		}
		select {
		case w.taskCh <- &task{receipts: receipts, state: s, block: block, createdAt: time.Now()}:
			w.unconfirmed.Shift(block.NumberU64() - 1)
			log.Info("Commit new mining work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
				"uncles", len(uncles), "txs", w.current.tcount,
				"gas", block.GasUsed(), "fees", totalFees(block, receipts),
				"elapsed", common.PrettyDuration(time.Since(start)))

		case <-w.exitCh:
			log.Info("Worker has exited")
		}
	}
	if update {
		w.updateSnapshot()
	}
	return nil
}

// copyReceipts makes a deep copy of the given receipts.
func copyReceipts(receipts []*types.Receipt) []*types.Receipt {
	result := make([]*types.Receipt, len(receipts))
	for i, l := range receipts {
		cpy := *l
		result[i] = &cpy
	}
	return result
}

// postSideBlock fires a side chain event, only use it for testing.
func (w *worker) postSideBlock(event core.ChainSideEvent) {
	select {
	case w.chainSideCh <- event:
	case <-w.exitCh:
	}
}

// totalFees computes total consumed fees in ETH. Block transactions and receipts have to have the same order.
func totalFees(block *types.Block, receipts []*types.Receipt) *big.Float {
	feesWei := new(big.Int)
	for i, tx := range block.Transactions() {
		feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), tx.GasPrice()))
	}
	return new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether)))
}
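
// Illustrative example (assumed numbers, not taken from this file): totalFees sums
// gasUsed*gasPrice per transaction and divides by params.Ether (1e18 wei). A single
// transfer using 21,000 gas at 20 gwei costs 21,000 * 20e9 wei = 4.2e14 wei, i.e.
// 0.00042 ETH.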