// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package miner

import (
	"bytes"
	"errors"
	"math/big"
	"sync"
	"sync/atomic"
	"time"

	mapset "github.com/deckarep/golang-set"
	"github.com/Debrief-BC/go-debrief/common"
	"github.com/Debrief-BC/go-debrief/consensus"
	"github.com/Debrief-BC/go-debrief/consensus/misc"
	"github.com/Debrief-BC/go-debrief/core"
	"github.com/Debrief-BC/go-debrief/core/state"
	"github.com/Debrief-BC/go-debrief/core/types"
	"github.com/Debrief-BC/go-debrief/event"
	"github.com/Debrief-BC/go-debrief/log"
	"github.com/Debrief-BC/go-debrief/params"
)

const (
	// resultQueueSize is the size of channel listening to sealing result.
	resultQueueSize = 10

	// txChanSize is the size of channel listening to NewTxsEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 4096

	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

	// chainSideChanSize is the size of channel listening to ChainSideEvent.
	chainSideChanSize = 10

	// resubmitAdjustChanSize is the size of resubmitting interval adjustment channel.
	resubmitAdjustChanSize = 10

	// miningLogAtDepth is the number of confirmations before logging successful mining.
	miningLogAtDepth = 7

	// minRecommitInterval is the minimal time interval to recreate the mining block with
	// any newly arrived transactions.
	minRecommitInterval = 1 * time.Second

	// maxRecommitInterval is the maximum time interval to recreate the mining block with
	// any newly arrived transactions.
	maxRecommitInterval = 15 * time.Second

	// intervalAdjustRatio is the impact a single interval adjustment has on sealing work
	// resubmitting interval.
	intervalAdjustRatio = 0.1

	// intervalAdjustBias is applied during the new resubmit interval calculation in favor of
	// increasing upper limit or decreasing lower limit so that the limit can be reachable.
	intervalAdjustBias = 200 * 1000.0 * 1000.0

	// staleThreshold is the maximum depth of the acceptable stale block.
	staleThreshold = 7
)

// environment is the worker's current environment and holds all of the current state information.
type environment struct {
	signer types.Signer // signer used to recover transaction senders for this cycle

	state     *state.StateDB // apply state changes here
	ancestors mapset.Set     // ancestor set (used for checking uncle parent validity)
	family    mapset.Set     // family set (used for checking uncle invalidity)
	uncles    mapset.Set     // uncle set
	tcount    int            // tx count in cycle
	gasPool   *core.GasPool  // available gas used to pack transactions

	header   *types.Header        // header of the block being assembled
	txs      []*types.Transaction // transactions committed so far this cycle
	receipts []*types.Receipt     // receipts matching txs, index-aligned
}

// task contains all information for consensus engine sealing and result submitting.
type task struct {
	receipts  []*types.Receipt // receipts generated while assembling the block
	state     *state.StateDB   // state after applying the block's transactions
	block     *types.Block     // the assembled block handed to the sealer
	createdAt time.Time        // creation time, used for sealing-latency logging
}

// Interrupt signals written to the *int32 handed to commitTransactions.
const (
	commitInterruptNone int32 = iota
	commitInterruptNewHead
	commitInterruptResubmit
)

// newWorkReq represents a request for new sealing work submitting with relative interrupt notifier.
type newWorkReq struct {
	interrupt *int32 // shared flag used to abort the in-flight commit
	noempty   bool   // if true, skip producing the preliminary empty block
	timestamp int64  // unix timestamp for the new block header
}

// intervalAdjust represents a resubmitting interval adjustment.
type intervalAdjust struct {
	ratio float64 // feedback ratio used when increasing the interval
	inc   bool    // true to increase the interval, false to decrease
}

// worker is the main object which takes care of submitting new work to consensus engine
// and gathering the sealing result.
type worker struct {
	config      *Config
	chainConfig *params.ChainConfig
	engine      consensus.Engine
	eth         Backend
	chain       *core.BlockChain

	// Feeds
	pendingLogsFeed event.Feed

	// Subscriptions
	mux          *event.TypeMux
	txsCh        chan core.NewTxsEvent
	txsSub       event.Subscription
	chainHeadCh  chan core.ChainHeadEvent
	chainHeadSub event.Subscription
	chainSideCh  chan core.ChainSideEvent
	chainSideSub event.Subscription

	// Channels
	newWorkCh          chan *newWorkReq
	taskCh             chan *task
	resultCh           chan *types.Block
	startCh            chan struct{}
	exitCh             chan struct{}
	resubmitIntervalCh chan time.Duration
	resubmitAdjustCh   chan *intervalAdjust

	current      *environment                 // An environment for current running cycle.
	localUncles  map[common.Hash]*types.Block // A set of side blocks generated locally as the possible uncle blocks.
	remoteUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks.
	unconfirmed  *unconfirmedBlocks           // A set of locally mined blocks pending canonicalness confirmations.

	mu       sync.RWMutex // The lock used to protect the coinbase and extra fields
	coinbase common.Address
	extra    []byte

	pendingMu    sync.RWMutex // The lock used to protect the pendingTasks map
	pendingTasks map[common.Hash]*task

	snapshotMu    sync.RWMutex // The lock used to protect the block snapshot and state snapshot
	snapshotBlock *types.Block
	snapshotState *state.StateDB

	// atomic status counters
	running int32 // The indicator whether the consensus engine is running or not.
	newTxs  int32 // New arrival transaction count since last sealing work submitting.

	// External functions
	isLocalBlock func(block *types.Block) bool // Function used to determine whether the specified block is mined by local miner.

	// Test hooks
	newTaskHook  func(*task)                        // Method to call upon receiving a new sealing task.
	skipSealHook func(*task) bool                   // Method to decide whether skipping the sealing.
	fullTaskHook func()                             // Method to call before pushing the full sealing task.
	resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
}

// newWorker constructs a worker, subscribes it to txpool/chain events and spins up
// its four background loops. If init is true, an initial work submission is queued
// so the pending state is populated immediately.
func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(*types.Block) bool, init bool) *worker {
	worker := &worker{
		config:             config,
		chainConfig:        chainConfig,
		engine:             engine,
		eth:                eth,
		mux:                mux,
		chain:              eth.BlockChain(),
		isLocalBlock:       isLocalBlock,
		localUncles:        make(map[common.Hash]*types.Block),
		remoteUncles:       make(map[common.Hash]*types.Block),
		unconfirmed:        newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth),
		pendingTasks:       make(map[common.Hash]*task),
		txsCh:              make(chan core.NewTxsEvent, txChanSize),
		chainHeadCh:        make(chan core.ChainHeadEvent, chainHeadChanSize),
		chainSideCh:        make(chan core.ChainSideEvent, chainSideChanSize),
		newWorkCh:          make(chan *newWorkReq),
		taskCh:             make(chan *task),
		resultCh:           make(chan *types.Block, resultQueueSize),
		exitCh:             make(chan struct{}),
		startCh:            make(chan struct{}, 1),
		resubmitIntervalCh: make(chan time.Duration),
		resubmitAdjustCh:   make(chan *intervalAdjust, resubmitAdjustChanSize),
	}
	// Subscribe NewTxsEvent for tx pool
	worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh)
	// Subscribe events for blockchain
	worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh)
	worker.chainSideSub = eth.BlockChain().SubscribeChainSideEvent(worker.chainSideCh)

	// Sanitize recommit interval if the user-specified one is too short.
	recommit := worker.config.Recommit
	if recommit < minRecommitInterval {
		log.Warn("Sanitizing miner recommit interval", "provided", recommit, "updated", minRecommitInterval)
		recommit = minRecommitInterval
	}

	go worker.mainLoop()
	go worker.newWorkLoop(recommit)
	go worker.resultLoop()
	go worker.taskLoop()

	// Submit first work to initialize pending state.
	if init {
		worker.startCh <- struct{}{}
	}
	return worker
}

// setEtherbase sets the etherbase used to initialize the block coinbase field.
func (w *worker) setEtherbase(addr common.Address) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.coinbase = addr
}

// setExtra sets the content used to initialize the block extra field.
func (w *worker) setExtra(extra []byte) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.extra = extra
}

// setRecommitInterval updates the interval for miner sealing work recommitting.
func (w *worker) setRecommitInterval(interval time.Duration) {
	w.resubmitIntervalCh <- interval
}

// pending returns the pending state and corresponding block.
func (w *worker) pending() (*types.Block, *state.StateDB) {
	// Return the cached snapshot (guarded by snapshotMu) rather than touching the
	// live mining environment; the state is copied so callers cannot mutate it.
	w.snapshotMu.RLock()
	defer w.snapshotMu.RUnlock()
	if w.snapshotState == nil {
		return nil, nil
	}
	return w.snapshotBlock, w.snapshotState.Copy()
}

// pendingBlock returns pending block.
func (w *worker) pendingBlock() *types.Block {
	// Return the cached snapshot (guarded by snapshotMu) rather than touching the
	// live mining environment.
	w.snapshotMu.RLock()
	defer w.snapshotMu.RUnlock()
	return w.snapshotBlock
}

// start sets the running status as 1 and triggers new work submitting.
func (w *worker) start() {
	atomic.StoreInt32(&w.running, 1)
	w.startCh <- struct{}{}
}

// stop sets the running status as 0.
func (w *worker) stop() {
	atomic.StoreInt32(&w.running, 0)
}

// isRunning returns an indicator whether worker is running or not.
func (w *worker) isRunning() bool {
	return atomic.LoadInt32(&w.running) == 1
}

// close terminates all background threads maintained by the worker.
// Note the worker does not support being closed multiple times.
287 func (w *worker) close() { 288 close(w.exitCh) 289 } 290 291 // newWorkLoop is a standalone goroutine to submit new mining work upon received events. 292 func (w *worker) newWorkLoop(recommit time.Duration) { 293 var ( 294 interrupt *int32 295 minRecommit = recommit // minimal resubmit interval specified by user. 296 timestamp int64 // timestamp for each round of mining. 297 ) 298 299 timer := time.NewTimer(0) 300 defer timer.Stop() 301 <-timer.C // discard the initial tick 302 303 // commit aborts in-flight transaction execution with given signal and resubmits a new one. 304 commit := func(noempty bool, s int32) { 305 if interrupt != nil { 306 atomic.StoreInt32(interrupt, s) 307 } 308 interrupt = new(int32) 309 w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty, timestamp: timestamp} 310 timer.Reset(recommit) 311 atomic.StoreInt32(&w.newTxs, 0) 312 } 313 // recalcRecommit recalculates the resubmitting interval upon feedback. 314 recalcRecommit := func(target float64, inc bool) { 315 var ( 316 prev = float64(recommit.Nanoseconds()) 317 next float64 318 ) 319 if inc { 320 next = prev*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias) 321 // Recap if interval is larger than the maximum time interval 322 if next > float64(maxRecommitInterval.Nanoseconds()) { 323 next = float64(maxRecommitInterval.Nanoseconds()) 324 } 325 } else { 326 next = prev*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias) 327 // Recap if interval is less than the user specified minimum 328 if next < float64(minRecommit.Nanoseconds()) { 329 next = float64(minRecommit.Nanoseconds()) 330 } 331 } 332 recommit = time.Duration(int64(next)) 333 } 334 // clearPending cleans the stale pending tasks. 
335 clearPending := func(number uint64) { 336 w.pendingMu.Lock() 337 for h, t := range w.pendingTasks { 338 if t.block.NumberU64()+staleThreshold <= number { 339 delete(w.pendingTasks, h) 340 } 341 } 342 w.pendingMu.Unlock() 343 } 344 345 for { 346 select { 347 case <-w.startCh: 348 clearPending(w.chain.CurrentBlock().NumberU64()) 349 timestamp = time.Now().Unix() 350 commit(false, commitInterruptNewHead) 351 352 case head := <-w.chainHeadCh: 353 clearPending(head.Block.NumberU64()) 354 timestamp = time.Now().Unix() 355 commit(false, commitInterruptNewHead) 356 357 case <-timer.C: 358 // If mining is running resubmit a new work cycle periodically to pull in 359 // higher priced transactions. Disable this overhead for pending blocks. 360 if w.isRunning() && (w.chainConfig.Clique == nil || w.chainConfig.Clique.Period > 0) { 361 // Short circuit if no new transaction arrives. 362 if atomic.LoadInt32(&w.newTxs) == 0 { 363 timer.Reset(recommit) 364 continue 365 } 366 commit(true, commitInterruptResubmit) 367 } 368 369 case interval := <-w.resubmitIntervalCh: 370 // Adjust resubmit interval explicitly by user. 371 if interval < minRecommitInterval { 372 log.Warn("Sanitizing miner recommit interval", "provided", interval, "updated", minRecommitInterval) 373 interval = minRecommitInterval 374 } 375 log.Info("Miner recommit interval update", "from", minRecommit, "to", interval) 376 minRecommit, recommit = interval, interval 377 378 if w.resubmitHook != nil { 379 w.resubmitHook(minRecommit, recommit) 380 } 381 382 case adjust := <-w.resubmitAdjustCh: 383 // Adjust resubmit interval by feedback. 
384 if adjust.inc { 385 before := recommit 386 recalcRecommit(float64(recommit.Nanoseconds())/adjust.ratio, true) 387 log.Trace("Increase miner recommit interval", "from", before, "to", recommit) 388 } else { 389 before := recommit 390 recalcRecommit(float64(minRecommit.Nanoseconds()), false) 391 log.Trace("Decrease miner recommit interval", "from", before, "to", recommit) 392 } 393 394 if w.resubmitHook != nil { 395 w.resubmitHook(minRecommit, recommit) 396 } 397 398 case <-w.exitCh: 399 return 400 } 401 } 402 } 403 404 // mainLoop is a standalone goroutine to regenerate the sealing task based on the received event. 405 func (w *worker) mainLoop() { 406 defer w.txsSub.Unsubscribe() 407 defer w.chainHeadSub.Unsubscribe() 408 defer w.chainSideSub.Unsubscribe() 409 410 for { 411 select { 412 case req := <-w.newWorkCh: 413 w.commitNewWork(req.interrupt, req.noempty, req.timestamp) 414 415 case ev := <-w.chainSideCh: 416 // Short circuit for duplicate side blocks 417 if _, exist := w.localUncles[ev.Block.Hash()]; exist { 418 continue 419 } 420 if _, exist := w.remoteUncles[ev.Block.Hash()]; exist { 421 continue 422 } 423 // Add side block to possible uncle block set depending on the author. 424 if w.isLocalBlock != nil && w.isLocalBlock(ev.Block) { 425 w.localUncles[ev.Block.Hash()] = ev.Block 426 } else { 427 w.remoteUncles[ev.Block.Hash()] = ev.Block 428 } 429 // If our mining block contains less than 2 uncle blocks, 430 // add the new uncle block if valid and regenerate a mining block. 
431 if w.isRunning() && w.current != nil && w.current.uncles.Cardinality() < 2 { 432 start := time.Now() 433 if err := w.commitUncle(w.current, ev.Block.Header()); err == nil { 434 var uncles []*types.Header 435 w.current.uncles.Each(func(item interface{}) bool { 436 hash, ok := item.(common.Hash) 437 if !ok { 438 return false 439 } 440 uncle, exist := w.localUncles[hash] 441 if !exist { 442 uncle, exist = w.remoteUncles[hash] 443 } 444 if !exist { 445 return false 446 } 447 uncles = append(uncles, uncle.Header()) 448 return false 449 }) 450 w.commit(uncles, nil, true, start) 451 } 452 } 453 454 case ev := <-w.txsCh: 455 // Apply transactions to the pending state if we're not mining. 456 // 457 // Note all transactions received may not be continuous with transactions 458 // already included in the current mining block. These transactions will 459 // be automatically eliminated. 460 if !w.isRunning() && w.current != nil { 461 // If block is already full, abort 462 if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas { 463 continue 464 } 465 w.mu.RLock() 466 coinbase := w.coinbase 467 w.mu.RUnlock() 468 469 txs := make(map[common.Address]types.Transactions) 470 for _, tx := range ev.Txs { 471 acc, _ := types.Sender(w.current.signer, tx) 472 txs[acc] = append(txs[acc], tx) 473 } 474 txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs) 475 tcount := w.current.tcount 476 w.commitTransactions(txset, coinbase, nil) 477 // Only update the snapshot if any new transactons were added 478 // to the pending block 479 if tcount != w.current.tcount { 480 w.updateSnapshot() 481 } 482 } else { 483 // If clique is running in dev mode(period is 0), disable 484 // advance sealing here. 
485 if w.chainConfig.Clique != nil && w.chainConfig.Clique.Period == 0 { 486 w.commitNewWork(nil, true, time.Now().Unix()) 487 } 488 } 489 atomic.AddInt32(&w.newTxs, int32(len(ev.Txs))) 490 491 // System stopped 492 case <-w.exitCh: 493 return 494 case <-w.txsSub.Err(): 495 return 496 case <-w.chainHeadSub.Err(): 497 return 498 case <-w.chainSideSub.Err(): 499 return 500 } 501 } 502 } 503 504 // taskLoop is a standalone goroutine to fetch sealing task from the generator and 505 // push them to consensus engine. 506 func (w *worker) taskLoop() { 507 var ( 508 stopCh chan struct{} 509 prev common.Hash 510 ) 511 512 // interrupt aborts the in-flight sealing task. 513 interrupt := func() { 514 if stopCh != nil { 515 close(stopCh) 516 stopCh = nil 517 } 518 } 519 for { 520 select { 521 case task := <-w.taskCh: 522 if w.newTaskHook != nil { 523 w.newTaskHook(task) 524 } 525 // Reject duplicate sealing work due to resubmitting. 526 sealHash := w.engine.SealHash(task.block.Header()) 527 if sealHash == prev { 528 continue 529 } 530 // Interrupt previous sealing operation 531 interrupt() 532 stopCh, prev = make(chan struct{}), sealHash 533 534 if w.skipSealHook != nil && w.skipSealHook(task) { 535 continue 536 } 537 w.pendingMu.Lock() 538 w.pendingTasks[w.engine.SealHash(task.block.Header())] = task 539 w.pendingMu.Unlock() 540 541 if err := w.engine.Seal(w.chain, task.block, w.resultCh, stopCh); err != nil { 542 log.Warn("Block sealing failed", "err", err) 543 } 544 case <-w.exitCh: 545 interrupt() 546 return 547 } 548 } 549 } 550 551 // resultLoop is a standalone goroutine to handle sealing result submitting 552 // and flush relative data to the database. 553 func (w *worker) resultLoop() { 554 for { 555 select { 556 case block := <-w.resultCh: 557 // Short circuit when receiving empty result. 558 if block == nil { 559 continue 560 } 561 // Short circuit when receiving duplicate result caused by resubmitting. 
562 if w.chain.HasBlock(block.Hash(), block.NumberU64()) { 563 continue 564 } 565 var ( 566 sealhash = w.engine.SealHash(block.Header()) 567 hash = block.Hash() 568 ) 569 w.pendingMu.RLock() 570 task, exist := w.pendingTasks[sealhash] 571 w.pendingMu.RUnlock() 572 if !exist { 573 log.Error("Block found but no relative pending task", "number", block.Number(), "sealhash", sealhash, "hash", hash) 574 continue 575 } 576 // Different block could share same sealhash, deep copy here to prevent write-write conflict. 577 var ( 578 receipts = make([]*types.Receipt, len(task.receipts)) 579 logs []*types.Log 580 ) 581 for i, receipt := range task.receipts { 582 // add block location fields 583 receipt.BlockHash = hash 584 receipt.BlockNumber = block.Number() 585 receipt.TransactionIndex = uint(i) 586 587 receipts[i] = new(types.Receipt) 588 *receipts[i] = *receipt 589 // Update the block hash in all logs since it is now available and not when the 590 // receipt/log of individual transactions were created. 591 for _, log := range receipt.Logs { 592 log.BlockHash = hash 593 } 594 logs = append(logs, receipt.Logs...) 595 } 596 // Commit block and state to database. 597 _, err := w.chain.WriteBlockWithState(block, receipts, logs, task.state, true) 598 if err != nil { 599 log.Error("Failed writing block to chain", "err", err) 600 continue 601 } 602 log.Info("Successfully sealed new block", "number", block.Number(), "sealhash", sealhash, "hash", hash, 603 "elapsed", common.PrettyDuration(time.Since(task.createdAt))) 604 605 // Broadcast the block and announce chain insertion event 606 w.mux.Post(core.NewMinedBlockEvent{Block: block}) 607 608 // Insert the block into the set of pending ones to resultLoop for confirmations 609 w.unconfirmed.Insert(block.NumberU64(), block.Hash()) 610 611 case <-w.exitCh: 612 return 613 } 614 } 615 } 616 617 // makeCurrent creates a new environment for the current cycle. 
618 func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error { 619 state, err := w.chain.StateAt(parent.Root()) 620 if err != nil { 621 return err 622 } 623 env := &environment{ 624 signer: types.NewEIP155Signer(w.chainConfig.ChainID), 625 state: state, 626 ancestors: mapset.NewSet(), 627 family: mapset.NewSet(), 628 uncles: mapset.NewSet(), 629 header: header, 630 } 631 632 // when 08 is processed ancestors contain 07 (quick block) 633 for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) { 634 for _, uncle := range ancestor.Uncles() { 635 env.family.Add(uncle.Hash()) 636 } 637 env.family.Add(ancestor.Hash()) 638 env.ancestors.Add(ancestor.Hash()) 639 } 640 641 // Keep track of transactions which return errors so they can be removed 642 env.tcount = 0 643 w.current = env 644 return nil 645 } 646 647 // commitUncle adds the given block to uncle block set, returns error if failed to add. 648 func (w *worker) commitUncle(env *environment, uncle *types.Header) error { 649 hash := uncle.Hash() 650 if env.uncles.Contains(hash) { 651 return errors.New("uncle not unique") 652 } 653 if env.header.ParentHash == uncle.ParentHash { 654 return errors.New("uncle is sibling") 655 } 656 if !env.ancestors.Contains(uncle.ParentHash) { 657 return errors.New("uncle's parent unknown") 658 } 659 if env.family.Contains(hash) { 660 return errors.New("uncle already included") 661 } 662 env.uncles.Add(uncle.Hash()) 663 return nil 664 } 665 666 // updateSnapshot updates pending snapshot block and state. 667 // Note this function assumes the current variable is thread safe. 
668 func (w *worker) updateSnapshot() { 669 w.snapshotMu.Lock() 670 defer w.snapshotMu.Unlock() 671 672 var uncles []*types.Header 673 w.current.uncles.Each(func(item interface{}) bool { 674 hash, ok := item.(common.Hash) 675 if !ok { 676 return false 677 } 678 uncle, exist := w.localUncles[hash] 679 if !exist { 680 uncle, exist = w.remoteUncles[hash] 681 } 682 if !exist { 683 return false 684 } 685 uncles = append(uncles, uncle.Header()) 686 return false 687 }) 688 689 w.snapshotBlock = types.NewBlock( 690 w.current.header, 691 w.current.txs, 692 uncles, 693 w.current.receipts, 694 ) 695 696 w.snapshotState = w.current.state.Copy() 697 } 698 699 func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) { 700 snap := w.current.state.Snapshot() 701 702 receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, *w.chain.GetVMConfig()) 703 if err != nil { 704 w.current.state.RevertToSnapshot(snap) 705 return nil, err 706 } 707 w.current.txs = append(w.current.txs, tx) 708 w.current.receipts = append(w.current.receipts, receipt) 709 710 return receipt.Logs, nil 711 } 712 713 func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coinbase common.Address, interrupt *int32) bool { 714 // Short circuit if current is nil 715 if w.current == nil { 716 return true 717 } 718 719 if w.current.gasPool == nil { 720 w.current.gasPool = new(core.GasPool).AddGas(w.current.header.GasLimit) 721 } 722 723 var coalescedLogs []*types.Log 724 725 for { 726 // In the following three cases, we will interrupt the execution of the transaction. 727 // (1) new head block event arrival, the interrupt signal is 1 728 // (2) worker start or restart, the interrupt signal is 1 729 // (3) worker recreate the mining block with any newly arrived transactions, the interrupt signal is 2. 
730 // For the first two cases, the semi-finished work will be discarded. 731 // For the third case, the semi-finished work will be submitted to the consensus engine. 732 if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone { 733 // Notify resubmit loop to increase resubmitting interval due to too frequent commits. 734 if atomic.LoadInt32(interrupt) == commitInterruptResubmit { 735 ratio := float64(w.current.header.GasLimit-w.current.gasPool.Gas()) / float64(w.current.header.GasLimit) 736 if ratio < 0.1 { 737 ratio = 0.1 738 } 739 w.resubmitAdjustCh <- &intervalAdjust{ 740 ratio: ratio, 741 inc: true, 742 } 743 } 744 return atomic.LoadInt32(interrupt) == commitInterruptNewHead 745 } 746 // If we don't have enough gas for any further transactions then we're done 747 if w.current.gasPool.Gas() < params.TxGas { 748 log.Trace("Not enough gas for further transactions", "have", w.current.gasPool, "want", params.TxGas) 749 break 750 } 751 // Retrieve the next transaction and abort if all done 752 tx := txs.Peek() 753 if tx == nil { 754 break 755 } 756 // Error may be ignored here. The error has already been checked 757 // during transaction acceptance is the transaction pool. 758 // 759 // We use the eip155 signer regardless of the current hf. 760 from, _ := types.Sender(w.current.signer, tx) 761 // Check whether the tx is replay protected. If we're not in the EIP155 hf 762 // phase, start ignoring the sender until we do. 
763 if tx.Protected() && !w.chainConfig.IsEIP155(w.current.header.Number) { 764 log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block) 765 766 txs.Pop() 767 continue 768 } 769 // Start executing the transaction 770 w.current.state.Prepare(tx.Hash(), common.Hash{}, w.current.tcount) 771 772 logs, err := w.commitTransaction(tx, coinbase) 773 switch err { 774 case core.ErrGasLimitReached: 775 // Pop the current out-of-gas transaction without shifting in the next from the account 776 log.Trace("Gas limit exceeded for current block", "sender", from) 777 txs.Pop() 778 779 case core.ErrNonceTooLow: 780 // New head notification data race between the transaction pool and miner, shift 781 log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) 782 txs.Shift() 783 784 case core.ErrNonceTooHigh: 785 // Reorg notification data race between the transaction pool and miner, skip account = 786 log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce()) 787 txs.Pop() 788 789 case nil: 790 // Everything ok, collect the logs and shift in the next transaction from the same account 791 coalescedLogs = append(coalescedLogs, logs...) 792 w.current.tcount++ 793 txs.Shift() 794 795 default: 796 // Strange error, discard the transaction and get the next in line (note, the 797 // nonce-too-high clause will prevent us from executing in vain). 798 log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) 799 txs.Shift() 800 } 801 } 802 803 if !w.isRunning() && len(coalescedLogs) > 0 { 804 // We don't push the pendingLogsEvent while we are mining. The reason is that 805 // when we are mining, the worker will regenerate a mining block every 3 seconds. 806 // In order to avoid pushing the repeated pendingLog, we disable the pending log pushing. 
807 808 // make a copy, the state caches the logs and these logs get "upgraded" from pending to mined 809 // logs by filling in the block hash when the block was mined by the local miner. This can 810 // cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed. 811 cpy := make([]*types.Log, len(coalescedLogs)) 812 for i, l := range coalescedLogs { 813 cpy[i] = new(types.Log) 814 *cpy[i] = *l 815 } 816 w.pendingLogsFeed.Send(cpy) 817 } 818 // Notify resubmit loop to decrease resubmitting interval if current interval is larger 819 // than the user-specified one. 820 if interrupt != nil { 821 w.resubmitAdjustCh <- &intervalAdjust{inc: false} 822 } 823 return false 824 } 825 826 // commitNewWork generates several new sealing tasks based on the parent block. 827 func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) { 828 w.mu.RLock() 829 defer w.mu.RUnlock() 830 831 tstart := time.Now() 832 parent := w.chain.CurrentBlock() 833 834 if parent.Time() >= uint64(timestamp) { 835 timestamp = int64(parent.Time() + 1) 836 } 837 // this will ensure we're not going off too far in the future 838 if now := time.Now().Unix(); timestamp > now+1 { 839 wait := time.Duration(timestamp-now) * time.Second 840 log.Info("Mining too far in the future", "wait", common.PrettyDuration(wait)) 841 time.Sleep(wait) 842 } 843 844 num := parent.Number() 845 header := &types.Header{ 846 ParentHash: parent.Hash(), 847 Number: num.Add(num, common.Big1), 848 GasLimit: core.CalcGasLimit(parent, w.config.GasFloor, w.config.GasCeil), 849 Extra: w.extra, 850 Time: uint64(timestamp), 851 } 852 // Only set the coinbase if our consensus engine is running (avoid spurious block rewards) 853 if w.isRunning() { 854 if w.coinbase == (common.Address{}) { 855 log.Error("Refusing to mine without etherbase") 856 return 857 } 858 header.Coinbase = w.coinbase 859 } 860 if err := w.engine.Prepare(w.chain, header); err != nil { 861 log.Error("Failed to prepare header 
for mining", "err", err) 862 return 863 } 864 // If we are care about TheDAO hard-fork check whether to override the extra-data or not 865 if daoBlock := w.chainConfig.DAOForkBlock; daoBlock != nil { 866 // Check whether the block is among the fork extra-override range 867 limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange) 868 if header.Number.Cmp(daoBlock) >= 0 && header.Number.Cmp(limit) < 0 { 869 // Depending whether we support or oppose the fork, override differently 870 if w.chainConfig.DAOForkSupport { 871 header.Extra = common.CopyBytes(params.DAOForkBlockExtra) 872 } else if bytes.Equal(header.Extra, params.DAOForkBlockExtra) { 873 header.Extra = []byte{} // If miner opposes, don't let it use the reserved extra-data 874 } 875 } 876 } 877 // Could potentially happen if starting to mine in an odd state. 878 err := w.makeCurrent(parent, header) 879 if err != nil { 880 log.Error("Failed to create mining context", "err", err) 881 return 882 } 883 // Create the current work task and check any fork transitions needed 884 env := w.current 885 if w.chainConfig.DAOForkSupport && w.chainConfig.DAOForkBlock != nil && w.chainConfig.DAOForkBlock.Cmp(header.Number) == 0 { 886 misc.ApplyDAOHardFork(env.state) 887 } 888 // Accumulate the uncles for the current block 889 uncles := make([]*types.Header, 0, 2) 890 commitUncles := func(blocks map[common.Hash]*types.Block) { 891 // Clean up stale uncle blocks first 892 for hash, uncle := range blocks { 893 if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() { 894 delete(blocks, hash) 895 } 896 } 897 for hash, uncle := range blocks { 898 if len(uncles) == 2 { 899 break 900 } 901 if err := w.commitUncle(env, uncle.Header()); err != nil { 902 log.Trace("Possible uncle rejected", "hash", hash, "reason", err) 903 } else { 904 log.Debug("Committing new uncle to block", "hash", hash) 905 uncles = append(uncles, uncle.Header()) 906 } 907 } 908 } 909 // Prefer to locally generated uncle 910 
commitUncles(w.localUncles) 911 commitUncles(w.remoteUncles) 912 913 if !noempty { 914 // Create an empty block based on temporary copied state for sealing in advance without waiting block 915 // execution finished. 916 w.commit(uncles, nil, false, tstart) 917 } 918 919 // Fill the block with all available pending transactions. 920 pending, err := w.eth.TxPool().Pending() 921 if err != nil { 922 log.Error("Failed to fetch pending transactions", "err", err) 923 return 924 } 925 // Short circuit if there is no available pending transactions 926 if len(pending) == 0 { 927 w.updateSnapshot() 928 return 929 } 930 // Split the pending transactions into locals and remotes 931 localTxs, remoteTxs := make(map[common.Address]types.Transactions), pending 932 for _, account := range w.eth.TxPool().Locals() { 933 if txs := remoteTxs[account]; len(txs) > 0 { 934 delete(remoteTxs, account) 935 localTxs[account] = txs 936 } 937 } 938 if len(localTxs) > 0 { 939 txs := types.NewTransactionsByPriceAndNonce(w.current.signer, localTxs) 940 if w.commitTransactions(txs, w.coinbase, interrupt) { 941 return 942 } 943 } 944 if len(remoteTxs) > 0 { 945 txs := types.NewTransactionsByPriceAndNonce(w.current.signer, remoteTxs) 946 if w.commitTransactions(txs, w.coinbase, interrupt) { 947 return 948 } 949 } 950 w.commit(uncles, w.fullTaskHook, true, tstart) 951 } 952 953 // commit runs any post-transaction state modifications, assembles the final block 954 // and commits new work if consensus engine is running. 955 func (w *worker) commit(uncles []*types.Header, interval func(), update bool, start time.Time) error { 956 // Deep copy receipts here to avoid interaction between different tasks. 
957 receipts := make([]*types.Receipt, len(w.current.receipts)) 958 for i, l := range w.current.receipts { 959 receipts[i] = new(types.Receipt) 960 *receipts[i] = *l 961 } 962 s := w.current.state.Copy() 963 block, err := w.engine.FinalizeAndAssemble(w.chain, w.current.header, s, w.current.txs, uncles, w.current.receipts) 964 if err != nil { 965 return err 966 } 967 if w.isRunning() { 968 if interval != nil { 969 interval() 970 } 971 select { 972 case w.taskCh <- &task{receipts: receipts, state: s, block: block, createdAt: time.Now()}: 973 w.unconfirmed.Shift(block.NumberU64() - 1) 974 975 feesWei := new(big.Int) 976 for i, tx := range block.Transactions() { 977 feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), tx.GasPrice())) 978 } 979 feesEth := new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether))) 980 981 log.Info("Commit new mining work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()), 982 "uncles", len(uncles), "txs", w.current.tcount, "gas", block.GasUsed(), "fees", feesEth, "elapsed", common.PrettyDuration(time.Since(start))) 983 984 case <-w.exitCh: 985 log.Info("Worker has exited") 986 } 987 } 988 if update { 989 w.updateSnapshot() 990 } 991 return nil 992 } 993 994 // postSideBlock fires a side chain event, only use it for testing. 995 func (w *worker) postSideBlock(event core.ChainSideEvent) { 996 select { 997 case w.chainSideCh <- event: 998 case <-w.exitCh: 999 } 1000 }