github.com/bencicandrej/quorum@v2.2.6-0.20190909091323-878cab86f711+incompatible/miner/worker.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package miner

import (
	"bytes"
	"errors"
	"math/big"
	"sync"
	"sync/atomic"
	"time"

	mapset "github.com/deckarep/golang-set"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/consensus/misc"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/params"
)

const (
	// resultQueueSize is the size of channel listening to sealing result.
	resultQueueSize = 10

	// txChanSize is the size of channel listening to NewTxsEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 4096

	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

	// chainSideChanSize is the size of channel listening to ChainSideEvent.
	chainSideChanSize = 10

	// resubmitAdjustChanSize is the size of resubmitting interval adjustment channel.
	resubmitAdjustChanSize = 10

	// miningLogAtDepth is the number of confirmations before logging successful mining.
	miningLogAtDepth = 7

	// minRecommitInterval is the minimal time interval to recreate the mining block with
	// any newly arrived transactions.
	minRecommitInterval = 1 * time.Second

	// maxRecommitInterval is the maximum time interval to recreate the mining block with
	// any newly arrived transactions.
	maxRecommitInterval = 15 * time.Second

	// intervalAdjustRatio is the impact a single interval adjustment has on sealing work
	// resubmitting interval.
	intervalAdjustRatio = 0.1

	// intervalAdjustBias is applied during the new resubmit interval calculation in favor of
	// increasing upper limit or decreasing lower limit so that the limit can be reachable.
	intervalAdjustBias = 200 * 1000.0 * 1000.0

	// staleThreshold is the maximum depth of the acceptable stale block.
	staleThreshold = 7
)
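
// Note (illustrative, derived from recalcRecommit in newWorkLoop below): the two
// adjustment constants above are combined as
//
//	next = prev*(1-intervalAdjustRatio) + intervalAdjustRatio*(target ± intervalAdjustBias)
//
// so, as an example, with a current recommit interval of 3s and a target of 3s, a single
// "increase" adjustment yields 0.9*3s + 0.1*(3s+0.2s) = 3.02s, which is then clamped to
// the [minRecommit, maxRecommitInterval] range.
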
// environment is the worker's current environment and holds all of the current state information.
type environment struct {
	signer types.Signer

	state     *state.StateDB // apply state changes here
	ancestors mapset.Set     // ancestor set (used for checking uncle parent validity)
	family    mapset.Set     // family set (used for checking uncle invalidity)
	uncles    mapset.Set     // uncle set
	tcount    int            // tx count in cycle
	gasPool   *core.GasPool  // available gas used to pack transactions

	header   *types.Header
	txs      []*types.Transaction
	receipts []*types.Receipt

	privateReceipts []*types.Receipt
	// Leave this publicState named state, add privateState which most code paths can just ignore
	privateState *state.StateDB
}

// task contains all information for consensus engine sealing and result submitting.
type task struct {
	receipts  []*types.Receipt
	state     *state.StateDB
	block     *types.Block
	createdAt time.Time

	privateReceipts []*types.Receipt
	// Leave this publicState named state, add privateState which most code paths can just ignore
	privateState *state.StateDB
}

const (
	commitInterruptNone int32 = iota
	commitInterruptNewHead
	commitInterruptResubmit
)

// newWorkReq represents a request for new sealing work submitting with relative interrupt notifier.
type newWorkReq struct {
	interrupt *int32
	noempty   bool
	timestamp int64
}

// intervalAdjust represents a resubmitting interval adjustment.
type intervalAdjust struct {
	ratio float64
	inc   bool
}

// worker is the main object which takes care of submitting new work to consensus engine
// and gathering the sealing result.
type worker struct {
	config *params.ChainConfig
	engine consensus.Engine
	eth    Backend
	chain  *core.BlockChain

	gasFloor uint64
	gasCeil  uint64

	// Subscriptions
	mux          *event.TypeMux
	txsCh        chan core.NewTxsEvent
	txsSub       event.Subscription
	chainHeadCh  chan core.ChainHeadEvent
	chainHeadSub event.Subscription
	chainSideCh  chan core.ChainSideEvent
	chainSideSub event.Subscription

	// Channels
	newWorkCh          chan *newWorkReq
	taskCh             chan *task
	resultCh           chan *types.Block
	startCh            chan struct{}
	exitCh             chan struct{}
	resubmitIntervalCh chan time.Duration
	resubmitAdjustCh   chan *intervalAdjust

	current      *environment                 // An environment for current running cycle.
	localUncles  map[common.Hash]*types.Block // A set of side blocks generated locally as the possible uncle blocks.
	remoteUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks.
	unconfirmed  *unconfirmedBlocks           // A set of locally mined blocks pending canonicalness confirmations.

	mu       sync.RWMutex // The lock used to protect the coinbase and extra fields
	coinbase common.Address
	extra    []byte

	pendingMu    sync.RWMutex
	pendingTasks map[common.Hash]*task

	snapshotMu    sync.RWMutex // The lock used to protect the block snapshot and state snapshot
	snapshotBlock *types.Block
	snapshotState *state.StateDB

	// atomic status counters
	running int32 // The indicator whether the consensus engine is running or not.
	newTxs  int32 // New arrival transaction count since last sealing work submitting.

	// External functions
	isLocalBlock func(block *types.Block) bool // Function used to determine whether the specified block is mined by local miner.

	// Test hooks
	newTaskHook func(*task) // Method to call upon receiving a new sealing task.
	skipSealHook func(*task) bool                   // Method to decide whether skipping the sealing.
	fullTaskHook func()                             // Method to call before pushing the full sealing task.
	resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
}

func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, recommit time.Duration, gasFloor, gasCeil uint64, isLocalBlock func(*types.Block) bool) *worker {
	worker := &worker{
		config:             config,
		engine:             engine,
		eth:                eth,
		mux:                mux,
		chain:              eth.BlockChain(),
		gasFloor:           gasFloor,
		gasCeil:            gasCeil,
		isLocalBlock:       isLocalBlock,
		localUncles:        make(map[common.Hash]*types.Block),
		remoteUncles:       make(map[common.Hash]*types.Block),
		unconfirmed:        newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth),
		pendingTasks:       make(map[common.Hash]*task),
		txsCh:              make(chan core.NewTxsEvent, txChanSize),
		chainHeadCh:        make(chan core.ChainHeadEvent, chainHeadChanSize),
		chainSideCh:        make(chan core.ChainSideEvent, chainSideChanSize),
		newWorkCh:          make(chan *newWorkReq),
		taskCh:             make(chan *task),
		resultCh:           make(chan *types.Block, resultQueueSize),
		exitCh:             make(chan struct{}),
		startCh:            make(chan struct{}, 1),
		resubmitIntervalCh: make(chan time.Duration),
		resubmitAdjustCh:   make(chan *intervalAdjust, resubmitAdjustChanSize),
	}
	if _, ok := engine.(consensus.Istanbul); ok || !config.IsQuorum || config.Clique != nil {
		// Subscribe NewTxsEvent for tx pool
		worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh)
		// Subscribe events for blockchain
		worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh)
		worker.chainSideSub = eth.BlockChain().SubscribeChainSideEvent(worker.chainSideCh)

		// Sanitize recommit interval if the user-specified one is too short.
		if recommit < minRecommitInterval {
			log.Warn("Sanitizing miner recommit interval", "provided", recommit, "updated", minRecommitInterval)
			recommit = minRecommitInterval
		}

		go worker.mainLoop()
		go worker.newWorkLoop(recommit)
		go worker.resultLoop()
		go worker.taskLoop()

		// Submit first work to initialize pending state.
		worker.startCh <- struct{}{}
	}

	return worker
}

// setEtherbase sets the etherbase used to initialize the block coinbase field.
func (w *worker) setEtherbase(addr common.Address) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.coinbase = addr
}

// setExtra sets the content used to initialize the block extra field.
func (w *worker) setExtra(extra []byte) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.extra = extra
}

// setRecommitInterval updates the interval for miner sealing work recommitting.
func (w *worker) setRecommitInterval(interval time.Duration) {
	w.resubmitIntervalCh <- interval
}

// pending returns the pending state and corresponding block.
func (w *worker) pending() (*types.Block, *state.StateDB, *state.StateDB) {
	// return a snapshot to avoid contention on currentMu mutex
	w.snapshotMu.RLock()
	defer w.snapshotMu.RUnlock()
	if w.snapshotState == nil {
		return nil, nil, nil
	}
	return w.snapshotBlock, w.snapshotState.Copy(), w.current.privateState.Copy()
}

// pendingBlock returns pending block.
func (w *worker) pendingBlock() *types.Block {
	// return a snapshot to avoid contention on currentMu mutex
	w.snapshotMu.RLock()
	defer w.snapshotMu.RUnlock()
	return w.snapshotBlock
}

// start sets the running status as 1 and triggers new work submitting.
func (w *worker) start() {
	atomic.StoreInt32(&w.running, 1)
	if istanbul, ok := w.engine.(consensus.Istanbul); ok {
		istanbul.Start(w.chain, w.chain.CurrentBlock, w.chain.HasBadBlock)
	}
	w.startCh <- struct{}{}
}

// stop sets the running status as 0.
func (w *worker) stop() {
	if istanbul, ok := w.engine.(consensus.Istanbul); ok {
		istanbul.Stop()
	}
	atomic.StoreInt32(&w.running, 0)
}

// isRunning returns an indicator whether worker is running or not.
func (w *worker) isRunning() bool {
	return atomic.LoadInt32(&w.running) == 1
}

// close terminates all background threads maintained by the worker.
// Note the worker does not support being closed multiple times.
func (w *worker) close() {
	close(w.exitCh)
}

// newWorkLoop is a standalone goroutine to submit new mining work upon received events.
func (w *worker) newWorkLoop(recommit time.Duration) {
	var (
		interrupt   *int32
		minRecommit = recommit // minimal resubmit interval specified by user.
		timestamp   int64      // timestamp for each round of mining.
	)

	timer := time.NewTimer(0)
	<-timer.C // discard the initial tick

	// commit aborts in-flight transaction execution with given signal and resubmits a new one.
	commit := func(noempty bool, s int32) {
		if interrupt != nil {
			atomic.StoreInt32(interrupt, s)
		}
		interrupt = new(int32)
		w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty, timestamp: timestamp}
		timer.Reset(recommit)
		atomic.StoreInt32(&w.newTxs, 0)
	}
	// recalcRecommit recalculates the resubmitting interval upon feedback.
	recalcRecommit := func(target float64, inc bool) {
		var (
			prev = float64(recommit.Nanoseconds())
			next float64
		)
		if inc {
			next = prev*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias)
			// Recap if interval is larger than the maximum time interval
			if next > float64(maxRecommitInterval.Nanoseconds()) {
				next = float64(maxRecommitInterval.Nanoseconds())
			}
		} else {
			next = prev*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias)
			// Recap if interval is less than the user specified minimum
			if next < float64(minRecommit.Nanoseconds()) {
				next = float64(minRecommit.Nanoseconds())
			}
		}
		recommit = time.Duration(int64(next))
	}
	// clearPending cleans the stale pending tasks.
	clearPending := func(number uint64) {
		w.pendingMu.Lock()
		for h, t := range w.pendingTasks {
			if t.block.NumberU64()+staleThreshold <= number {
				delete(w.pendingTasks, h)
			}
		}
		w.pendingMu.Unlock()
	}

	for {
		select {
		case <-w.startCh:
			clearPending(w.chain.CurrentBlock().NumberU64())
			timestamp = time.Now().Unix()
			commit(false, commitInterruptNewHead)

		case head := <-w.chainHeadCh:
			if h, ok := w.engine.(consensus.Handler); ok {
				h.NewChainHead()
			}
			clearPending(head.Block.NumberU64())
			timestamp = time.Now().Unix()
			commit(false, commitInterruptNewHead)

		case <-timer.C:
			// If mining is running resubmit a new work cycle periodically to pull in
			// higher priced transactions.
			// Disable this overhead for pending blocks.
			if w.isRunning() && (w.config.Clique == nil || w.config.Clique.Period > 0) {
				// Short circuit if no new transaction arrives.
				if atomic.LoadInt32(&w.newTxs) == 0 {
					timer.Reset(recommit)
					continue
				}
				commit(true, commitInterruptResubmit)
			}

		case interval := <-w.resubmitIntervalCh:
			// Adjust resubmit interval explicitly by user.
			if interval < minRecommitInterval {
				log.Warn("Sanitizing miner recommit interval", "provided", interval, "updated", minRecommitInterval)
				interval = minRecommitInterval
			}
			log.Info("Miner recommit interval update", "from", minRecommit, "to", interval)
			minRecommit, recommit = interval, interval

			if w.resubmitHook != nil {
				w.resubmitHook(minRecommit, recommit)
			}

		case adjust := <-w.resubmitAdjustCh:
			// Adjust resubmit interval by feedback.
			if adjust.inc {
				before := recommit
				recalcRecommit(float64(recommit.Nanoseconds())/adjust.ratio, true)
				log.Trace("Increase miner recommit interval", "from", before, "to", recommit)
			} else {
				before := recommit
				recalcRecommit(float64(minRecommit.Nanoseconds()), false)
				log.Trace("Decrease miner recommit interval", "from", before, "to", recommit)
			}

			if w.resubmitHook != nil {
				w.resubmitHook(minRecommit, recommit)
			}

		case <-w.exitCh:
			return
		}
	}
}

// mainLoop is a standalone goroutine to regenerate the sealing task based on the received event.
func (w *worker) mainLoop() {
	defer w.txsSub.Unsubscribe()
	defer w.chainHeadSub.Unsubscribe()
	defer w.chainSideSub.Unsubscribe()

	for {
		select {
		case req := <-w.newWorkCh:
			w.commitNewWork(req.interrupt, req.noempty, req.timestamp)

		case ev := <-w.chainSideCh:
			// Short circuit for duplicate side blocks
			if _, exist := w.localUncles[ev.Block.Hash()]; exist {
				continue
			}
			if _, exist := w.remoteUncles[ev.Block.Hash()]; exist {
				continue
			}
			// Add side block to possible uncle block set depending on the author.
			if w.isLocalBlock != nil && w.isLocalBlock(ev.Block) {
				w.localUncles[ev.Block.Hash()] = ev.Block
			} else {
				w.remoteUncles[ev.Block.Hash()] = ev.Block
			}
			// If our mining block contains less than 2 uncle blocks,
			// add the new uncle block if valid and regenerate a mining block.
			if w.isRunning() && w.current != nil && w.current.uncles.Cardinality() < 2 {
				start := time.Now()
				if err := w.commitUncle(w.current, ev.Block.Header()); err == nil {
					var uncles []*types.Header
					w.current.uncles.Each(func(item interface{}) bool {
						hash, ok := item.(common.Hash)
						if !ok {
							return false
						}
						uncle, exist := w.localUncles[hash]
						if !exist {
							uncle, exist = w.remoteUncles[hash]
						}
						if !exist {
							return false
						}
						uncles = append(uncles, uncle.Header())
						return false
					})
					w.commit(uncles, nil, true, start)
				}
			}

		case ev := <-w.txsCh:
			// Apply transactions to the pending state if we're not mining.
			//
			// Note all transactions received may not be continuous with transactions
			// already included in the current mining block. These transactions will
			// be automatically eliminated.
			if !w.isRunning() && w.current != nil {
				w.mu.RLock()
				coinbase := w.coinbase
				w.mu.RUnlock()

				txs := make(map[common.Address]types.Transactions)
				for _, tx := range ev.Txs {
					acc, _ := types.Sender(w.current.signer, tx)
					txs[acc] = append(txs[acc], tx)
				}
				txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs)
				w.commitTransactions(txset, coinbase, nil)
				w.updateSnapshot()
			} else {
				// If we're mining, but nothing is being processed, wake on new transactions
				if w.config.Clique != nil && w.config.Clique.Period == 0 {
					w.commitNewWork(nil, false, time.Now().Unix())
				}
			}
			atomic.AddInt32(&w.newTxs, int32(len(ev.Txs)))

		// System stopped
		case <-w.exitCh:
			return
		case <-w.txsSub.Err():
			return
		case <-w.chainHeadSub.Err():
			return
		case <-w.chainSideSub.Err():
			return
		}
	}
}

// taskLoop is a standalone goroutine to fetch sealing tasks from the generator and
// push them to the consensus engine.
func (w *worker) taskLoop() {
	var (
		stopCh chan struct{}
		prev   common.Hash
	)

	// interrupt aborts the in-flight sealing task.
	interrupt := func() {
		if stopCh != nil {
			close(stopCh)
			stopCh = nil
		}
	}
	for {
		select {
		case task := <-w.taskCh:
			if w.newTaskHook != nil {
				w.newTaskHook(task)
			}
			// Reject duplicate sealing work due to resubmitting.
			sealHash := w.engine.SealHash(task.block.Header())
			if sealHash == prev {
				continue
			}
			// Interrupt previous sealing operation
			interrupt()
			stopCh, prev = make(chan struct{}), sealHash

			if w.skipSealHook != nil && w.skipSealHook(task) {
				continue
			}
			w.pendingMu.Lock()
			w.pendingTasks[w.engine.SealHash(task.block.Header())] = task
			w.pendingMu.Unlock()
			go w.seal(task.block, stopCh)
		case <-w.exitCh:
			interrupt()
			return
		}
	}
}

// seal pushes the given block to the consensus engine for sealing; the sealed block,
// if any, is delivered on w.resultCh and sealing can be aborted via the stop channel.
func (w *worker) seal(b *types.Block, stop <-chan struct{}) {
	if err := w.engine.Seal(w.chain, b, w.resultCh, stop); err != nil {
		log.Warn("Block sealing failed", "err", err)
	}
}

// resultLoop is a standalone goroutine to handle sealing result submitting
// and flush relative data to the database.
func (w *worker) resultLoop() {
	for {
		select {
		case block := <-w.resultCh:
			// Short circuit when receiving empty result.
			if block == nil {
				continue
			}
			// Short circuit when receiving duplicate result caused by resubmitting.
			if w.chain.HasBlock(block.Hash(), block.NumberU64()) {
				continue
			}
			var (
				sealhash = w.engine.SealHash(block.Header())
				hash     = block.Hash()
			)
			w.pendingMu.RLock()
			task, exist := w.pendingTasks[sealhash]
			w.pendingMu.RUnlock()
			if !exist {
				log.Error("Block found but no relative pending task", "number", block.Number(), "sealhash", sealhash, "hash", hash)
				continue
			}
			// Different blocks could share the same sealhash; deep copy here to prevent write-write conflict.
			var logs []*types.Log
			work := w.current

			for _, receipt := range append(work.receipts, work.privateReceipts...) {
				// Update the block hash in all logs since it is now available and not when the
				// receipt/log of individual transactions were created.
				for _, log := range receipt.Logs {
					log.BlockHash = hash
				}
			}

			for _, log := range append(work.state.Logs(), work.privateState.Logs()...) {
				log.BlockHash = hash
			}

			// write private transactions
			privateStateRoot, _ := work.privateState.Commit(w.config.IsEIP158(block.Number()))
			core.WritePrivateStateRoot(w.eth.ChainDb(), block.Root(), privateStateRoot)
			allReceipts := mergeReceipts(work.receipts, work.privateReceipts)

			// Commit block and state to database.
			w.mu.Lock()
			stat, err := w.chain.WriteBlockWithState(block, allReceipts, work.state, nil)
			w.mu.Unlock()
			if err != nil {
				log.Error("Failed writing block to chain", "err", err)
				continue
			}

			if err := core.WritePrivateBlockBloom(w.eth.ChainDb(), block.NumberU64(), work.privateReceipts); err != nil {
				log.Error("Failed writing private block bloom", "err", err)
				continue
			}

			log.Info("Successfully sealed new block", "number", block.Number(), "sealhash", sealhash, "hash", hash,
				"elapsed", common.PrettyDuration(time.Since(task.createdAt)))

			// Broadcast the block and announce chain insertion event
			w.mux.Post(core.NewMinedBlockEvent{Block: block})

			var events []interface{}
			logs = append(work.state.Logs(), work.privateState.Logs()...)

			switch stat {
			case core.CanonStatTy:
				events = append(events, core.ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
				events = append(events, core.ChainHeadEvent{Block: block})
			case core.SideStatTy:
				events = append(events, core.ChainSideEvent{Block: block})
			}
			w.chain.PostChainEvents(events, logs)

			// Insert the block into the set of pending ones to resultLoop for confirmations
			w.unconfirmed.Insert(block.NumberU64(), block.Hash())

		case <-w.exitCh:
			return
		}
	}
}

// Given a slice of public receipts and an overlapping (smaller) slice of
// private receipts, return a new slice where the default for each location is
// the public receipt but we take the private receipt in each place we have
// one.
func mergeReceipts(pub, priv types.Receipts) types.Receipts {
	m := make(map[common.Hash]*types.Receipt)
	for _, receipt := range pub {
		m[receipt.TxHash] = receipt
	}
	for _, receipt := range priv {
		m[receipt.TxHash] = receipt
	}

	ret := make(types.Receipts, 0, len(pub))
	for _, pubReceipt := range pub {
		ret = append(ret, m[pubReceipt.TxHash])
	}

	return ret
}
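
// Example (illustrative): if pub = [r1, r2, r3] and priv contains a private receipt p2
// for the same transaction as r2, mergeReceipts returns [r1, p2, r3]; the result always
// has the same length and ordering as pub.
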
// makeCurrent creates a new environment for the current cycle.
func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error {
	publicState, privateState, err := w.chain.StateAt(parent.Root())
	if err != nil {
		return err
	}
	env := &environment{
		signer:       types.MakeSigner(w.config, header.Number),
		state:        publicState,
		ancestors:    mapset.NewSet(),
		family:       mapset.NewSet(),
		uncles:       mapset.NewSet(),
		header:       header,
		privateState: privateState,
	}

	// when 08 is processed ancestors contain 07 (quick block)
	for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) {
		for _, uncle := range ancestor.Uncles() {
			env.family.Add(uncle.Hash())
		}
		env.family.Add(ancestor.Hash())
		env.ancestors.Add(ancestor.Hash())
	}

	// Keep track of transactions which return errors so they can be removed
	env.tcount = 0
	w.current = env
	return nil
}

// commitUncle adds the given block to the uncle block set, returning an error if the addition fails.
func (w *worker) commitUncle(env *environment, uncle *types.Header) error {
	hash := uncle.Hash()
	if env.uncles.Contains(hash) {
		return errors.New("uncle not unique")
	}
	if env.header.ParentHash == uncle.ParentHash {
		return errors.New("uncle is sibling")
	}
	if !env.ancestors.Contains(uncle.ParentHash) {
		return errors.New("uncle's parent unknown")
	}
	if env.family.Contains(hash) {
		return errors.New("uncle already included")
	}
	env.uncles.Add(uncle.Hash())
	return nil
}

// updateSnapshot updates pending snapshot block and state.
// Note this function assumes the current variable is thread safe.
func (w *worker) updateSnapshot() {
	w.snapshotMu.Lock()
	defer w.snapshotMu.Unlock()

	var uncles []*types.Header
	w.current.uncles.Each(func(item interface{}) bool {
		hash, ok := item.(common.Hash)
		if !ok {
			return false
		}
		uncle, exist := w.localUncles[hash]
		if !exist {
			uncle, exist = w.remoteUncles[hash]
		}
		if !exist {
			return false
		}
		uncles = append(uncles, uncle.Header())
		return false
	})

	w.snapshotBlock = types.NewBlock(
		w.current.header,
		w.current.txs,
		uncles,
		w.current.receipts,
	)

	w.snapshotState = w.current.state.Copy()
}

// commitTransaction executes a single transaction against the current public and
// private state, reverting both on failure, and returns the combined logs.
func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) {
	snap := w.current.state.Snapshot()
	privateSnap := w.current.privateState.Snapshot()

	receipt, privateReceipt, _, err := core.ApplyTransaction(w.config, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.privateState, w.current.header, tx, &w.current.header.GasUsed, vm.Config{})
	if err != nil {
		w.current.state.RevertToSnapshot(snap)
		w.current.privateState.RevertToSnapshot(privateSnap)
		return nil, err
	}
	w.current.txs = append(w.current.txs, tx)
	w.current.receipts = append(w.current.receipts, receipt)

	logs := receipt.Logs
	if privateReceipt != nil {
		logs = append(receipt.Logs, privateReceipt.Logs...)
		w.current.privateReceipts = append(w.current.privateReceipts, privateReceipt)
	}
	return logs, nil
}

// commitTransactions applies the given transactions to the current environment.
// It returns true when the caller should abandon the current block (the environment
// is missing or a new chain head arrived), and false otherwise.
func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coinbase common.Address, interrupt *int32) bool {
	// Short circuit if current is nil
	if w.current == nil {
		return true
	}

	if w.current.gasPool == nil {
		w.current.gasPool = new(core.GasPool).AddGas(w.current.header.GasLimit)
	}

	var coalescedLogs []*types.Log

	for {
		// In the following three cases, we will interrupt the execution of the transaction.
		// (1) new head block event arrival, the interrupt signal is 1
		// (2) worker start or restart, the interrupt signal is 1
		// (3) worker recreate the mining block with any newly arrived transactions, the interrupt signal is 2.
		// For the first two cases, the semi-finished work will be discarded.
		// For the third case, the semi-finished work will be submitted to the consensus engine.
		if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone {
			// Notify resubmit loop to increase resubmitting interval due to too frequent commits.
			if atomic.LoadInt32(interrupt) == commitInterruptResubmit {
				ratio := float64(w.current.header.GasLimit-w.current.gasPool.Gas()) / float64(w.current.header.GasLimit)
				if ratio < 0.1 {
					ratio = 0.1
				}
				w.resubmitAdjustCh <- &intervalAdjust{
					ratio: ratio,
					inc:   true,
				}
			}
			return atomic.LoadInt32(interrupt) == commitInterruptNewHead
		}
		// If we don't have enough gas for any further transactions then we're done
		if w.current.gasPool.Gas() < params.TxGas {
			log.Trace("Not enough gas for further transactions", "have", w.current.gasPool, "want", params.TxGas)
			break
		}
		// Retrieve the next transaction and abort if all done
		tx := txs.Peek()
		if tx == nil {
			break
		}
		// Error may be ignored here. The error has already been checked
		// during transaction acceptance in the transaction pool.
		//
		// We use the eip155 signer regardless of the current hf.
		from, _ := types.Sender(w.current.signer, tx)
		// Check whether the tx is replay protected. If we're not in the EIP155 hf
		// phase, start ignoring the sender until we do.
		if tx.Protected() && !w.config.IsEIP155(w.current.header.Number) && !tx.IsPrivate() {
			log.Trace("Ignoring replay protected transaction", "hash", tx.Hash(), "eip155", w.config.EIP155Block)

			txs.Pop()
			continue
		}
		// Start executing the transaction
		w.current.state.Prepare(tx.Hash(), common.Hash{}, w.current.tcount)
		w.current.privateState.Prepare(tx.Hash(), common.Hash{}, w.current.tcount)

		logs, err := w.commitTransaction(tx, coinbase)
		switch err {
		case core.ErrGasLimitReached:
			// Pop the current out-of-gas transaction without shifting in the next from the account
			log.Trace("Gas limit exceeded for current block", "sender", from)
			txs.Pop()

		case core.ErrNonceTooLow:
			// New head notification data race between the transaction pool and miner, shift
			log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
			txs.Shift()

		case core.ErrNonceTooHigh:
			// Reorg notification data race between the transaction pool and miner, skip account
			log.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce())
			txs.Pop()

		case nil:
			// Everything ok, collect the logs and shift in the next transaction from the same account
			coalescedLogs = append(coalescedLogs, logs...)
			w.current.tcount++
			txs.Shift()

		default:
			// Strange error, discard the transaction and get the next in line (note, the
			// nonce-too-high clause will prevent us from executing in vain).
			log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
			txs.Shift()
		}
	}

	if !w.isRunning() && len(coalescedLogs) > 0 {
		// We don't push the pendingLogsEvent while we are mining. The reason is that
		// when we are mining, the worker will regenerate a mining block every 3 seconds.
		// In order to avoid pushing the repeated pendingLog, we disable the pending log pushing.

		// make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
		// logs by filling in the block hash when the block was mined by the local miner. This can
		// cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
		cpy := make([]*types.Log, len(coalescedLogs))
		for i, l := range coalescedLogs {
			cpy[i] = new(types.Log)
			*cpy[i] = *l
		}
		go w.mux.Post(core.PendingLogsEvent{Logs: cpy})
	}
	// Notify resubmit loop to decrease resubmitting interval if current interval is larger
	// than the user-specified one.
	if interrupt != nil {
		w.resubmitAdjustCh <- &intervalAdjust{inc: false}
	}
	return false
}

// commitNewWork generates several new sealing tasks based on the parent block.
func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) {
	w.mu.RLock()
	defer w.mu.RUnlock()

	tstart := time.Now()
	parent := w.chain.CurrentBlock()

	if parent.Time().Cmp(new(big.Int).SetInt64(timestamp)) >= 0 {
		timestamp = parent.Time().Int64() + 1
	}
	// this will ensure we're not going off too far in the future
	if now := time.Now().Unix(); timestamp > now+1 {
		wait := time.Duration(timestamp-now) * time.Second
		log.Info("Mining too far in the future", "wait", common.PrettyDuration(wait))
		time.Sleep(wait)
	}

	num := parent.Number()
	header := &types.Header{
		ParentHash: parent.Hash(),
		Number:     num.Add(num, common.Big1),
		GasLimit:   core.CalcGasLimit(parent, w.gasFloor, w.gasCeil),
		Extra:      w.extra,
		Time:       big.NewInt(timestamp),
	}
	// Only set the coinbase if our consensus engine is running (avoid spurious block rewards)
	if w.isRunning() {
		if w.coinbase == (common.Address{}) {
			log.Error("Refusing to mine without etherbase")
			return
		}
		header.Coinbase = w.coinbase
	}
	if err := w.engine.Prepare(w.chain, header); err != nil {
		log.Error("Failed to prepare header for mining", "err", err)
		return
	}
	// If we care about TheDAO hard-fork, check whether to override the extra-data or not
	if daoBlock := w.config.DAOForkBlock; daoBlock != nil {
		// Check whether the block is among the fork extra-override range
		limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange)
		if header.Number.Cmp(daoBlock) >= 0 && header.Number.Cmp(limit) < 0 {
			// Depending whether we support or oppose the fork, override differently
			if w.config.DAOForkSupport {
				header.Extra = common.CopyBytes(params.DAOForkBlockExtra)
			} else if bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
				header.Extra = []byte{} // If miner opposes, don't let it use the reserved extra-data
			}
		}
	}
	// Could potentially happen if starting to mine in an odd state.
	err := w.makeCurrent(parent, header)
	if err != nil {
		log.Error("Failed to create mining context", "err", err)
		return
	}
	// Create the current work task and check any fork transitions needed
	env := w.current
	if w.config.DAOForkSupport && w.config.DAOForkBlock != nil && w.config.DAOForkBlock.Cmp(header.Number) == 0 {
		misc.ApplyDAOHardFork(env.state)
	}
	// Accumulate the uncles for the current block
	uncles := make([]*types.Header, 0, 2)
	commitUncles := func(blocks map[common.Hash]*types.Block) {
		// Clean up stale uncle blocks first
		for hash, uncle := range blocks {
			if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() {
				delete(blocks, hash)
			}
		}
		for hash, uncle := range blocks {
			if len(uncles) == 2 {
				break
			}
			if err := w.commitUncle(env, uncle.Header()); err != nil {
				log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
			} else {
				log.Debug("Committing new uncle to block", "hash", hash)
				uncles = append(uncles, uncle.Header())
			}
		}
	}
	// Prefer locally generated uncles
	commitUncles(w.localUncles)
	commitUncles(w.remoteUncles)

	if !noempty {
		// Create an empty block based on temporary copied state for sealing in advance,
		// without waiting for block execution to finish.
		w.commit(uncles, nil, false, tstart)
	}

	// Fill the block with all available pending transactions.
	pending, err := w.eth.TxPool().Pending()
	if err != nil {
		log.Error("Failed to fetch pending transactions", "err", err)
		return
	}
	// Short circuit if there are no available pending transactions
	if len(pending) == 0 {
		w.updateSnapshot()
		return
	}
	// Split the pending transactions into locals and remotes
	localTxs, remoteTxs := make(map[common.Address]types.Transactions), pending
	for _, account := range w.eth.TxPool().Locals() {
		if txs := remoteTxs[account]; len(txs) > 0 {
			delete(remoteTxs, account)
			localTxs[account] = txs
		}
	}
	if len(localTxs) > 0 {
		txs := types.NewTransactionsByPriceAndNonce(w.current.signer, localTxs)
		if w.commitTransactions(txs, w.coinbase, interrupt) {
			return
		}
	}
	if len(remoteTxs) > 0 {
		txs := types.NewTransactionsByPriceAndNonce(w.current.signer, remoteTxs)
		if w.commitTransactions(txs, w.coinbase, interrupt) {
			return
		}
	}
	w.commit(uncles, w.fullTaskHook, true, tstart)
}

// commit runs any post-transaction state modifications, assembles the final block
// and commits new work if consensus engine is running.
func (w *worker) commit(uncles []*types.Header, interval func(), update bool, start time.Time) error {
	// Deep copy receipts here to avoid interaction between different tasks.
	receipts := make([]*types.Receipt, len(w.current.receipts))
	for i, l := range w.current.receipts {
		receipts[i] = new(types.Receipt)
		*receipts[i] = *l
	}

	privateReceipts := make([]*types.Receipt, len(w.current.privateReceipts))
	for i, l := range w.current.privateReceipts {
		privateReceipts[i] = new(types.Receipt)
		*privateReceipts[i] = *l
	}

	s := w.current.state.Copy()
	ps := w.current.privateState.Copy()
	block, err := w.engine.Finalize(w.chain, w.current.header, s, w.current.txs, uncles, w.current.receipts)
	if err != nil {
		return err
	}
	if w.isRunning() {
		if interval != nil {
			interval()
		}
		select {
		case w.taskCh <- &task{receipts: receipts, privateReceipts: privateReceipts, state: s, privateState: ps, block: block, createdAt: time.Now()}:
			w.unconfirmed.Shift(block.NumberU64() - 1)

			feesWei := new(big.Int)
			for i, tx := range block.Transactions() {
				feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), tx.GasPrice()))
			}
			feesEth := new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether)))

			log.Info("Commit new mining work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
				"uncles", len(uncles), "txs", w.current.tcount, "gas", block.GasUsed(), "fees", feesEth, "elapsed", common.PrettyDuration(time.Since(start)))

		case <-w.exitCh:
			log.Info("Worker has exited")
		}
	}
	if update {
		w.updateSnapshot()
	}
	return nil
}
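
// Illustrative lifecycle sketch (editorial, hedged): assuming a chain configuration,
// consensus engine, Backend implementation and event mux are already wired up, the
// worker above might be driven as follows. The recommit interval, gas limits and
// etherbase address are arbitrary example values.
//
//	func exampleWorkerLifecycle(cfg *params.ChainConfig, engine consensus.Engine, backend Backend, mux *event.TypeMux) {
//		w := newWorker(cfg, engine, backend, mux, 3*time.Second, 8000000, 8000000, nil)
//		w.setEtherbase(common.HexToAddress("0x0000000000000000000000000000000000000001"))
//		w.start() // newWorkLoop commits work, taskLoop seals it, resultLoop persists the result
//
//		block, pubState, privState := w.pending() // snapshot of the block currently being assembled
//		_, _, _ = block, pubState, privState
//
//		w.setRecommitInterval(5 * time.Second) // adjust how often work is recreated with new txs
//		w.stop()                               // pause sealing; background loops stay alive
//		w.close()                              // terminate all worker goroutines via exitCh
//	}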