github.com/jpmorganchase/quorum@v21.1.0+incompatible/miner/worker.go (about) 1 // Copyright 2015 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package miner 18 19 import ( 20 "bytes" 21 "errors" 22 "math/big" 23 "sync" 24 "sync/atomic" 25 "time" 26 27 "github.com/deckarep/golang-set" 28 "github.com/ethereum/go-ethereum/common" 29 "github.com/ethereum/go-ethereum/consensus" 30 "github.com/ethereum/go-ethereum/consensus/misc" 31 "github.com/ethereum/go-ethereum/core" 32 "github.com/ethereum/go-ethereum/core/rawdb" 33 "github.com/ethereum/go-ethereum/core/state" 34 "github.com/ethereum/go-ethereum/core/types" 35 "github.com/ethereum/go-ethereum/event" 36 "github.com/ethereum/go-ethereum/log" 37 "github.com/ethereum/go-ethereum/params" 38 ) 39 40 const ( 41 // resultQueueSize is the size of channel listening to sealing result. 42 resultQueueSize = 10 43 44 // txChanSize is the size of channel listening to NewTxsEvent. 45 // The number is referenced from the size of tx pool. 46 txChanSize = 4096 47 48 // chainHeadChanSize is the size of channel listening to ChainHeadEvent. 49 chainHeadChanSize = 10 50 51 // chainSideChanSize is the size of channel listening to ChainSideEvent. 
52 chainSideChanSize = 10 53 54 // resubmitAdjustChanSize is the size of resubmitting interval adjustment channel. 55 resubmitAdjustChanSize = 10 56 57 // miningLogAtDepth is the number of confirmations before logging successful mining. 58 miningLogAtDepth = 7 59 60 // minRecommitInterval is the minimal time interval to recreate the mining block with 61 // any newly arrived transactions. 62 minRecommitInterval = 1 * time.Second 63 64 // maxRecommitInterval is the maximum time interval to recreate the mining block with 65 // any newly arrived transactions. 66 maxRecommitInterval = 15 * time.Second 67 68 // intervalAdjustRatio is the impact a single interval adjustment has on sealing work 69 // resubmitting interval. 70 intervalAdjustRatio = 0.1 71 72 // intervalAdjustBias is applied during the new resubmit interval calculation in favor of 73 // increasing upper limit or decreasing lower limit so that the limit can be reachable. 74 intervalAdjustBias = 200 * 1000.0 * 1000.0 75 76 // staleThreshold is the maximum depth of the acceptable stale block. 77 staleThreshold = 7 78 ) 79 80 // environment is the worker's current environment and holds all of the current state information. 81 type environment struct { 82 signer types.Signer 83 84 state *state.StateDB // apply state changes here 85 ancestors mapset.Set // ancestor set (used for checking uncle parent validity) 86 family mapset.Set // family set (used for checking uncle invalidity) 87 uncles mapset.Set // uncle set 88 tcount int // tx count in cycle 89 gasPool *core.GasPool // available gas used to pack transactions 90 91 header *types.Header 92 txs []*types.Transaction 93 receipts []*types.Receipt 94 95 privateReceipts []*types.Receipt 96 // Leave this publicState named state, add privateState which most code paths can just ignore 97 privateState *state.StateDB 98 } 99 100 // task contains all information for consensus engine sealing and result submitting. 
101 type task struct { 102 receipts []*types.Receipt 103 state *state.StateDB 104 block *types.Block 105 createdAt time.Time 106 107 privateReceipts []*types.Receipt 108 // Leave this publicState named state, add privateState which most code paths can just ignore 109 privateState *state.StateDB 110 } 111 112 const ( 113 commitInterruptNone int32 = iota 114 commitInterruptNewHead 115 commitInterruptResubmit 116 ) 117 118 // newWorkReq represents a request for new sealing work submitting with relative interrupt notifier. 119 type newWorkReq struct { 120 interrupt *int32 121 noempty bool 122 timestamp int64 123 } 124 125 // intervalAdjust represents a resubmitting interval adjustment. 126 type intervalAdjust struct { 127 ratio float64 128 inc bool 129 } 130 131 // worker is the main object which takes care of submitting new work to consensus engine 132 // and gathering the sealing result. 133 type worker struct { 134 config *Config 135 chainConfig *params.ChainConfig 136 engine consensus.Engine 137 eth Backend 138 chain *core.BlockChain 139 140 // Subscriptions 141 mux *event.TypeMux 142 txsCh chan core.NewTxsEvent 143 txsSub event.Subscription 144 chainHeadCh chan core.ChainHeadEvent 145 chainHeadSub event.Subscription 146 chainSideCh chan core.ChainSideEvent 147 chainSideSub event.Subscription 148 149 // Channels 150 newWorkCh chan *newWorkReq 151 taskCh chan *task 152 resultCh chan *types.Block 153 startCh chan struct{} 154 exitCh chan struct{} 155 resubmitIntervalCh chan time.Duration 156 resubmitAdjustCh chan *intervalAdjust 157 158 current *environment // An environment for current running cycle. 159 localUncles map[common.Hash]*types.Block // A set of side blocks generated locally as the possible uncle blocks. 160 remoteUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks. 161 unconfirmed *unconfirmedBlocks // A set of locally mined blocks pending canonicalness confirmations. 
162 163 mu sync.RWMutex // The lock used to protect the coinbase and extra fields 164 coinbase common.Address 165 extra []byte 166 167 pendingMu sync.RWMutex 168 pendingTasks map[common.Hash]*task 169 170 snapshotMu sync.RWMutex // The lock used to protect the block snapshot and state snapshot 171 snapshotBlock *types.Block 172 snapshotState *state.StateDB 173 174 // atomic status counters 175 running int32 // The indicator whether the consensus engine is running or not. 176 newTxs int32 // New arrival transaction count since last sealing work submitting. 177 178 // External functions 179 isLocalBlock func(block *types.Block) bool // Function used to determine whether the specified block is mined by local miner. 180 181 // Test hooks 182 newTaskHook func(*task) // Method to call upon receiving a new sealing task. 183 skipSealHook func(*task) bool // Method to decide whether skipping the sealing. 184 fullTaskHook func() // Method to call before pushing the full sealing task. 185 resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval. 
186 } 187 188 func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(*types.Block) bool) *worker { 189 worker := &worker{ 190 config: config, 191 chainConfig: chainConfig, 192 engine: engine, 193 eth: eth, 194 mux: mux, 195 chain: eth.BlockChain(), 196 isLocalBlock: isLocalBlock, 197 localUncles: make(map[common.Hash]*types.Block), 198 remoteUncles: make(map[common.Hash]*types.Block), 199 unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth), 200 pendingTasks: make(map[common.Hash]*task), 201 txsCh: make(chan core.NewTxsEvent, txChanSize), 202 chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize), 203 chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize), 204 newWorkCh: make(chan *newWorkReq), 205 taskCh: make(chan *task), 206 resultCh: make(chan *types.Block, resultQueueSize), 207 exitCh: make(chan struct{}), 208 startCh: make(chan struct{}, 1), 209 resubmitIntervalCh: make(chan time.Duration), 210 resubmitAdjustCh: make(chan *intervalAdjust, resubmitAdjustChanSize), 211 } 212 if _, ok := engine.(consensus.Istanbul); ok || !chainConfig.IsQuorum || chainConfig.Clique != nil { 213 // Subscribe NewTxsEvent for tx pool 214 worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh) 215 // Subscribe events for blockchain 216 worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh) 217 worker.chainSideSub = eth.BlockChain().SubscribeChainSideEvent(worker.chainSideCh) 218 // Sanitize recommit interval if the user-specified one is too short. 
219 recommit := worker.config.Recommit 220 if recommit < minRecommitInterval { 221 log.Warn("Sanitizing miner recommit interval", "provided", recommit, "updated", minRecommitInterval) 222 recommit = minRecommitInterval 223 } 224 225 go worker.mainLoop() 226 go worker.newWorkLoop(recommit) 227 go worker.resultLoop() 228 go worker.taskLoop() 229 230 // Submit first work to initialize pending state. 231 worker.startCh <- struct{}{} 232 } 233 234 return worker 235 } 236 237 // setEtherbase sets the etherbase used to initialize the block coinbase field. 238 func (w *worker) setEtherbase(addr common.Address) { 239 w.mu.Lock() 240 defer w.mu.Unlock() 241 w.coinbase = addr 242 } 243 244 // setExtra sets the content used to initialize the block extra field. 245 func (w *worker) setExtra(extra []byte) { 246 w.mu.Lock() 247 defer w.mu.Unlock() 248 w.extra = extra 249 } 250 251 // setRecommitInterval updates the interval for miner sealing work recommitting. 252 func (w *worker) setRecommitInterval(interval time.Duration) { 253 w.resubmitIntervalCh <- interval 254 } 255 256 // pending returns the pending state and corresponding block. 257 func (w *worker) pending() (*types.Block, *state.StateDB, *state.StateDB) { 258 // return a snapshot to avoid contention on currentMu mutex 259 w.snapshotMu.RLock() 260 defer w.snapshotMu.RUnlock() 261 if w.snapshotState == nil { 262 return nil, nil, nil 263 } 264 return w.snapshotBlock, w.snapshotState.Copy(), w.current.privateState.Copy() 265 } 266 267 // pendingBlock returns pending block. 268 func (w *worker) pendingBlock() *types.Block { 269 // return a snapshot to avoid contention on currentMu mutex 270 w.snapshotMu.RLock() 271 defer w.snapshotMu.RUnlock() 272 return w.snapshotBlock 273 } 274 275 // start sets the running status as 1 and triggers new work submitting. 
func (w *worker) start() {
	atomic.StoreInt32(&w.running, 1)
	// Istanbul-based engines need an explicit start with chain accessors.
	if istanbul, ok := w.engine.(consensus.Istanbul); ok {
		istanbul.Start(w.chain, w.chain.CurrentBlock, w.chain.HasBadBlock)
	}
	w.startCh <- struct{}{}
}

// stop sets the running status as 0.
func (w *worker) stop() {
	if istanbul, ok := w.engine.(consensus.Istanbul); ok {
		istanbul.Stop()
	}
	atomic.StoreInt32(&w.running, 0)
}

// isRunning returns an indicator whether worker is running or not.
func (w *worker) isRunning() bool {
	return atomic.LoadInt32(&w.running) == 1
}

// close terminates all background threads maintained by the worker.
// Note the worker does not support being closed multiple times.
func (w *worker) close() {
	close(w.exitCh)
}

// newWorkLoop is a standalone goroutine to submit new mining work upon received events.
func (w *worker) newWorkLoop(recommit time.Duration) {
	var (
		interrupt   *int32
		minRecommit = recommit // minimal resubmit interval specified by user.
		timestamp   int64      // timestamp for each round of mining.
	)

	timer := time.NewTimer(0)
	<-timer.C // discard the initial tick

	// commit aborts in-flight transaction execution with given signal and resubmits a new one.
	commit := func(noempty bool, s int32) {
		if interrupt != nil {
			atomic.StoreInt32(interrupt, s)
		}
		interrupt = new(int32)
		w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty, timestamp: timestamp}
		timer.Reset(recommit)
		atomic.StoreInt32(&w.newTxs, 0)
	}
	// recalcRecommit recalculates the resubmitting interval upon feedback,
	// biasing toward the limit being adjusted so it can actually be reached.
	recalcRecommit := func(target float64, inc bool) {
		var (
			prev = float64(recommit.Nanoseconds())
			next float64
		)
		if inc {
			next = prev*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias)
			// Recap if interval is larger than the maximum time interval
			if next > float64(maxRecommitInterval.Nanoseconds()) {
				next = float64(maxRecommitInterval.Nanoseconds())
			}
		} else {
			next = prev*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias)
			// Recap if interval is less than the user specified minimum
			if next < float64(minRecommit.Nanoseconds()) {
				next = float64(minRecommit.Nanoseconds())
			}
		}
		recommit = time.Duration(int64(next))
	}
	// clearPending cleans the stale pending tasks (deeper than staleThreshold).
	clearPending := func(number uint64) {
		w.pendingMu.Lock()
		for h, t := range w.pendingTasks {
			if t.block.NumberU64()+staleThreshold <= number {
				delete(w.pendingTasks, h)
			}
		}
		w.pendingMu.Unlock()
	}

	for {
		select {
		case <-w.startCh:
			clearPending(w.chain.CurrentBlock().NumberU64())
			timestamp = time.Now().Unix()
			commit(false, commitInterruptNewHead)

		case head := <-w.chainHeadCh:
			if h, ok := w.engine.(consensus.Handler); ok {
				h.NewChainHead()
			}
			clearPending(head.Block.NumberU64())
			timestamp = time.Now().Unix()
			commit(false, commitInterruptNewHead)

		case <-timer.C:
			// If mining is running resubmit a new work cycle periodically to pull in
			// higher priced transactions. Disable this overhead for pending blocks.
			if w.isRunning() && (w.chainConfig.Clique == nil || w.chainConfig.Clique.Period > 0) {
				// Short circuit if no new transaction arrives.
				if atomic.LoadInt32(&w.newTxs) == 0 {
					timer.Reset(recommit)
					continue
				}
				commit(true, commitInterruptResubmit)
			}

		case interval := <-w.resubmitIntervalCh:
			// Adjust resubmit interval explicitly by user.
			if interval < minRecommitInterval {
				log.Warn("Sanitizing miner recommit interval", "provided", interval, "updated", minRecommitInterval)
				interval = minRecommitInterval
			}
			log.Info("Miner recommit interval update", "from", minRecommit, "to", interval)
			minRecommit, recommit = interval, interval

			if w.resubmitHook != nil {
				w.resubmitHook(minRecommit, recommit)
			}

		case adjust := <-w.resubmitAdjustCh:
			// Adjust resubmit interval by feedback.
			if adjust.inc {
				before := recommit
				recalcRecommit(float64(recommit.Nanoseconds())/adjust.ratio, true)
				log.Trace("Increase miner recommit interval", "from", before, "to", recommit)
			} else {
				before := recommit
				recalcRecommit(float64(minRecommit.Nanoseconds()), false)
				log.Trace("Decrease miner recommit interval", "from", before, "to", recommit)
			}

			if w.resubmitHook != nil {
				w.resubmitHook(minRecommit, recommit)
			}

		case <-w.exitCh:
			return
		}
	}
}

// mainLoop is a standalone goroutine to regenerate the sealing task based on the received event.
func (w *worker) mainLoop() {
	defer w.txsSub.Unsubscribe()
	defer w.chainHeadSub.Unsubscribe()
	defer w.chainSideSub.Unsubscribe()

	for {
		select {
		case req := <-w.newWorkCh:
			w.commitNewWork(req.interrupt, req.noempty, req.timestamp)

		case ev := <-w.chainSideCh:
			// Short circuit for duplicate side blocks
			if _, exist := w.localUncles[ev.Block.Hash()]; exist {
				continue
			}
			if _, exist := w.remoteUncles[ev.Block.Hash()]; exist {
				continue
			}
			// Add side block to possible uncle block set depending on the author.
			if w.isLocalBlock != nil && w.isLocalBlock(ev.Block) {
				w.localUncles[ev.Block.Hash()] = ev.Block
			} else {
				w.remoteUncles[ev.Block.Hash()] = ev.Block
			}
			// If our mining block contains less than 2 uncle blocks,
			// add the new uncle block if valid and regenerate a mining block.
			if w.isRunning() && w.current != nil && w.current.uncles.Cardinality() < 2 {
				start := time.Now()
				if err := w.commitUncle(w.current, ev.Block.Header()); err == nil {
					var uncles []*types.Header
					// Resolve each tracked uncle hash back to a header via the
					// local/remote side-block maps.
					w.current.uncles.Each(func(item interface{}) bool {
						hash, ok := item.(common.Hash)
						if !ok {
							return false
						}
						uncle, exist := w.localUncles[hash]
						if !exist {
							uncle, exist = w.remoteUncles[hash]
						}
						if !exist {
							return false
						}
						uncles = append(uncles, uncle.Header())
						return false
					})
					w.commit(uncles, nil, true, start)
				}
			}

		case ev := <-w.txsCh:
			// Apply transactions to the pending state if we're not mining.
			//
			// Note all transactions received may not be continuous with transactions
			// already included in the current mining block. These transactions will
			// be automatically eliminated.
			if !w.isRunning() && w.current != nil {
				// If block is already full, abort
				if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas {
					continue
				}
				w.mu.RLock()
				coinbase := w.coinbase
				w.mu.RUnlock()

				txs := make(map[common.Address]types.Transactions)
				for _, tx := range ev.Txs {
					acc, _ := types.Sender(w.current.signer, tx)
					txs[acc] = append(txs[acc], tx)
				}
				txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs)
				tcount := w.current.tcount
				w.commitTransactions(txset, coinbase, nil)
				// Only update the snapshot if any new transactons were added
				// to the pending block
				if tcount != w.current.tcount {
					w.updateSnapshot()
				}
			} else {
				// If clique is running in dev mode(period is 0), disable
				// advance sealing here.
				if w.chainConfig.Clique != nil && w.chainConfig.Clique.Period == 0 {
					w.commitNewWork(nil, true, time.Now().Unix())
				}
			}
			atomic.AddInt32(&w.newTxs, int32(len(ev.Txs)))

		// System stopped
		case <-w.exitCh:
			return
		case <-w.txsSub.Err():
			return
		case <-w.chainHeadSub.Err():
			return
		case <-w.chainSideSub.Err():
			return
		}
	}
}

// taskLoop is a standalone goroutine to fetch sealing task from the generator and
// push them to consensus engine.
func (w *worker) taskLoop() {
	var (
		stopCh chan struct{}
		prev   common.Hash
	)

	// interrupt aborts the in-flight sealing task.
	interrupt := func() {
		if stopCh != nil {
			close(stopCh)
			stopCh = nil
		}
	}
	for {
		select {
		case task := <-w.taskCh:
			if w.newTaskHook != nil {
				w.newTaskHook(task)
			}
			// Reject duplicate sealing work due to resubmitting.
			sealHash := w.engine.SealHash(task.block.Header())
			if sealHash == prev {
				continue
			}
			// Interrupt previous sealing operation
			interrupt()
			stopCh, prev = make(chan struct{}), sealHash

			if w.skipSealHook != nil && w.skipSealHook(task) {
				continue
			}
			w.pendingMu.Lock()
			w.pendingTasks[w.engine.SealHash(task.block.Header())] = task
			w.pendingMu.Unlock()

			if err := w.engine.Seal(w.chain, task.block, w.resultCh, stopCh); err != nil {
				log.Warn("Block sealing failed", "err", err)
			}
		case <-w.exitCh:
			interrupt()
			return
		}
	}
}

// resultLoop is a standalone goroutine to handle sealing result submitting
// and flush relative data to the database.
func (w *worker) resultLoop() {
	for {
		select {
		case block := <-w.resultCh:
			// Short circuit when receiving empty result.
			if block == nil {
				continue
			}
			// Short circuit when receiving duplicate result caused by resubmitting.
			if w.chain.HasBlock(block.Hash(), block.NumberU64()) {
				continue
			}
			var (
				sealhash = w.engine.SealHash(block.Header())
				hash     = block.Hash()
			)
			w.pendingMu.RLock()
			task, exist := w.pendingTasks[sealhash]
			w.pendingMu.RUnlock()
			if !exist {
				log.Error("Block found but no relative pending task", "number", block.Number(), "sealhash", sealhash, "hash", hash)
				continue
			}
			// Different block could share same sealhash, deep copy here to prevent write-write conflict.
			var (
				pubReceipts = make([]*types.Receipt, len(task.receipts))
				prvReceipts = make([]*types.Receipt, len(task.privateReceipts))
				logs        []*types.Log
			)
			// Private receipts are indexed after all public ones.
			offset := len(task.receipts)
			for i, receipt := range task.receipts {
				// add block location fields
				receipt.BlockHash = hash
				receipt.BlockNumber = block.Number()
				receipt.TransactionIndex = uint(i)

				pubReceipts[i] = new(types.Receipt)
				*pubReceipts[i] = *receipt
				// Update the block hash in all logs since it is now available and not when the
				// receipt/log of individual transactions were created.
				for _, log := range receipt.Logs {
					log.BlockHash = hash
				}
				logs = append(logs, receipt.Logs...)
			}

			for i, receipt := range task.privateReceipts {
				// add block location fields
				receipt.BlockHash = hash
				receipt.BlockNumber = block.Number()
				receipt.TransactionIndex = uint(i + offset)

				prvReceipts[i] = new(types.Receipt)
				*prvReceipts[i] = *receipt
				// Update the block hash in all logs since it is now available and not when the
				// receipt/log of individual transactions were created.
				for _, log := range receipt.Logs {
					log.BlockHash = hash
				}
				logs = append(logs, receipt.Logs...)
			}

			allReceipts := mergeReceipts(pubReceipts, prvReceipts)

			// Commit block and state to database.
			stat, err := w.chain.WriteBlockWithState(block, allReceipts, task.state, task.privateState)
			if err != nil {
				log.Error("Failed writing block to chain", "err", err)
				continue
			}
			if err := rawdb.WritePrivateBlockBloom(w.eth.ChainDb(), block.NumberU64(), task.privateReceipts); err != nil {
				log.Error("Failed writing private block bloom", "err", err)
				continue
			}
			log.Info("Successfully sealed new block", "number", block.Number(), "sealhash", sealhash, "hash", hash,
				"elapsed", common.PrettyDuration(time.Since(task.createdAt)))

			// Broadcast the block and announce chain insertion event
			w.mux.Post(core.NewMinedBlockEvent{Block: block})

			var events []interface{}

			switch stat {
			case core.CanonStatTy:
				events = append(events, core.ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
				events = append(events, core.ChainHeadEvent{Block: block})
			case core.SideStatTy:
				events = append(events, core.ChainSideEvent{Block: block})
			}
			w.chain.PostChainEvents(events, logs)

			// Insert the block into the set of pending ones to resultLoop for confirmations
			w.unconfirmed.Insert(block.NumberU64(), block.Hash())

		case <-w.exitCh:
			return
		}
	}
}

// Given a slice of public receipts and an overlapping (smaller) slice of
// private receipts, return a new slice where the default for each location is
// the public receipt but we take the private receipt in each place we have
// one.
671 func mergeReceipts(pub, priv types.Receipts) types.Receipts { 672 m := make(map[common.Hash]*types.Receipt) 673 for _, receipt := range pub { 674 m[receipt.TxHash] = receipt 675 } 676 for _, receipt := range priv { 677 m[receipt.TxHash] = receipt 678 } 679 680 ret := make(types.Receipts, 0, len(pub)) 681 for _, pubReceipt := range pub { 682 ret = append(ret, m[pubReceipt.TxHash]) 683 } 684 685 return ret 686 } 687 688 // makeCurrent creates a new environment for the current cycle. 689 func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error { 690 publicState, privateState, err := w.chain.StateAt(parent.Root()) 691 if err != nil { 692 return err 693 } 694 env := &environment{ 695 signer: types.MakeSigner(w.chainConfig, header.Number), 696 state: publicState, 697 ancestors: mapset.NewSet(), 698 family: mapset.NewSet(), 699 uncles: mapset.NewSet(), 700 header: header, 701 privateState: privateState, 702 } 703 704 // when 08 is processed ancestors contain 07 (quick block) 705 for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) { 706 for _, uncle := range ancestor.Uncles() { 707 env.family.Add(uncle.Hash()) 708 } 709 env.family.Add(ancestor.Hash()) 710 env.ancestors.Add(ancestor.Hash()) 711 } 712 713 // Keep track of transactions which return errors so they can be removed 714 env.tcount = 0 715 w.current = env 716 return nil 717 } 718 719 // commitUncle adds the given block to uncle block set, returns error if failed to add. 
func (w *worker) commitUncle(env *environment, uncle *types.Header) error {
	hash := uncle.Hash()
	if env.uncles.Contains(hash) {
		return errors.New("uncle not unique")
	}
	if env.header.ParentHash == uncle.ParentHash {
		return errors.New("uncle is sibling")
	}
	if !env.ancestors.Contains(uncle.ParentHash) {
		return errors.New("uncle's parent unknown")
	}
	if env.family.Contains(hash) {
		return errors.New("uncle already included")
	}
	env.uncles.Add(uncle.Hash())
	return nil
}

// updateSnapshot updates pending snapshot block and state.
// Note this function assumes the current variable is thread safe.
func (w *worker) updateSnapshot() {
	w.snapshotMu.Lock()
	defer w.snapshotMu.Unlock()

	// Resolve tracked uncle hashes into headers via the side-block maps.
	var uncles []*types.Header
	w.current.uncles.Each(func(item interface{}) bool {
		hash, ok := item.(common.Hash)
		if !ok {
			return false
		}
		uncle, exist := w.localUncles[hash]
		if !exist {
			uncle, exist = w.remoteUncles[hash]
		}
		if !exist {
			return false
		}
		uncles = append(uncles, uncle.Header())
		return false
	})

	w.snapshotBlock = types.NewBlock(
		w.current.header,
		w.current.txs,
		uncles,
		w.current.receipts,
	)

	w.snapshotState = w.current.state.Copy()
}

// commitTransaction executes a single transaction against the current public and
// private states, recording the transaction and its receipt(s) in the current
// environment. On error both states are reverted to their pre-transaction
// snapshots. Returns the combined public+private logs.
func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) {
	snap := w.current.state.Snapshot()
	privateSnap := w.current.privateState.Snapshot()

	txnStart := time.Now()
	receipt, privateReceipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.privateState, w.current.header, tx, &w.current.header.GasUsed, *w.chain.GetVMConfig())
	if err != nil {
		// Roll back both worlds so a failed transaction leaves no partial state.
		w.current.state.RevertToSnapshot(snap)
		w.current.privateState.RevertToSnapshot(privateSnap)
		return nil, err
	}
	w.current.txs = append(w.current.txs, tx)
	w.current.receipts = append(w.current.receipts, receipt)
	log.EmitCheckpoint(log.TxCompleted, "tx", tx.Hash().Hex(), "time", time.Since(txnStart))

	logs := receipt.Logs
	if privateReceipt != nil {
		logs = append(receipt.Logs, privateReceipt.Logs...)
		w.current.privateReceipts = append(w.current.privateReceipts, privateReceipt)
		w.chain.CheckAndSetPrivateState(logs, w.current.privateState)
	}
	return logs, nil
}

// commitTransactions applies as many transactions from txs as gas and interrupt
// signals allow. It returns true only when interrupted by a new-head signal
// (meaning the semi-finished work should be discarded); false otherwise.
func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coinbase common.Address, interrupt *int32) bool {
	// Short circuit if current is nil
	if w.current == nil {
		return true
	}

	if w.current.gasPool == nil {
		w.current.gasPool = new(core.GasPool).AddGas(w.current.header.GasLimit)
	}

	var coalescedLogs []*types.Log

	for {
		// In the following three cases, we will interrupt the execution of the transaction.
		// (1) new head block event arrival, the interrupt signal is 1
		// (2) worker start or restart, the interrupt signal is 1
		// (3) worker recreate the mining block with any newly arrived transactions, the interrupt signal is 2.
		// For the first two cases, the semi-finished work will be discarded.
		// For the third case, the semi-finished work will be submitted to the consensus engine.
		if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone {
			// Notify resubmit loop to increase resubmitting interval due to too frequent commits.
			if atomic.LoadInt32(interrupt) == commitInterruptResubmit {
				// Ratio of gas already consumed; floored at 0.1 so the feedback
				// always nudges the interval upward by a minimum amount.
				ratio := float64(w.current.header.GasLimit-w.current.gasPool.Gas()) / float64(w.current.header.GasLimit)
				if ratio < 0.1 {
					ratio = 0.1
				}
				w.resubmitAdjustCh <- &intervalAdjust{
					ratio: ratio,
					inc:   true,
				}
			}
			return atomic.LoadInt32(interrupt) == commitInterruptNewHead
		}
		// If we don't have enough gas for any further transactions then we're done
		if w.current.gasPool.Gas() < params.TxGas {
			log.Trace("Not enough gas for further transactions", "have", w.current.gasPool, "want", params.TxGas)
			break
		}
		// Retrieve the next transaction and abort if all done
		tx := txs.Peek()
		if tx == nil {
			break
		}
		// Error may be ignored here. The error has already been checked
		// during transaction acceptance is the transaction pool.
		//
		// We use the eip155 signer regardless of the current hf.
		from, _ := types.Sender(w.current.signer, tx)
		// Check whether the tx is replay protected. If we're not in the EIP155 hf
		// phase, start ignoring the sender until we do. Private transactions are
		// exempt from this check.
		if tx.Protected() && !w.chainConfig.IsEIP155(w.current.header.Number) && !tx.IsPrivate() {
			log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block)

			txs.Pop()
			continue
		}
		// Start executing the transaction
		w.current.state.Prepare(tx.Hash(), common.Hash{}, w.current.tcount)
		w.current.privateState.Prepare(tx.Hash(), common.Hash{}, w.current.tcount)

		logs, err := w.commitTransaction(tx, coinbase)
		switch err {
		case core.ErrGasLimitReached:
			// Pop the current out-of-gas transaction without shifting in the next from the account
			log.Trace("Gas limit exceeded for current block", "sender", from)
			txs.Pop()

		case core.ErrNonceTooLow:
			// New head notification data race between the transaction pool and miner, shift
			log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
			txs.Shift()

		case core.ErrNonceTooHigh:
			// Reorg notification data race between the transaction pool and miner, skip account =
			log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce())
			txs.Pop()

		case nil:
			// Everything ok, collect the logs and shift in the next transaction from the same account
			coalescedLogs = append(coalescedLogs, logs...)
			w.current.tcount++
			txs.Shift()

		default:
			// Strange error, discard the transaction and get the next in line (note, the
			// nonce-too-high clause will prevent us from executing in vain).
			log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
			txs.Shift()
		}
	}

	if !w.isRunning() && len(coalescedLogs) > 0 {
		// We don't push the pendingLogsEvent while we are mining. The reason is that
		// when we are mining, the worker will regenerate a mining block every 3 seconds.
		// In order to avoid pushing the repeated pendingLog, we disable the pending log pushing.

		// make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
		// logs by filling in the block hash when the block was mined by the local miner. This can
		// cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
		cpy := make([]*types.Log, len(coalescedLogs))
		for i, l := range coalescedLogs {
			cpy[i] = new(types.Log)
			*cpy[i] = *l
		}
		go w.mux.Post(core.PendingLogsEvent{Logs: cpy})
	}
	// Notify resubmit loop to decrease resubmitting interval if current interval is larger
	// than the user-specified one.
	if interrupt != nil {
		w.resubmitAdjustCh <- &intervalAdjust{inc: false}
	}
	return false
}

// commitNewWork generates several new sealing tasks based on the parent block.
func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) {
	w.mu.RLock()
	defer w.mu.RUnlock()

	tstart := time.Now()
	parent := w.chain.CurrentBlock()

	// Never produce a block with a timestamp at or before the parent's.
	if parent.Time() >= uint64(timestamp) {
		timestamp = int64(parent.Time() + 1)
	}

	allowedFutureBlockTime := int64(w.config.AllowedFutureBlockTime) //Quorum - get AllowedFutureBlockTime to fix issue # 1004

	// this will ensure we're not going off too far in the future
	if now := time.Now().Unix(); timestamp > now+1+allowedFutureBlockTime {
		wait := time.Duration(timestamp-now) * time.Second
		log.Info("Mining too far in the future", "wait", common.PrettyDuration(wait))
		time.Sleep(wait)
	}

	num := parent.Number()
	header := &types.Header{
		ParentHash: parent.Hash(),
		Number:     num.Add(num, common.Big1),
		GasLimit:   core.CalcGasLimit(parent, w.config.GasFloor, w.config.GasCeil),
		Extra:      w.extra,
		Time:       uint64(timestamp),
	}
	// Only set the coinbase if our consensus engine is running (avoid spurious block rewards)
	if w.isRunning() {
		if w.coinbase == (common.Address{}) {
			log.Error("Refusing to mine without etherbase")
			return
		}
		header.Coinbase = w.coinbase
	}
	if err := w.engine.Prepare(w.chain, header); err != nil {
		log.Error("Failed to prepare header for mining", "err", err)
		return
	}
	// If we are care about TheDAO hard-fork check whether to override the extra-data or not
	if daoBlock := w.chainConfig.DAOForkBlock; daoBlock != nil {
		// Check whether the block is among the fork extra-override range
		limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange)
		if header.Number.Cmp(daoBlock) >= 0 && header.Number.Cmp(limit) < 0 {
			// Depending whether we support or oppose the fork, override differently
			if w.chainConfig.DAOForkSupport {
				header.Extra = common.CopyBytes(params.DAOForkBlockExtra)
			} else if bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
				header.Extra = []byte{} // If miner opposes, don't let it use the reserved extra-data
			}
		}
	}
	// Could potentially happen if starting to mine in an odd state.
	err := w.makeCurrent(parent, header)
	if err != nil {
		log.Error("Failed to create mining context", "err", err)
		return
	}
	// Create the current work task and check any fork transitions needed
	env := w.current
	if w.chainConfig.DAOForkSupport && w.chainConfig.DAOForkBlock != nil && w.chainConfig.DAOForkBlock.Cmp(header.Number) == 0 {
		misc.ApplyDAOHardFork(env.state)
	}
	// Accumulate the uncles for the current block
	uncles := make([]*types.Header, 0, 2)
	commitUncles := func(blocks map[common.Hash]*types.Block) {
		// Clean up stale uncle blocks first
		for hash, uncle := range blocks {
			if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() {
				delete(blocks, hash)
			}
		}
		for hash, uncle := range blocks {
			if len(uncles) == 2 {
				break
			}
			if err := w.commitUncle(env, uncle.Header()); err != nil {
				log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
			} else {
				log.Debug("Committing new uncle to block", "hash", hash)
				uncles = append(uncles, uncle.Header())
			}
		}
	}
	// Prefer to locally generated uncle
	commitUncles(w.localUncles)
	commitUncles(w.remoteUncles)

	if !noempty {
		// Create an empty block based on temporary copied state for sealing in advance without waiting block
		// execution finished.
		w.commit(uncles, nil, false, tstart)
	}

	// Fill the block with all available pending transactions.
	pending, err := w.eth.TxPool().Pending()
	if err != nil {
		log.Error("Failed to fetch pending transactions", "err", err)
		return
	}
	// Short circuit if there is no available pending transactions
	if len(pending) == 0 {
		w.updateSnapshot()
		return
	}
	// Split the pending transactions into locals and remotes
	localTxs, remoteTxs := make(map[common.Address]types.Transactions), pending
	for _, account := range w.eth.TxPool().Locals() {
		if txs := remoteTxs[account]; len(txs) > 0 {
			delete(remoteTxs, account)
			localTxs[account] = txs
		}
	}
	// Pack local transactions first so remote traffic cannot starve them.
	if len(localTxs) > 0 {
		txs := types.NewTransactionsByPriceAndNonce(w.current.signer, localTxs)
		if w.commitTransactions(txs, w.coinbase, interrupt) {
			return
		}
	}
	if len(remoteTxs) > 0 {
		txs := types.NewTransactionsByPriceAndNonce(w.current.signer, remoteTxs)
		if w.commitTransactions(txs, w.coinbase, interrupt) {
			return
		}
	}
	w.commit(uncles, w.fullTaskHook, true, tstart)
}

// commit runs any post-transaction state modifications, assembles the final block
// and commits new work if consensus engine is running.
// interval, when non-nil, is invoked just before the task is handed to the sealer
// (commitNewWork passes w.fullTaskHook here); update controls whether the pending
// block snapshot is refreshed afterwards; start is used only to log elapsed time.
func (w *worker) commit(uncles []*types.Header, interval func(), update bool, start time.Time) error {
	// Deep copy receipts here to avoid interaction between different tasks.
	receipts := make([]*types.Receipt, len(w.current.receipts))
	for i, l := range w.current.receipts {
		receipts[i] = new(types.Receipt)
		*receipts[i] = *l
	}

	// Quorum: the private-state receipts get the same deep-copy treatment.
	privateReceipts := make([]*types.Receipt, len(w.current.privateReceipts))
	for i, l := range w.current.privateReceipts {
		privateReceipts[i] = new(types.Receipt)
		*privateReceipts[i] = *l
	}

	// Copy both the public and the private state so the sealing task owns an
	// isolated view, independent of subsequent work cycles.
	s := w.current.state.Copy()
	ps := w.current.privateState.Copy()
	block, err := w.engine.FinalizeAndAssemble(w.chain, w.current.header, s, w.current.txs, uncles, w.current.receipts)
	if err != nil {
		return err
	}
	if w.isRunning() {
		if interval != nil {
			interval()
		}
		select {
		case w.taskCh <- &task{receipts: receipts, privateReceipts: privateReceipts, state: s, privateState: ps, block: block, createdAt: time.Now()}:
			w.unconfirmed.Shift(block.NumberU64() - 1)

			// Sum gasUsed*gasPrice over all transactions purely for the log line below.
			// NOTE(review): assumes receipts[i] lines up 1:1 with block.Transactions() —
			// this mirrors how the receipts were accumulated; verify against makeCurrent.
			feesWei := new(big.Int)
			for i, tx := range block.Transactions() {
				feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), tx.GasPrice()))
			}
			feesEth := new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether)))

			log.Info("Commit new mining work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
				"uncles", len(uncles), "txs", w.current.tcount, "gas", block.GasUsed(), "fees", feesEth, "elapsed", common.PrettyDuration(time.Since(start)))

		case <-w.exitCh:
			// Worker is shutting down; drop the task instead of blocking forever.
			log.Info("Worker has exited")
		}
	}
	if update {
		w.updateSnapshot()
	}
	return nil
}