// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package miner

import (
	"bytes"
	"errors"
	"math/big"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	mapset "github.com/deckarep/golang-set"
	"github.com/aswedchain/aswed/common"
	"github.com/aswedchain/aswed/consensus"
	"github.com/aswedchain/aswed/core"
	"github.com/aswedchain/aswed/core/state"
	"github.com/aswedchain/aswed/core/types"
	"github.com/aswedchain/aswed/event"
	"github.com/aswedchain/aswed/log"
	"github.com/aswedchain/aswed/params"
	"github.com/aswedchain/aswed/trie"
)

const (
	// resultQueueSize is the size of channel listening to sealing result.
	resultQueueSize = 10

	// txChanSize is the size of channel listening to NewTxsEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 4096

	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

	// chainSideChanSize is the size of channel listening to ChainSideEvent.
	chainSideChanSize = 10

	// resubmitAdjustChanSize is the size of resubmitting interval adjustment channel.
	resubmitAdjustChanSize = 10

	// miningLogAtDepth is the number of confirmations before logging successful mining.
	miningLogAtDepth = 7

	// minRecommitInterval is the minimal time interval to recreate the mining block with
	// any newly arrived transactions.
	minRecommitInterval = 1 * time.Second

	// maxRecommitInterval is the maximum time interval to recreate the mining block with
	// any newly arrived transactions.
	maxRecommitInterval = 15 * time.Second

	// intervalAdjustRatio is the impact a single interval adjustment has on sealing work
	// resubmitting interval.
	intervalAdjustRatio = 0.1

	// intervalAdjustBias is applied during the new resubmit interval calculation in favor of
	// increasing upper limit or decreasing lower limit so that the limit can be reachable.
	// Expressed in nanoseconds (200ms).
	intervalAdjustBias = 200 * 1000.0 * 1000.0

	// staleThreshold is the maximum depth of the acceptable stale block.
	staleThreshold = 7
)

// environment is the worker's current environment and holds all of the current state information.
type environment struct {
	signer types.Signer // signer used to recover transaction senders for this cycle

	state     *state.StateDB // apply state changes here
	ancestors mapset.Set     // ancestor set (used for checking uncle parent validity)
	family    mapset.Set     // family set (used for checking uncle invalidity)
	uncles    mapset.Set     // uncle set
	tcount    int            // tx count in cycle
	gasPool   *core.GasPool  // available gas used to pack transactions

	header   *types.Header        // header of the block being assembled
	txs      []*types.Transaction // transactions included so far, in execution order
	receipts []*types.Receipt     // receipts matching txs, index for index
}

// task contains all information for consensus engine sealing and result submitting.
type task struct {
	receipts  []*types.Receipt // receipts produced while executing the block's transactions
	state     *state.StateDB   // post-execution state; written to the database on a successful seal
	block     *types.Block     // the assembled block handed to the consensus engine
	createdAt time.Time        // creation time, used to log the sealing latency
}

const (
	// Interrupt signals delivered to commitTransactions through a shared *int32:
	// commitInterruptNone means keep going; commitInterruptNewHead aborts and
	// discards the半-finished work is NOT intended here — NewHead aborts and
	// discards; commitInterruptResubmit aborts but submits the partial work.
	commitInterruptNone int32 = iota
	commitInterruptNewHead
	commitInterruptResubmit
)

// newWorkReq represents a request for new sealing work submitting with relative interrupt notifier.
type newWorkReq struct {
	interrupt *int32 // shared interrupt flag checked by the commit cycle
	noempty   bool   // if true, skip the pre-sealed empty block
	timestamp int64  // timestamp for the new block header
}

// intervalAdjust represents a resubmitting interval adjustment.
type intervalAdjust struct {
	ratio float64 // fraction of the block already filled (only meaningful when inc is true)
	inc   bool    // true to lengthen the recommit interval, false to shorten it
}

// worker is the main object which takes care of submitting new work to consensus engine
// and gathering the sealing result.
type worker struct {
	config      *Config
	chainConfig *params.ChainConfig
	engine      consensus.Engine
	eth         Backend
	chain       *core.BlockChain

	// Is the engine a PoSA engine?
	posa   consensus.PoSA
	isPoSA bool

	// Feeds
	pendingLogsFeed event.Feed

	// Subscriptions
	mux          *event.TypeMux
	txsCh        chan core.NewTxsEvent
	txsSub       event.Subscription
	chainHeadCh  chan core.ChainHeadEvent
	chainHeadSub event.Subscription
	chainSideCh  chan core.ChainSideEvent
	chainSideSub event.Subscription

	// Channels
	newWorkCh          chan *newWorkReq
	taskCh             chan *task
	resultCh           chan *types.Block
	startCh            chan struct{}
	exitCh             chan struct{}
	resubmitIntervalCh chan time.Duration
	resubmitAdjustCh   chan *intervalAdjust

	current      *environment                 // An environment for current running cycle.
	localUncles  map[common.Hash]*types.Block // A set of side blocks generated locally as the possible uncle blocks.
	remoteUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks.
	unconfirmed  *unconfirmedBlocks           // A set of locally mined blocks pending canonicalness confirmations.

	mu       sync.RWMutex // The lock used to protect the coinbase and extra fields
	coinbase common.Address
	extra    []byte

	pendingMu    sync.RWMutex
	pendingTasks map[common.Hash]*task

	snapshotMu    sync.RWMutex // The lock used to protect the block snapshot and state snapshot
	snapshotBlock *types.Block
	snapshotState *state.StateDB

	// atomic status counters
	running int32 // The indicator whether the consensus engine is running or not.
	newTxs  int32 // New arrival transaction count since last sealing work submitting.

	// noempty is the flag used to control whether the feature of pre-seal empty
	// block is enabled. The default value is false(pre-seal is enabled by default).
	// But in some special scenario the consensus engine will seal blocks instantaneously,
	// in this case this feature will add all empty blocks into canonical chain
	// non-stop and no real transaction will be included.
	noempty uint32

	// External functions
	isLocalBlock func(block *types.Block) bool // Function used to determine whether the specified block is mined by local miner.

	// Test hooks
	newTaskHook  func(*task)                        // Method to call upon receiving a new sealing task.
	skipSealHook func(*task) bool                   // Method to decide whether skipping the sealing.
	fullTaskHook func()                             // Method to call before pushing the full sealing task.
	resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
}

// newWorker constructs a worker, subscribes to tx-pool and chain events and
// starts the four background loops (main, new-work, result, task). If init is
// true, a first round of sealing work is scheduled immediately so that the
// pending state is initialized.
func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(*types.Block) bool, init bool) *worker {
	// Detect whether the engine implements the optional PoSA interface.
	posa, isPoSA := engine.(consensus.PoSA)
	worker := &worker{
		config:             config,
		chainConfig:        chainConfig,
		engine:             engine,
		isPoSA:             isPoSA,
		posa:               posa,
		eth:                eth,
		mux:                mux,
		chain:              eth.BlockChain(),
		isLocalBlock:       isLocalBlock,
		localUncles:        make(map[common.Hash]*types.Block),
		remoteUncles:       make(map[common.Hash]*types.Block),
		unconfirmed:        newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth),
		pendingTasks:       make(map[common.Hash]*task),
		txsCh:              make(chan core.NewTxsEvent, txChanSize),
		chainHeadCh:        make(chan core.ChainHeadEvent, chainHeadChanSize),
		chainSideCh:        make(chan core.ChainSideEvent, chainSideChanSize),
		newWorkCh:          make(chan *newWorkReq),
		taskCh:             make(chan *task),
		resultCh:           make(chan *types.Block, resultQueueSize),
		exitCh:             make(chan struct{}),
		startCh:            make(chan struct{}, 1),
		resubmitIntervalCh: make(chan time.Duration),
		resubmitAdjustCh:   make(chan *intervalAdjust, resubmitAdjustChanSize),
	}
	// Subscribe NewTxsEvent for tx pool
	worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh)
	// Subscribe events for blockchain
	worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh)
	worker.chainSideSub = eth.BlockChain().SubscribeChainSideEvent(worker.chainSideCh)

	// Sanitize recommit interval if the user-specified one is too short.
	recommit := worker.config.Recommit
	if recommit < minRecommitInterval {
		log.Warn("Sanitizing miner recommit interval", "provided", recommit, "updated", minRecommitInterval)
		recommit = minRecommitInterval
	}

	go worker.mainLoop()
	go worker.newWorkLoop(recommit)
	go worker.resultLoop()
	go worker.taskLoop()

	// Submit first work to initialize pending state.
	if init {
		worker.startCh <- struct{}{}
	}
	return worker
}

// setEtherbase sets the etherbase used to initialize the block coinbase field.
func (w *worker) setEtherbase(addr common.Address) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.coinbase = addr
}

// setExtra sets the content used to initialize the block extra field.
func (w *worker) setExtra(extra []byte) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.extra = extra
}

// setRecommitInterval updates the interval for miner sealing work recommitting.
// The new value is delivered to newWorkLoop, which sanitizes it.
func (w *worker) setRecommitInterval(interval time.Duration) {
	w.resubmitIntervalCh <- interval
}

// disablePreseal disables pre-sealing mining feature
func (w *worker) disablePreseal() {
	atomic.StoreUint32(&w.noempty, 1)
}

// enablePreseal enables pre-sealing mining feature
func (w *worker) enablePreseal() {
	atomic.StoreUint32(&w.noempty, 0)
}

// pending returns the pending state and corresponding block.
// The state is a copy, so callers may mutate it freely.
func (w *worker) pending() (*types.Block, *state.StateDB) {
	// return a snapshot to avoid contention on currentMu mutex
	w.snapshotMu.RLock()
	defer w.snapshotMu.RUnlock()
	if w.snapshotState == nil {
		return nil, nil
	}
	return w.snapshotBlock, w.snapshotState.Copy()
}

// pendingBlock returns pending block.
func (w *worker) pendingBlock() *types.Block {
	// return a snapshot to avoid contention on currentMu mutex
	w.snapshotMu.RLock()
	defer w.snapshotMu.RUnlock()
	return w.snapshotBlock
}

// start sets the running status as 1 and triggers new work submitting.
func (w *worker) start() {
	atomic.StoreInt32(&w.running, 1)
	w.startCh <- struct{}{}
}

// stop sets the running status as 0.
func (w *worker) stop() {
	atomic.StoreInt32(&w.running, 0)
}

// isRunning returns an indicator whether worker is running or not.
306 func (w *worker) isRunning() bool { 307 return atomic.LoadInt32(&w.running) == 1 308 } 309 310 // close terminates all background threads maintained by the worker. 311 // Note the worker does not support being closed multiple times. 312 func (w *worker) close() { 313 atomic.StoreInt32(&w.running, 0) 314 close(w.exitCh) 315 } 316 317 // recalcRecommit recalculates the resubmitting interval upon feedback. 318 func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) time.Duration { 319 var ( 320 prevF = float64(prev.Nanoseconds()) 321 next float64 322 ) 323 if inc { 324 next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias) 325 max := float64(maxRecommitInterval.Nanoseconds()) 326 if next > max { 327 next = max 328 } 329 } else { 330 next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias) 331 min := float64(minRecommit.Nanoseconds()) 332 if next < min { 333 next = min 334 } 335 } 336 return time.Duration(int64(next)) 337 } 338 339 // newWorkLoop is a standalone goroutine to submit new mining work upon received events. 340 func (w *worker) newWorkLoop(recommit time.Duration) { 341 var ( 342 interrupt *int32 343 minRecommit = recommit // minimal resubmit interval specified by user. 344 timestamp int64 // timestamp for each round of mining. 345 ) 346 347 timer := time.NewTimer(0) 348 defer timer.Stop() 349 <-timer.C // discard the initial tick 350 351 // commit aborts in-flight transaction execution with given signal and resubmits a new one. 352 commit := func(noempty bool, s int32) { 353 if interrupt != nil { 354 atomic.StoreInt32(interrupt, s) 355 } 356 interrupt = new(int32) 357 w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty, timestamp: timestamp} 358 timer.Reset(recommit) 359 atomic.StoreInt32(&w.newTxs, 0) 360 } 361 // clearPending cleans the stale pending tasks. 
362 clearPending := func(number uint64) { 363 w.pendingMu.Lock() 364 for h, t := range w.pendingTasks { 365 if t.block.NumberU64()+staleThreshold <= number { 366 delete(w.pendingTasks, h) 367 } 368 } 369 w.pendingMu.Unlock() 370 } 371 372 for { 373 select { 374 case <-w.startCh: 375 clearPending(w.chain.CurrentBlock().NumberU64()) 376 timestamp = time.Now().Unix() 377 commit(false, commitInterruptNewHead) 378 379 case head := <-w.chainHeadCh: 380 clearPending(head.Block.NumberU64()) 381 timestamp = time.Now().Unix() 382 commit(false, commitInterruptNewHead) 383 384 case <-timer.C: 385 // If mining is running resubmit a new work cycle periodically to pull in 386 // higher priced transactions. Disable this overhead for pending blocks. 387 if w.isRunning() && (w.chainConfig.Clique == nil || w.chainConfig.Clique.Period > 0) { 388 // Short circuit if no new transaction arrives. 389 if atomic.LoadInt32(&w.newTxs) == 0 { 390 timer.Reset(recommit) 391 continue 392 } 393 commit(true, commitInterruptResubmit) 394 } 395 396 case interval := <-w.resubmitIntervalCh: 397 // Adjust resubmit interval explicitly by user. 398 if interval < minRecommitInterval { 399 log.Warn("Sanitizing miner recommit interval", "provided", interval, "updated", minRecommitInterval) 400 interval = minRecommitInterval 401 } 402 log.Info("Miner recommit interval update", "from", minRecommit, "to", interval) 403 minRecommit, recommit = interval, interval 404 405 if w.resubmitHook != nil { 406 w.resubmitHook(minRecommit, recommit) 407 } 408 409 case adjust := <-w.resubmitAdjustCh: 410 // Adjust resubmit interval by feedback. 
411 if adjust.inc { 412 before := recommit 413 target := float64(recommit.Nanoseconds()) / adjust.ratio 414 recommit = recalcRecommit(minRecommit, recommit, target, true) 415 log.Trace("Increase miner recommit interval", "from", before, "to", recommit) 416 } else { 417 before := recommit 418 recommit = recalcRecommit(minRecommit, recommit, float64(minRecommit.Nanoseconds()), false) 419 log.Trace("Decrease miner recommit interval", "from", before, "to", recommit) 420 } 421 422 if w.resubmitHook != nil { 423 w.resubmitHook(minRecommit, recommit) 424 } 425 426 case <-w.exitCh: 427 return 428 } 429 } 430 } 431 432 // mainLoop is a standalone goroutine to regenerate the sealing task based on the received event. 433 func (w *worker) mainLoop() { 434 defer w.txsSub.Unsubscribe() 435 defer w.chainHeadSub.Unsubscribe() 436 defer w.chainSideSub.Unsubscribe() 437 438 for { 439 select { 440 case req := <-w.newWorkCh: 441 w.commitNewWork(req.interrupt, req.noempty, req.timestamp) 442 443 case ev := <-w.chainSideCh: 444 // Short circuit for duplicate side blocks 445 if _, exist := w.localUncles[ev.Block.Hash()]; exist { 446 continue 447 } 448 if _, exist := w.remoteUncles[ev.Block.Hash()]; exist { 449 continue 450 } 451 // Add side block to possible uncle block set depending on the author. 452 if w.isLocalBlock != nil && w.isLocalBlock(ev.Block) { 453 w.localUncles[ev.Block.Hash()] = ev.Block 454 } else { 455 w.remoteUncles[ev.Block.Hash()] = ev.Block 456 } 457 // If our mining block contains less than 2 uncle blocks, 458 // add the new uncle block if valid and regenerate a mining block. 
459 if w.isRunning() && w.current != nil && w.current.uncles.Cardinality() < 2 { 460 start := time.Now() 461 if err := w.commitUncle(w.current, ev.Block.Header()); err == nil { 462 var uncles []*types.Header 463 w.current.uncles.Each(func(item interface{}) bool { 464 hash, ok := item.(common.Hash) 465 if !ok { 466 return false 467 } 468 uncle, exist := w.localUncles[hash] 469 if !exist { 470 uncle, exist = w.remoteUncles[hash] 471 } 472 if !exist { 473 return false 474 } 475 uncles = append(uncles, uncle.Header()) 476 return false 477 }) 478 w.commit(uncles, nil, true, start) 479 } 480 } 481 482 case ev := <-w.txsCh: 483 // Apply transactions to the pending state if we're not mining. 484 // 485 // Note all transactions received may not be continuous with transactions 486 // already included in the current mining block. These transactions will 487 // be automatically eliminated. 488 if !w.isRunning() && w.current != nil { 489 // If block is already full, abort 490 if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas { 491 continue 492 } 493 w.mu.RLock() 494 coinbase := w.coinbase 495 w.mu.RUnlock() 496 497 txs := make(map[common.Address]types.Transactions) 498 for _, tx := range ev.Txs { 499 acc, _ := types.Sender(w.current.signer, tx) 500 txs[acc] = append(txs[acc], tx) 501 } 502 txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs) 503 tcount := w.current.tcount 504 w.commitTransactions(txset, coinbase, nil) 505 // Only update the snapshot if any new transactons were added 506 // to the pending block 507 if tcount != w.current.tcount { 508 w.updateSnapshot() 509 } 510 } else { 511 // Special case, if the consensus engine is 0 period clique(dev mode), 512 // submit mining work here since all empty submission will be rejected 513 // by clique. Of course the advance sealing(empty submission) is disabled. 
514 if w.chainConfig.Clique != nil && w.chainConfig.Clique.Period == 0 { 515 w.commitNewWork(nil, true, time.Now().Unix()) 516 } 517 } 518 atomic.AddInt32(&w.newTxs, int32(len(ev.Txs))) 519 520 // System stopped 521 case <-w.exitCh: 522 return 523 case <-w.txsSub.Err(): 524 return 525 case <-w.chainHeadSub.Err(): 526 return 527 case <-w.chainSideSub.Err(): 528 return 529 } 530 } 531 } 532 533 // taskLoop is a standalone goroutine to fetch sealing task from the generator and 534 // push them to consensus engine. 535 func (w *worker) taskLoop() { 536 var ( 537 stopCh chan struct{} 538 prev common.Hash 539 ) 540 541 // interrupt aborts the in-flight sealing task. 542 interrupt := func() { 543 if stopCh != nil { 544 close(stopCh) 545 stopCh = nil 546 } 547 } 548 for { 549 select { 550 case task := <-w.taskCh: 551 if w.newTaskHook != nil { 552 w.newTaskHook(task) 553 } 554 // Reject duplicate sealing work due to resubmitting. 555 sealHash := w.engine.SealHash(task.block.Header()) 556 if sealHash == prev { 557 continue 558 } 559 // Interrupt previous sealing operation 560 interrupt() 561 stopCh, prev = make(chan struct{}), sealHash 562 563 if w.skipSealHook != nil && w.skipSealHook(task) { 564 continue 565 } 566 w.pendingMu.Lock() 567 w.pendingTasks[sealHash] = task 568 w.pendingMu.Unlock() 569 570 if err := w.engine.Seal(w.chain, task.block, w.resultCh, stopCh); err != nil { 571 log.Warn("Block sealing failed", "err", err) 572 } 573 case <-w.exitCh: 574 interrupt() 575 return 576 } 577 } 578 } 579 580 // resultLoop is a standalone goroutine to handle sealing result submitting 581 // and flush relative data to the database. 582 func (w *worker) resultLoop() { 583 for { 584 select { 585 case block := <-w.resultCh: 586 // Short circuit when receiving empty result. 587 if block == nil { 588 continue 589 } 590 // Short circuit when receiving duplicate result caused by resubmitting. 
591 if w.chain.HasBlock(block.Hash(), block.NumberU64()) { 592 continue 593 } 594 var ( 595 sealhash = w.engine.SealHash(block.Header()) 596 hash = block.Hash() 597 ) 598 w.pendingMu.RLock() 599 task, exist := w.pendingTasks[sealhash] 600 w.pendingMu.RUnlock() 601 if !exist { 602 log.Error("Block found but no relative pending task", "number", block.Number(), "sealhash", sealhash, "hash", hash) 603 continue 604 } 605 // Different block could share same sealhash, deep copy here to prevent write-write conflict. 606 var ( 607 receipts = make([]*types.Receipt, len(task.receipts)) 608 logs []*types.Log 609 ) 610 for i, receipt := range task.receipts { 611 // add block location fields 612 receipt.BlockHash = hash 613 receipt.BlockNumber = block.Number() 614 receipt.TransactionIndex = uint(i) 615 616 receipts[i] = new(types.Receipt) 617 *receipts[i] = *receipt 618 // Update the block hash in all logs since it is now available and not when the 619 // receipt/log of individual transactions were created. 620 for _, log := range receipt.Logs { 621 log.BlockHash = hash 622 } 623 logs = append(logs, receipt.Logs...) 624 } 625 // Commit block and state to database. 626 _, err := w.chain.WriteBlockWithState(block, receipts, logs, task.state, true) 627 if err != nil { 628 log.Error("Failed writing block to chain", "err", err) 629 continue 630 } 631 log.Info("Successfully sealed new block", "number", block.Number(), "sealhash", sealhash, "hash", hash, 632 "elapsed", common.PrettyDuration(time.Since(task.createdAt))) 633 634 // Broadcast the block and announce chain insertion event 635 w.mux.Post(core.NewMinedBlockEvent{Block: block}) 636 637 // Insert the block into the set of pending ones to resultLoop for confirmations 638 w.unconfirmed.Insert(block.NumberU64(), block.Hash()) 639 640 case <-w.exitCh: 641 return 642 } 643 } 644 } 645 646 // makeCurrent creates a new environment for the current cycle. 
647 func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error { 648 state, err := w.chain.StateAt(parent.Root()) 649 if err != nil { 650 return err 651 } 652 env := &environment{ 653 signer: types.NewEIP155Signer(w.chainConfig.ChainID), 654 state: state, 655 ancestors: mapset.NewSet(), 656 family: mapset.NewSet(), 657 uncles: mapset.NewSet(), 658 header: header, 659 } 660 661 // when 08 is processed ancestors contain 07 (quick block) 662 for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) { 663 for _, uncle := range ancestor.Uncles() { 664 env.family.Add(uncle.Hash()) 665 } 666 env.family.Add(ancestor.Hash()) 667 env.ancestors.Add(ancestor.Hash()) 668 } 669 670 // Keep track of transactions which return errors so they can be removed 671 env.tcount = 0 672 w.current = env 673 return nil 674 } 675 676 // commitUncle adds the given block to uncle block set, returns error if failed to add. 677 func (w *worker) commitUncle(env *environment, uncle *types.Header) error { 678 hash := uncle.Hash() 679 if env.uncles.Contains(hash) { 680 return errors.New("uncle not unique") 681 } 682 if env.header.ParentHash == uncle.ParentHash { 683 return errors.New("uncle is sibling") 684 } 685 if !env.ancestors.Contains(uncle.ParentHash) { 686 return errors.New("uncle's parent unknown") 687 } 688 if env.family.Contains(hash) { 689 return errors.New("uncle already included") 690 } 691 env.uncles.Add(uncle.Hash()) 692 return nil 693 } 694 695 // updateSnapshot updates pending snapshot block and state. 696 // Note this function assumes the current variable is thread safe. 
697 func (w *worker) updateSnapshot() { 698 w.snapshotMu.Lock() 699 defer w.snapshotMu.Unlock() 700 701 var uncles []*types.Header 702 w.current.uncles.Each(func(item interface{}) bool { 703 hash, ok := item.(common.Hash) 704 if !ok { 705 return false 706 } 707 uncle, exist := w.localUncles[hash] 708 if !exist { 709 uncle, exist = w.remoteUncles[hash] 710 } 711 if !exist { 712 return false 713 } 714 uncles = append(uncles, uncle.Header()) 715 return false 716 }) 717 718 w.snapshotBlock = types.NewBlock( 719 w.current.header, 720 w.current.txs, 721 uncles, 722 w.current.receipts, 723 new(trie.Trie), 724 ) 725 726 w.snapshotState = w.current.state.Copy() 727 } 728 729 func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) { 730 snap := w.current.state.Snapshot() 731 732 receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, *w.chain.GetVMConfig()) 733 if err != nil { 734 w.current.state.RevertToSnapshot(snap) 735 return nil, err 736 } 737 w.current.txs = append(w.current.txs, tx) 738 w.current.receipts = append(w.current.receipts, receipt) 739 740 return receipt.Logs, nil 741 } 742 743 func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coinbase common.Address, interrupt *int32) bool { 744 // Short circuit if current is nil 745 if w.current == nil { 746 return true 747 } 748 749 if w.current.gasPool == nil { 750 w.current.gasPool = new(core.GasPool).AddGas(w.current.header.GasLimit) 751 } 752 753 var coalescedLogs []*types.Log 754 755 for { 756 // In the following three cases, we will interrupt the execution of the transaction. 757 // (1) new head block event arrival, the interrupt signal is 1 758 // (2) worker start or restart, the interrupt signal is 1 759 // (3) worker recreate the mining block with any newly arrived transactions, the interrupt signal is 2. 
760 // For the first two cases, the semi-finished work will be discarded. 761 // For the third case, the semi-finished work will be submitted to the consensus engine. 762 if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone { 763 // Notify resubmit loop to increase resubmitting interval due to too frequent commits. 764 if atomic.LoadInt32(interrupt) == commitInterruptResubmit { 765 ratio := float64(w.current.header.GasLimit-w.current.gasPool.Gas()) / float64(w.current.header.GasLimit) 766 if ratio < 0.1 { 767 ratio = 0.1 768 } 769 w.resubmitAdjustCh <- &intervalAdjust{ 770 ratio: ratio, 771 inc: true, 772 } 773 } 774 return atomic.LoadInt32(interrupt) == commitInterruptNewHead 775 } 776 // If we don't have enough gas for any further transactions then we're done 777 if w.current.gasPool.Gas() < params.TxGas { 778 log.Trace("Not enough gas for further transactions", "have", w.current.gasPool, "want", params.TxGas) 779 break 780 } 781 // Retrieve the next transaction and abort if all done 782 tx := txs.Peek() 783 if tx == nil { 784 break 785 } 786 // Error may be ignored here. The error has already been checked 787 // during transaction acceptance is the transaction pool. 788 // 789 // We use the eip155 signer regardless of the current hf. 790 from, _ := types.Sender(w.current.signer, tx) 791 // Check whether the tx is replay protected. If we're not in the EIP155 hf 792 // phase, start ignoring the sender until we do. 
793 if tx.Protected() && !w.chainConfig.IsEIP155(w.current.header.Number) { 794 log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block) 795 796 txs.Pop() 797 continue 798 } 799 // consensus related validation 800 if w.isPoSA { 801 err := w.posa.ValidateTx(tx, w.current.header, w.current.state) 802 if err != nil { 803 log.Trace("Ignoring consensus invalid transaction", "hash", tx.Hash().String(), "from", from.String(), "to", tx.To(), "err", err) 804 txs.Pop() 805 continue 806 } 807 } 808 // Start executing the transaction 809 w.current.state.Prepare(tx.Hash(), common.Hash{}, w.current.tcount) 810 811 logs, err := w.commitTransaction(tx, coinbase) 812 switch err { 813 case core.ErrGasLimitReached: 814 // Pop the current out-of-gas transaction without shifting in the next from the account 815 log.Trace("Gas limit exceeded for current block", "sender", from) 816 txs.Pop() 817 818 case core.ErrNonceTooLow: 819 // New head notification data race between the transaction pool and miner, shift 820 log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) 821 txs.Shift() 822 823 case core.ErrNonceTooHigh: 824 // Reorg notification data race between the transaction pool and miner, skip account = 825 log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce()) 826 txs.Pop() 827 828 case nil: 829 // Everything ok, collect the logs and shift in the next transaction from the same account 830 coalescedLogs = append(coalescedLogs, logs...) 831 w.current.tcount++ 832 txs.Shift() 833 834 default: 835 // Strange error, discard the transaction and get the next in line (note, the 836 // nonce-too-high clause will prevent us from executing in vain). 837 log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) 838 txs.Shift() 839 } 840 } 841 842 if !w.isRunning() && len(coalescedLogs) > 0 { 843 // We don't push the pendingLogsEvent while we are mining. 
The reason is that 844 // when we are mining, the worker will regenerate a mining block every 3 seconds. 845 // In order to avoid pushing the repeated pendingLog, we disable the pending log pushing. 846 847 // make a copy, the state caches the logs and these logs get "upgraded" from pending to mined 848 // logs by filling in the block hash when the block was mined by the local miner. This can 849 // cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed. 850 cpy := make([]*types.Log, len(coalescedLogs)) 851 for i, l := range coalescedLogs { 852 cpy[i] = new(types.Log) 853 *cpy[i] = *l 854 } 855 w.pendingLogsFeed.Send(cpy) 856 } 857 // Notify resubmit loop to decrease resubmitting interval if current interval is larger 858 // than the user-specified one. 859 if interrupt != nil { 860 w.resubmitAdjustCh <- &intervalAdjust{inc: false} 861 } 862 return false 863 } 864 865 // commitNewWork generates several new sealing tasks based on the parent block. 866 func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) { 867 w.mu.RLock() 868 defer w.mu.RUnlock() 869 870 tstart := time.Now() 871 parent := w.chain.CurrentBlock() 872 873 if parent.Time() >= uint64(timestamp) { 874 timestamp = int64(parent.Time() + 1) 875 } 876 // this will ensure we're not going off too far in the future 877 if now := time.Now().Unix(); timestamp > now+1 { 878 wait := time.Duration(timestamp-now) * time.Second 879 log.Info("Mining too far in the future", "wait", common.PrettyDuration(wait)) 880 time.Sleep(wait) 881 } 882 883 num := parent.Number() 884 header := &types.Header{ 885 ParentHash: parent.Hash(), 886 Number: num.Add(num, common.Big1), 887 GasLimit: core.CalcGasLimit(parent, w.config.GasFloor, w.config.GasCeil), 888 Extra: w.extra, 889 Time: uint64(timestamp), 890 } 891 // Only set the coinbase if our consensus engine is running (avoid spurious block rewards) 892 if w.isRunning() { 893 if w.coinbase == (common.Address{}) { 894 
			log.Error("Refusing to mine without etherbase")
			return
		}
		// Use the locally configured etherbase as the block's fee recipient.
		header.Coinbase = w.coinbase
	}
	// Let the consensus engine initialize any engine-specific header fields.
	if err := w.engine.Prepare(w.chain, header); err != nil {
		log.Error("Failed to prepare header for mining", "err", err)
		return
	}
	// If we care about TheDAO hard-fork, check whether to override the extra-data or not
	if daoBlock := w.chainConfig.DAOForkBlock; daoBlock != nil {
		// Check whether the block is among the fork extra-override range
		limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange)
		if header.Number.Cmp(daoBlock) >= 0 && header.Number.Cmp(limit) < 0 {
			// Depending on whether we support or oppose the fork, override differently
			if w.chainConfig.DAOForkSupport {
				header.Extra = common.CopyBytes(params.DAOForkBlockExtra)
			} else if bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
				header.Extra = []byte{} // If miner opposes, don't let it use the reserved extra-data
			}
		}
	}
	// Could potentially happen if starting to mine in an odd state.
	err := w.makeCurrent(parent, header)
	if err != nil {
		log.Error("Failed to create mining context", "err", err)
		return
	}
	// Create the current work task and check any fork transitions needed
	env := w.current
	if w.isPoSA {
		// PoSA engines may apply system-contract upgrades against the fresh
		// state before any transactions are packed into the block.
		if err := w.posa.PreHandle(w.chain, header, env.state); err != nil {
			log.Error("Failed to apply system contract upgrade", "err", err)
			return
		}
	}
	// Accumulate the uncles for the current block (at most two are committed).
	uncles := make([]*types.Header, 0, 2)
	commitUncles := func(blocks map[common.Hash]*types.Block) {
		// Clean up stale uncle blocks first
		for hash, uncle := range blocks {
			if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() {
				delete(blocks, hash)
			}
		}
		for hash, uncle := range blocks {
			if len(uncles) == 2 {
				break
			}
			if err := w.commitUncle(env, uncle.Header()); err != nil {
				log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
			} else {
				log.Debug("Committing new uncle to block", "hash", hash)
				uncles = append(uncles, uncle.Header())
			}
		}
	}
	// Prefer locally generated uncles over remotely received ones.
	commitUncles(w.localUncles)
	commitUncles(w.remoteUncles)

	// Create an empty block based on temporary copied state for
	// sealing in advance without waiting block execution finished.
	if !noempty && atomic.LoadUint32(&w.noempty) == 0 {
		w.commit(uncles, nil, false, tstart)
	}

	// Fill the block with all available pending transactions.
	pending, err := w.eth.TxPool().Pending()
	if err != nil {
		log.Error("Failed to fetch pending transactions", "err", err)
		return
	}
	// Short circuit if there is no available pending transactions.
	// But if we disable empty precommit already, ignore it. Since
	// empty block is necessary to keep the liveness of the network.
	if len(pending) == 0 && atomic.LoadUint32(&w.noempty) == 0 {
		w.updateSnapshot()
		return
	}
	// Split the pending transactions into locals and remotes
	localTxs, remoteTxs := make(map[common.Address]types.Transactions), pending
	for _, account := range w.eth.TxPool().Locals() {
		if txs := remoteTxs[account]; len(txs) > 0 {
			delete(remoteTxs, account)
			localTxs[account] = txs
		}
	}
	// Commit local transactions first; stop if commitTransactions signals an abort.
	if len(localTxs) > 0 {
		txs := types.NewTransactionsByPriceAndNonce(w.current.signer, localTxs)
		if w.commitTransactions(txs, w.coinbase, interrupt) {
			return
		}
	}
	if len(remoteTxs) > 0 {
		txs := types.NewTransactionsByPriceAndNonce(w.current.signer, remoteTxs)
		if w.commitTransactions(txs, w.coinbase, interrupt) {
			return
		}
	}
	w.commit(uncles, w.fullTaskHook, true, tstart)
}

// commit runs any post-transaction state modifications, assembles the final block
// and commits new work if consensus engine is running.
func (w *worker) commit(uncles []*types.Header, interval func(), update bool, start time.Time) error {
	// Deep copy receipts here to avoid interaction between different tasks.
	cpyReceipts := copyReceipts(w.current.receipts)
	// copy transactions to a new slice to avoid interaction between different tasks.
	txs := make([]*types.Transaction, len(w.current.txs))
	copy(txs, w.current.txs)
	// Work on a copy of the state so the sealing task owns an independent StateDB.
	s := w.current.state.Copy()
	block, receipts, err := w.engine.FinalizeAndAssemble(w.chain, w.current.header, s, txs, uncles, cpyReceipts)
	if err != nil {
		return err
	}
	if w.isRunning() {
		if interval != nil {
			interval()
		}
		// Hand the sealing task over to the task channel, unless the worker
		// shuts down first.
		select {
		case w.taskCh <- &task{receipts: receipts, state: s, block: block, createdAt: time.Now()}:
			w.unconfirmed.Shift(block.NumberU64() - 1)
			log.Info("Commit new mining work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
				"uncles", len(uncles), "txs", w.current.tcount,
				"gas", block.GasUsed(), "fees", totalFees(block, receipts),
				"elapsed", common.PrettyDuration(time.Since(start)))

		case <-w.exitCh:
			log.Info("Worker has exited")
		}
	}
	if update {
		w.updateSnapshot()
	}
	return nil
}

// copyReceipts makes a deep copy of the given receipts.
// Note: each receipt struct is copied by value, so reference fields inside a
// receipt (e.g. its Logs slice) still share backing storage with the original.
func copyReceipts(receipts []*types.Receipt) []*types.Receipt {
	result := make([]*types.Receipt, len(receipts))
	for i, l := range receipts {
		cpy := *l
		result[i] = &cpy
	}
	return result
}

// postSideBlock fires a side chain event, only use it for testing.
// The send is abandoned if the worker exits first.
func (w *worker) postSideBlock(event core.ChainSideEvent) {
	select {
	case w.chainSideCh <- event:
	case <-w.exitCh:
	}
}

// totalFees computes total consumed fees in ETH. Block transactions and receipts have to have the same order.
1051 func totalFees(block *types.Block, receipts []*types.Receipt) *big.Float { 1052 if len(block.Transactions()) != len(receipts) { 1053 // for debug 1054 log.Error("transactions len != receipts len", "blockHash", block.Hash().String(), "number", block.Number()) 1055 for i, tx := range block.Transactions() { 1056 js, _ := tx.MarshalJSON() 1057 log.Error("tx", strconv.Itoa(i), string(js)) 1058 } 1059 for i, receipt := range receipts { 1060 js, _ := receipt.MarshalJSON() 1061 log.Error("receipt", strconv.Itoa(i), string(js)) 1062 } 1063 } 1064 feesWei := new(big.Int) 1065 for i, tx := range block.Transactions() { 1066 feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), tx.GasPrice())) 1067 } 1068 return new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether))) 1069 }