// Copyright 2015 by the Authors
// This file is part of the go-core library.
//
// The go-core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.

package miner

import (
	"errors"
	"math/big"
	"sync"
	"sync/atomic"
	"time"

	mapset "github.com/deckarep/golang-set"

	"github.com/core-coin/go-core/v2/common"
	"github.com/core-coin/go-core/v2/consensus"
	"github.com/core-coin/go-core/v2/core"
	"github.com/core-coin/go-core/v2/core/state"
	"github.com/core-coin/go-core/v2/core/types"
	"github.com/core-coin/go-core/v2/event"
	"github.com/core-coin/go-core/v2/log"
	"github.com/core-coin/go-core/v2/params"
	"github.com/core-coin/go-core/v2/trie"
)

const (
	// resultQueueSize is the size of channel listening to sealing result.
	resultQueueSize = 10

	// txChanSize is the size of channel listening to NewTxsEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 4096

	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

	// chainSideChanSize is the size of channel listening to ChainSideEvent.
	chainSideChanSize = 10

	// resubmitAdjustChanSize is the size of resubmitting interval adjustment channel.
	resubmitAdjustChanSize = 10

	// miningLogAtDepth is the number of confirmations before logging successful mining.
	miningLogAtDepth = 7

	// minRecommitInterval is the minimal time interval to recreate the mining block with
	// any newly arrived transactions.
	minRecommitInterval = 1 * time.Second

	// maxRecommitInterval is the maximum time interval to recreate the mining block with
	// any newly arrived transactions.
	maxRecommitInterval = 15 * time.Second

	// intervalAdjustRatio is the impact a single interval adjustment has on sealing work
	// resubmitting interval.
	intervalAdjustRatio = 0.1

	// intervalAdjustBias is applied during the new resubmit interval calculation in favor of
	// increasing upper limit or decreasing lower limit so that the limit can be reachable.
	intervalAdjustBias = 200 * 1000.0 * 1000.0

	// staleThreshold is the maximum depth of the acceptable stale block.
	staleThreshold = 7
)

// environment is the worker's current environment and holds all of the current state information.
// It is rebuilt for every sealing cycle by makeCurrent and is only touched from the worker's
// own loops (no internal locking).
type environment struct {
	signer types.Signer // signer used to recover transaction senders for this cycle

	state      *state.StateDB   // apply state changes here
	ancestors  mapset.Set       // ancestor set (used for checking uncle parent validity)
	family     mapset.Set       // family set (used for checking uncle invalidity)
	uncles     mapset.Set       // uncle set
	tcount     int              // tx count in cycle
	energyPool *core.EnergyPool // available energy used to pack transactions

	header   *types.Header
	txs      []*types.Transaction
	receipts []*types.Receipt
}

// task contains all information for consensus engine sealing and result submitting.
type task struct {
	receipts  []*types.Receipt
	state     *state.StateDB
	block     *types.Block
	createdAt time.Time
}

// Interrupt signals written into the *int32 carried by newWorkReq; they tell an
// in-flight commitTransactions run why it should stop.
const (
	commitInterruptNone int32 = iota
	commitInterruptNewHead
	commitInterruptResubmit
)

// newWorkReq represents a request for new sealing work submitting with relative interrupt notifier.
type newWorkReq struct {
	interrupt *int32
	noempty   bool
	timestamp int64
}

// intervalAdjust represents a resubmitting interval adjustment.
type intervalAdjust struct {
	ratio float64
	inc   bool
}

// worker is the main object which takes care of submitting new work to consensus engine
// and gathering the sealing result.
type worker struct {
	config      *Config
	chainConfig *params.ChainConfig
	engine      consensus.Engine
	xcb         Backend
	chain       *core.BlockChain

	// Feeds
	pendingLogsFeed event.Feed

	// Subscriptions
	mux          *event.TypeMux
	txsCh        chan core.NewTxsEvent
	txsSub       event.Subscription
	chainHeadCh  chan core.ChainHeadEvent
	chainHeadSub event.Subscription
	chainSideCh  chan core.ChainSideEvent
	chainSideSub event.Subscription

	// Channels
	newWorkCh          chan *newWorkReq
	taskCh             chan *task
	resultCh           chan *types.Block
	startCh            chan struct{}
	exitCh             chan struct{}
	resubmitIntervalCh chan time.Duration
	resubmitAdjustCh   chan *intervalAdjust

	current      *environment                 // An environment for current running cycle.
	localUncles  map[common.Hash]*types.Block // A set of side blocks generated locally as the possible uncle blocks.
	remoteUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks.
	unconfirmed  *unconfirmedBlocks           // A set of locally mined blocks pending canonicalness confirmations.

	mu       sync.RWMutex // The lock used to protect the coinbase and extra fields
	coinbase common.Address
	extra    []byte

	pendingMu    sync.RWMutex
	pendingTasks map[common.Hash]*task

	snapshotMu    sync.RWMutex // The lock used to protect the block snapshot and state snapshot
	snapshotBlock *types.Block
	snapshotState *state.StateDB

	// atomic status counters
	running int32 // The indicator whether the consensus engine is running or not.
	newTxs  int32 // New arrival transaction count since last sealing work submitting.

	// noempty is the flag used to control whether the feature of pre-seal empty
	// block is enabled. The default value is false(pre-seal is enabled by default).
	// But in some special scenario the consensus engine will seal blocks instantaneously,
	// in this case this feature will add all empty blocks into canonical chain
	// non-stop and no real transaction will be included.
	noempty uint32

	// External functions
	isLocalBlock func(block *types.Block) bool // Function used to determine whether the specified block is mined by local miner.

	// Test hooks
	newTaskHook  func(*task)                        // Method to call upon receiving a new sealing task.
	skipSealHook func(*task) bool                   // Method to decide whether skipping the sealing.
	fullTaskHook func()                             // Method to call before pushing the full sealing task.
	resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
}

// newWorker assembles a worker, subscribes it to tx-pool and chain events, spins up the
// four background loops (main/newWork/result/task) and, when init is true, submits a
// first piece of work so the pending state is populated immediately.
func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, xcb Backend, mux *event.TypeMux, isLocalBlock func(*types.Block) bool, init bool) *worker {
	worker := &worker{
		config:             config,
		chainConfig:        chainConfig,
		engine:             engine,
		xcb:                xcb,
		mux:                mux,
		chain:              xcb.BlockChain(),
		isLocalBlock:       isLocalBlock,
		localUncles:        make(map[common.Hash]*types.Block),
		remoteUncles:       make(map[common.Hash]*types.Block),
		unconfirmed:        newUnconfirmedBlocks(xcb.BlockChain(), miningLogAtDepth),
		pendingTasks:       make(map[common.Hash]*task),
		txsCh:              make(chan core.NewTxsEvent, txChanSize),
		chainHeadCh:        make(chan core.ChainHeadEvent, chainHeadChanSize),
		chainSideCh:        make(chan core.ChainSideEvent, chainSideChanSize),
		newWorkCh:          make(chan *newWorkReq),
		taskCh:             make(chan *task),
		resultCh:           make(chan *types.Block, resultQueueSize),
		exitCh:             make(chan struct{}),
		startCh:            make(chan struct{}, 1),
		resubmitIntervalCh: make(chan time.Duration),
		resubmitAdjustCh:   make(chan *intervalAdjust, resubmitAdjustChanSize),
	}
	// Subscribe NewTxsEvent for tx pool
	worker.txsSub = xcb.TxPool().SubscribeNewTxsEvent(worker.txsCh)
	// Subscribe events for blockchain
	worker.chainHeadSub = xcb.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh)
	worker.chainSideSub = xcb.BlockChain().SubscribeChainSideEvent(worker.chainSideCh)

	// Sanitize recommit interval if the user-specified one is too short.
	recommit := worker.config.Recommit
	if recommit < minRecommitInterval {
		log.Warn("Sanitizing miner recommit interval", "provided", recommit, "updated", minRecommitInterval)
		recommit = minRecommitInterval
	}

	go worker.mainLoop()
	go worker.newWorkLoop(recommit)
	go worker.resultLoop()
	go worker.taskLoop()

	// Submit first work to initialize pending state.
	if init {
		worker.startCh <- struct{}{}
	}
	return worker
}

// setCorebase sets the corebase used to initialize the block coinbase field.
func (w *worker) setCorebase(addr common.Address) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.coinbase = addr
}

// setExtra sets the content used to initialize the block extra field.
func (w *worker) setExtra(extra []byte) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.extra = extra
}

// setRecommitInterval updates the interval for miner sealing work recommitting.
func (w *worker) setRecommitInterval(interval time.Duration) {
	w.resubmitIntervalCh <- interval
}

// disablePreseal disables pre-sealing mining feature
func (w *worker) disablePreseal() {
	atomic.StoreUint32(&w.noempty, 1)
}

// enablePreseal enables pre-sealing mining feature
func (w *worker) enablePreseal() {
	atomic.StoreUint32(&w.noempty, 0)
}

// pending returns the pending state and corresponding block.
func (w *worker) pending() (*types.Block, *state.StateDB) {
	// return a snapshot to avoid contention on currentMu mutex
	w.snapshotMu.RLock()
	defer w.snapshotMu.RUnlock()
	if w.snapshotState == nil {
		return nil, nil
	}
	return w.snapshotBlock, w.snapshotState.Copy()
}

// pendingBlock returns pending block.
func (w *worker) pendingBlock() *types.Block {
	// return a snapshot to avoid contention on currentMu mutex
	w.snapshotMu.RLock()
	defer w.snapshotMu.RUnlock()
	return w.snapshotBlock
}

// start sets the running status as 1 and triggers new work submitting.
func (w *worker) start() {
	atomic.StoreInt32(&w.running, 1)
	w.startCh <- struct{}{}
}

// stop sets the running status as 0.
func (w *worker) stop() {
	atomic.StoreInt32(&w.running, 0)
}

// isRunning returns an indicator whether worker is running or not.
298 func (w *worker) isRunning() bool { 299 return atomic.LoadInt32(&w.running) == 1 300 } 301 302 // close terminates all background threads maintained by the worker. 303 // Note the worker does not support being closed multiple times. 304 func (w *worker) close() { 305 atomic.StoreInt32(&w.running, 0) 306 close(w.exitCh) 307 } 308 309 // recalcRecommit recalculates the resubmitting interval upon feedback. 310 func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) time.Duration { 311 var ( 312 prevF = float64(prev.Nanoseconds()) 313 next float64 314 ) 315 if inc { 316 next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias) 317 max := float64(maxRecommitInterval.Nanoseconds()) 318 if next > max { 319 next = max 320 } 321 } else { 322 next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias) 323 min := float64(minRecommit.Nanoseconds()) 324 if next < min { 325 next = min 326 } 327 } 328 return time.Duration(int64(next)) 329 } 330 331 // newWorkLoop is a standalone goroutine to submit new mining work upon received events. 332 func (w *worker) newWorkLoop(recommit time.Duration) { 333 var ( 334 interrupt *int32 335 minRecommit = recommit // minimal resubmit interval specified by user. 336 timestamp int64 // timestamp for each round of mining. 337 ) 338 339 timer := time.NewTimer(0) 340 defer timer.Stop() 341 <-timer.C // discard the initial tick 342 343 // commit aborts in-flight transaction execution with given signal and resubmits a new one. 344 commit := func(noempty bool, s int32) { 345 if interrupt != nil { 346 atomic.StoreInt32(interrupt, s) 347 } 348 interrupt = new(int32) 349 w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty, timestamp: timestamp} 350 timer.Reset(recommit) 351 atomic.StoreInt32(&w.newTxs, 0) 352 } 353 // clearPending cleans the stale pending tasks. 
354 clearPending := func(number uint64) { 355 w.pendingMu.Lock() 356 for h, t := range w.pendingTasks { 357 if t.block.NumberU64()+staleThreshold <= number { 358 delete(w.pendingTasks, h) 359 } 360 } 361 w.pendingMu.Unlock() 362 } 363 364 for { 365 select { 366 case <-w.startCh: 367 clearPending(w.chain.CurrentBlock().NumberU64()) 368 timestamp = time.Now().Unix() 369 commit(false, commitInterruptNewHead) 370 371 case head := <-w.chainHeadCh: 372 clearPending(head.Block.NumberU64()) 373 timestamp = time.Now().Unix() 374 commit(false, commitInterruptNewHead) 375 376 case <-timer.C: 377 // If mining is running resubmit a new work cycle periodically to pull in 378 // higher priced transactions. Disable this overhead for pending blocks. 379 if w.isRunning() && (w.chainConfig.Clique == nil || w.chainConfig.Clique.Period > 0) { 380 // Short circuit if no new transaction arrives. 381 if atomic.LoadInt32(&w.newTxs) == 0 { 382 timer.Reset(recommit) 383 continue 384 } 385 commit(true, commitInterruptResubmit) 386 } 387 388 case interval := <-w.resubmitIntervalCh: 389 // Adjust resubmit interval explicitly by user. 390 if interval < minRecommitInterval { 391 log.Warn("Sanitizing miner recommit interval", "provided", interval, "updated", minRecommitInterval) 392 interval = minRecommitInterval 393 } 394 log.Info("Miner recommit interval update", "from", minRecommit, "to", interval) 395 minRecommit, recommit = interval, interval 396 397 if w.resubmitHook != nil { 398 w.resubmitHook(minRecommit, recommit) 399 } 400 401 case adjust := <-w.resubmitAdjustCh: 402 // Adjust resubmit interval by feedback. 
403 if adjust.inc { 404 before := recommit 405 target := float64(recommit.Nanoseconds()) / adjust.ratio 406 recommit = recalcRecommit(minRecommit, recommit, target, true) 407 log.Trace("Increase miner recommit interval", "from", before, "to", recommit) 408 } else { 409 before := recommit 410 recommit = recalcRecommit(minRecommit, recommit, float64(minRecommit.Nanoseconds()), false) 411 log.Trace("Decrease miner recommit interval", "from", before, "to", recommit) 412 } 413 414 if w.resubmitHook != nil { 415 w.resubmitHook(minRecommit, recommit) 416 } 417 418 case <-w.exitCh: 419 return 420 } 421 } 422 } 423 424 // mainLoop is a standalone goroutine to regenerate the sealing task based on the received event. 425 func (w *worker) mainLoop() { 426 defer w.txsSub.Unsubscribe() 427 defer w.chainHeadSub.Unsubscribe() 428 defer w.chainSideSub.Unsubscribe() 429 430 for { 431 select { 432 case req := <-w.newWorkCh: 433 w.commitNewWork(req.interrupt, req.noempty, req.timestamp) 434 435 case ev := <-w.chainSideCh: 436 // Short circuit for duplicate side blocks 437 if _, exist := w.localUncles[ev.Block.Hash()]; exist { 438 continue 439 } 440 if _, exist := w.remoteUncles[ev.Block.Hash()]; exist { 441 continue 442 } 443 // Add side block to possible uncle block set depending on the author. 444 if w.isLocalBlock != nil && w.isLocalBlock(ev.Block) { 445 w.localUncles[ev.Block.Hash()] = ev.Block 446 } else { 447 w.remoteUncles[ev.Block.Hash()] = ev.Block 448 } 449 // If our mining block contains less than 2 uncle blocks, 450 // add the new uncle block if valid and regenerate a mining block. 
451 if w.isRunning() && w.current != nil && w.current.uncles.Cardinality() < 2 { 452 start := time.Now() 453 if err := w.commitUncle(w.current, ev.Block.Header()); err == nil { 454 var uncles []*types.Header 455 w.current.uncles.Each(func(item interface{}) bool { 456 hash, ok := item.(common.Hash) 457 if !ok { 458 return false 459 } 460 uncle, exist := w.localUncles[hash] 461 if !exist { 462 uncle, exist = w.remoteUncles[hash] 463 } 464 if !exist { 465 return false 466 } 467 uncles = append(uncles, uncle.Header()) 468 return false 469 }) 470 w.commit(uncles, nil, true, start) 471 } 472 } 473 474 case ev := <-w.txsCh: 475 // Apply transactions to the pending state if we're not mining. 476 // 477 // Note all transactions received may not be continuous with transactions 478 // already included in the current mining block. These transactions will 479 // be automatically eliminated. 480 if !w.isRunning() && w.current != nil { 481 // If block is already full, abort 482 if gp := w.current.energyPool; gp != nil && gp.Energy() < params.TxEnergy { 483 continue 484 } 485 w.mu.RLock() 486 coinbase := w.coinbase 487 w.mu.RUnlock() 488 489 txs := make(map[common.Address]types.Transactions) 490 for _, tx := range ev.Txs { 491 acc, err := types.Sender(w.current.signer, tx) 492 if err != nil { 493 log.Error("Bad recipient address or signature", "err", err) 494 continue 495 } 496 txs[acc] = append(txs[acc], tx) 497 } 498 txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs) 499 tcount := w.current.tcount 500 w.commitTransactions(txset, coinbase, nil) 501 // Only update the snapshot if any new transactons were added 502 // to the pending block 503 if tcount != w.current.tcount { 504 w.updateSnapshot() 505 } 506 } else { 507 // Special case, if the consensus engine is 0 period clique(dev mode), 508 // submit mining work here since all empty submission will be rejected 509 // by clique. Of course the advance sealing(empty submission) is disabled. 
510 if w.chainConfig.Clique != nil && w.chainConfig.Clique.Period == 0 { 511 w.commitNewWork(nil, true, time.Now().Unix()) 512 } 513 } 514 atomic.AddInt32(&w.newTxs, int32(len(ev.Txs))) 515 516 // System stopped 517 case <-w.exitCh: 518 return 519 case <-w.txsSub.Err(): 520 return 521 case <-w.chainHeadSub.Err(): 522 return 523 case <-w.chainSideSub.Err(): 524 return 525 } 526 } 527 } 528 529 // taskLoop is a standalone goroutine to fetch sealing task from the generator and 530 // push them to consensus engine. 531 func (w *worker) taskLoop() { 532 var ( 533 stopCh chan struct{} 534 prev common.Hash 535 ) 536 537 // interrupt aborts the in-flight sealing task. 538 interrupt := func() { 539 if stopCh != nil { 540 close(stopCh) 541 stopCh = nil 542 } 543 } 544 for { 545 select { 546 case task := <-w.taskCh: 547 if w.newTaskHook != nil { 548 w.newTaskHook(task) 549 } 550 // Reject duplicate sealing work due to resubmitting. 551 sealHash := w.engine.SealHash(task.block.Header()) 552 if sealHash == prev { 553 continue 554 } 555 // Interrupt previous sealing operation 556 interrupt() 557 stopCh, prev = make(chan struct{}), sealHash 558 559 if w.skipSealHook != nil && w.skipSealHook(task) { 560 continue 561 } 562 w.pendingMu.Lock() 563 w.pendingTasks[sealHash] = task 564 w.pendingMu.Unlock() 565 566 if err := w.engine.Seal(w.chain, task.block, w.resultCh, stopCh); err != nil { 567 log.Warn("Block sealing failed", "err", err) 568 } 569 case <-w.exitCh: 570 interrupt() 571 return 572 } 573 } 574 } 575 576 // resultLoop is a standalone goroutine to handle sealing result submitting 577 // and flush relative data to the database. 578 func (w *worker) resultLoop() { 579 for { 580 select { 581 case block := <-w.resultCh: 582 // Short circuit when receiving empty result. 583 if block == nil { 584 continue 585 } 586 // Short circuit when receiving duplicate result caused by resubmitting. 
587 if w.chain.HasBlock(block.Hash(), block.NumberU64()) { 588 continue 589 } 590 var ( 591 sealhash = w.engine.SealHash(block.Header()) 592 hash = block.Hash() 593 ) 594 w.pendingMu.RLock() 595 task, exist := w.pendingTasks[sealhash] 596 w.pendingMu.RUnlock() 597 if !exist { 598 log.Error("Block found but no relative pending task", "number", block.Number(), "sealhash", sealhash, "hash", hash) 599 continue 600 } 601 // Different block could share same sealhash, deep copy here to prevent write-write conflict. 602 var ( 603 receipts = make([]*types.Receipt, len(task.receipts)) 604 logs []*types.Log 605 ) 606 for i, receipt := range task.receipts { 607 // add block location fields 608 receipt.BlockHash = hash 609 receipt.BlockNumber = block.Number() 610 receipt.TransactionIndex = uint(i) 611 612 receipts[i] = new(types.Receipt) 613 *receipts[i] = *receipt 614 // Update the block hash in all logs since it is now available and not when the 615 // receipt/log of individual transactions were created. 616 for _, log := range receipt.Logs { 617 log.BlockHash = hash 618 } 619 logs = append(logs, receipt.Logs...) 620 } 621 // Commit block and state to database. 622 _, err := w.chain.WriteBlockWithState(block, receipts, logs, task.state, true) 623 if err != nil { 624 log.Error("Failed writing block to chain", "err", err) 625 continue 626 } 627 log.Info("Successfully sealed new block", "number", block.Number(), "sealhash", sealhash, "hash", hash, 628 "elapsed", common.PrettyDuration(time.Since(task.createdAt))) 629 630 // Broadcast the block and announce chain insertion event 631 w.mux.Post(core.NewMinedBlockEvent{Block: block}) 632 633 // Insert the block into the set of pending ones to resultLoop for confirmations 634 w.unconfirmed.Insert(block.NumberU64(), block.Hash()) 635 636 case <-w.exitCh: 637 return 638 } 639 } 640 } 641 642 // makeCurrent creates a new environment for the current cycle. 
643 func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error { 644 state, err := w.chain.StateAt(parent.Root()) 645 if err != nil { 646 return err 647 } 648 env := &environment{ 649 signer: types.NewNucleusSigner(w.chainConfig.NetworkID), 650 state: state, 651 ancestors: mapset.NewSet(), 652 family: mapset.NewSet(), 653 uncles: mapset.NewSet(), 654 header: header, 655 } 656 657 // when 08 is processed ancestors contain 07 (quick block) 658 for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) { 659 for _, uncle := range ancestor.Uncles() { 660 env.family.Add(uncle.Hash()) 661 } 662 env.family.Add(ancestor.Hash()) 663 env.ancestors.Add(ancestor.Hash()) 664 } 665 666 // Keep track of transactions which return errors so they can be removed 667 env.tcount = 0 668 w.current = env 669 return nil 670 } 671 672 // commitUncle adds the given block to uncle block set, returns error if failed to add. 673 func (w *worker) commitUncle(env *environment, uncle *types.Header) error { 674 hash := uncle.Hash() 675 if env.uncles.Contains(hash) { 676 return errors.New("uncle not unique") 677 } 678 if env.header.ParentHash == uncle.ParentHash { 679 return errors.New("uncle is sibling") 680 } 681 if !env.ancestors.Contains(uncle.ParentHash) { 682 return errors.New("uncle's parent unknown") 683 } 684 if env.family.Contains(hash) { 685 return errors.New("uncle already included") 686 } 687 env.uncles.Add(uncle.Hash()) 688 return nil 689 } 690 691 // updateSnapshot updates pending snapshot block and state. 692 // Note this function assumes the current variable is thread safe. 
693 func (w *worker) updateSnapshot() { 694 w.snapshotMu.Lock() 695 defer w.snapshotMu.Unlock() 696 697 var uncles []*types.Header 698 w.current.uncles.Each(func(item interface{}) bool { 699 hash, ok := item.(common.Hash) 700 if !ok { 701 return false 702 } 703 uncle, exist := w.localUncles[hash] 704 if !exist { 705 uncle, exist = w.remoteUncles[hash] 706 } 707 if !exist { 708 return false 709 } 710 uncles = append(uncles, uncle.Header()) 711 return false 712 }) 713 714 w.snapshotBlock = types.NewBlock( 715 w.current.header, 716 w.current.txs, 717 uncles, 718 w.current.receipts, 719 new(trie.Trie), 720 ) 721 722 w.snapshotState = w.current.state.Copy() 723 } 724 725 func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) { 726 snap := w.current.state.Snapshot() 727 728 receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &coinbase, w.current.energyPool, w.current.state, w.current.header, tx, &w.current.header.EnergyUsed, *w.chain.GetVMConfig()) 729 if err != nil { 730 w.current.state.RevertToSnapshot(snap) 731 return nil, err 732 } 733 w.current.txs = append(w.current.txs, tx) 734 w.current.receipts = append(w.current.receipts, receipt) 735 736 return receipt.Logs, nil 737 } 738 739 func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coinbase common.Address, interrupt *int32) bool { 740 // Short circuit if current is nil 741 if w.current == nil { 742 return true 743 } 744 745 if w.current.energyPool == nil { 746 w.current.energyPool = new(core.EnergyPool).AddEnergy(w.current.header.EnergyLimit) 747 } 748 749 var coalescedLogs []*types.Log 750 751 for { 752 // In the following three cases, we will interrupt the execution of the transaction. 753 // (1) new head block event arrival, the interrupt signal is 1 754 // (2) worker start or restart, the interrupt signal is 1 755 // (3) worker recreate the mining block with any newly arrived transactions, the interrupt signal is 2. 
756 // For the first two cases, the semi-finished work will be discarded. 757 // For the third case, the semi-finished work will be submitted to the consensus engine. 758 if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone { 759 // Notify resubmit loop to increase resubmitting interval due to too frequent commits. 760 if atomic.LoadInt32(interrupt) == commitInterruptResubmit { 761 ratio := float64(w.current.header.EnergyLimit-w.current.energyPool.Energy()) / float64(w.current.header.EnergyLimit) 762 if ratio < 0.1 { 763 ratio = 0.1 764 } 765 w.resubmitAdjustCh <- &intervalAdjust{ 766 ratio: ratio, 767 inc: true, 768 } 769 } 770 return atomic.LoadInt32(interrupt) == commitInterruptNewHead 771 } 772 // If we don't have enough energy for any further transactions then we're done 773 if w.current.energyPool.Energy() < params.TxEnergy { 774 log.Trace("Not enough energy for further transactions", "have", w.current.energyPool, "want", params.TxEnergy) 775 break 776 } 777 // Retrieve the next transaction and abort if all done 778 tx := txs.Peek() 779 if tx == nil { 780 break 781 } 782 783 from, err := types.Sender(w.current.signer, tx) 784 if err != nil { 785 log.Error("Bad transaction recipient or signature", "err", err) 786 txs.Shift() 787 continue 788 } 789 // Start executing the transaction 790 w.current.state.Prepare(tx.Hash(), common.Hash{}, w.current.tcount) 791 792 logs, err := w.commitTransaction(tx, coinbase) 793 switch { 794 case errors.Is(err, core.ErrEnergyLimitReached): 795 // Pop the current out-of-energy transaction without shifting in the next from the account 796 log.Trace("Energy limit exceeded for current block", "sender", from) 797 txs.Pop() 798 799 case errors.Is(err, core.ErrNonceTooLow): 800 // New head notification data race between the transaction pool and miner, shift 801 log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) 802 txs.Shift() 803 804 case errors.Is(err, core.ErrNonceTooHigh): 805 
// Reorg notification data race between the transaction pool and miner, skip account = 806 log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce()) 807 txs.Pop() 808 809 case errors.Is(err, nil): 810 // Everything ok, collect the logs and shift in the next transaction from the same account 811 coalescedLogs = append(coalescedLogs, logs...) 812 w.current.tcount++ 813 txs.Shift() 814 815 default: 816 // Strange error, discard the transaction and get the next in line (note, the 817 // nonce-too-high clause will prevent us from executing in vain). 818 log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) 819 txs.Shift() 820 } 821 } 822 823 if !w.isRunning() && len(coalescedLogs) > 0 { 824 // We don't push the pendingLogsEvent while we are mining. The reason is that 825 // when we are mining, the worker will regenerate a mining block every 3 seconds. 826 // In order to avoid pushing the repeated pendingLog, we disable the pending log pushing. 827 828 // make a copy, the state caches the logs and these logs get "upgraded" from pending to mined 829 // logs by filling in the block hash when the block was mined by the local miner. This can 830 // cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed. 831 cpy := make([]*types.Log, len(coalescedLogs)) 832 for i, l := range coalescedLogs { 833 cpy[i] = new(types.Log) 834 *cpy[i] = *l 835 } 836 w.pendingLogsFeed.Send(cpy) 837 } 838 // Notify resubmit loop to decrease resubmitting interval if current interval is larger 839 // than the user-specified one. 840 if interrupt != nil { 841 w.resubmitAdjustCh <- &intervalAdjust{inc: false} 842 } 843 return false 844 } 845 846 // commitNewWork generates several new sealing tasks based on the parent block. 
func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) {
	w.mu.RLock()
	defer w.mu.RUnlock()

	tstart := time.Now()
	parent := w.chain.CurrentBlock()

	// The new block's timestamp must be strictly greater than the parent's.
	if parent.Time() >= uint64(timestamp) {
		timestamp = int64(parent.Time() + 1)
	}
	// this will ensure we're not going off too far in the future
	if now := time.Now().Unix(); timestamp > now+1 {
		wait := time.Duration(timestamp-now) * time.Second
		log.Info("Mining too far in the future", "wait", common.PrettyDuration(wait))
		time.Sleep(wait)
	}

	num := parent.Number()
	header := &types.Header{
		ParentHash:  parent.Hash(),
		Number:      num.Add(num, common.Big1),
		EnergyLimit: core.CalcEnergyLimit(parent, w.config.EnergyFloor, w.config.EnergyCeil),
		Extra:       w.extra,
		Time:        uint64(timestamp),
	}
	// Only set the coinbase if our consensus engine is running (avoid spurious block rewards)
	if w.isRunning() {
		if w.coinbase == (common.Address{}) {
			log.Error("Refusing to mine without corebase")
			return
		}
		header.Coinbase = w.coinbase
	}
	if err := w.engine.Prepare(w.chain, header); err != nil {
		log.Error("Failed to prepare header for mining", "err", err)
		return
	}
	// Could potentially happen if starting to mine in an odd state.
	err := w.makeCurrent(parent, header)
	if err != nil {
		log.Error("Failed to create mining context", "err", err)
		return
	}
	// Create the current work task and check any fork transitions needed
	env := w.current
	// Accumulate the uncles for the current block
	uncles := make([]*types.Header, 0, 2)
	commitUncles := func(blocks map[common.Hash]*types.Block) {
		// Clean up stale uncle blocks first
		for hash, uncle := range blocks {
			if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() {
				delete(blocks, hash)
			}
		}
		for hash, uncle := range blocks {
			if len(uncles) == 2 {
				break
			}
			if err := w.commitUncle(env, uncle.Header()); err != nil {
				log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
			} else {
				log.Debug("Committing new uncle to block", "hash", hash)
				uncles = append(uncles, uncle.Header())
			}
		}
	}
	// Prefer to locally generated uncle
	commitUncles(w.localUncles)
	commitUncles(w.remoteUncles)

	// Create an empty block based on temporary copied state for
	// sealing in advance without waiting block execution finished.
	if !noempty && atomic.LoadUint32(&w.noempty) == 0 {
		w.commit(uncles, nil, false, tstart)
	}

	// Fill the block with all available pending transactions.
	pending, err := w.xcb.TxPool().Pending()
	if err != nil {
		log.Error("Failed to fetch pending transactions", "err", err)
		return
	}
	// Short circuit if there is no available pending transactions.
	// But if we disable empty precommit already, ignore it. Since
	// empty block is necessary to keep the liveness of the network.
	if len(pending) == 0 && atomic.LoadUint32(&w.noempty) == 0 {
		w.updateSnapshot()
		return
	}
	// Split the pending transactions into locals and remotes
	localTxs, remoteTxs := make(map[common.Address]types.Transactions), pending
	for _, account := range w.xcb.TxPool().Locals() {
		if txs := remoteTxs[account]; len(txs) > 0 {
			delete(remoteTxs, account)
			localTxs[account] = txs
		}
	}
	// Locals first so local transactions get priority inclusion.
	if len(localTxs) > 0 {
		txs := types.NewTransactionsByPriceAndNonce(w.current.signer, localTxs)
		if w.commitTransactions(txs, w.coinbase, interrupt) {
			return
		}
	}
	if len(remoteTxs) > 0 {
		txs := types.NewTransactionsByPriceAndNonce(w.current.signer, remoteTxs)
		if w.commitTransactions(txs, w.coinbase, interrupt) {
			return
		}
	}
	w.commit(uncles, w.fullTaskHook, true, tstart)
}

// commit runs any post-transaction state modifications, assembles the final block
// and commits new work if consensus engine is running.
func (w *worker) commit(uncles []*types.Header, interval func(), update bool, start time.Time) error {
	// Deep copy receipts here to avoid interaction between different tasks.
	receipts := copyReceipts(w.current.receipts)
	s := w.current.state.Copy()
	block, err := w.engine.FinalizeAndAssemble(w.chain, w.current.header, s, w.current.txs, uncles, receipts)
	if err != nil {
		return err
	}
	if w.isRunning() {
		if interval != nil {
			interval()
		}
		select {
		case w.taskCh <- &task{receipts: receipts, state: s, block: block, createdAt: time.Now()}:
			w.unconfirmed.Shift(block.NumberU64() - 1)
			log.Info("Commit new mining work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
				"uncles", len(uncles), "txs", w.current.tcount,
				"energy", block.EnergyUsed(), "fees", totalFees(block, receipts),
				"elapsed", common.PrettyDuration(time.Since(start)))

		case <-w.exitCh:
			log.Info("Worker has exited")
		}
	}
	if update {
		w.updateSnapshot()
	}
	return nil
}

// copyReceipts makes a deep copy of the given receipts.
// NOTE(review): the copy is per-receipt struct only; pointer/slice fields inside each
// receipt (e.g. Logs) still alias the originals — confirm callers do not mutate them.
func copyReceipts(receipts []*types.Receipt) []*types.Receipt {
	result := make([]*types.Receipt, len(receipts))
	for i, l := range receipts {
		cpy := *l
		result[i] = &cpy
	}
	return result
}

// postSideBlock fires a side chain event, only use it for testing.
func (w *worker) postSideBlock(event core.ChainSideEvent) {
	select {
	case w.chainSideCh <- event:
	case <-w.exitCh:
	}
}

// totalFees computes total consumed fees in XCB. Block transactions and receipts have to have the same order.
func totalFees(block *types.Block, receipts []*types.Receipt) *big.Float {
	feesWei := new(big.Int)
	for i, tx := range block.Transactions() {
		feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].EnergyUsed), tx.EnergyPrice()))
	}
	return new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Core)))
}