github.com/daeglee/go-ethereum@v0.0.0-20190504220456-cad3e8d18e9b/miner/worker.go (about)

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package miner

import (
	"bytes"
	"errors"
	"math/big"
	"sync"
	"sync/atomic"
	"time"

	mapset "github.com/deckarep/golang-set"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/consensus/misc"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/params"
)

const (
	// resultQueueSize is the size of channel listening to sealing result.
	resultQueueSize = 10

	// txChanSize is the size of channel listening to NewTxsEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 4096

	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10

	// chainSideChanSize is the size of channel listening to ChainSideEvent.
	chainSideChanSize = 10

	// resubmitAdjustChanSize is the size of resubmitting interval adjustment channel.
	resubmitAdjustChanSize = 10

	// miningLogAtDepth is the number of confirmations before logging successful mining.
	miningLogAtDepth = 7

	// minRecommitInterval is the minimal time interval to recreate the mining block with
	// any newly arrived transactions.
	minRecommitInterval = 1 * time.Second

	// maxRecommitInterval is the maximum time interval to recreate the mining block with
	// any newly arrived transactions.
	maxRecommitInterval = 15 * time.Second

	// intervalAdjustRatio is the impact a single interval adjustment has on sealing work
	// resubmitting interval.
	intervalAdjustRatio = 0.1

	// intervalAdjustBias is applied during the new resubmit interval calculation in favor of
	// increasing upper limit or decreasing lower limit so that the limit can be reachable.
	intervalAdjustBias = 200 * 1000.0 * 1000.0

	// staleThreshold is the maximum depth of the acceptable stale block.
	staleThreshold = 7
)
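
// How the interval constants above interact (worked example with illustrative
// numbers, not taken from a live run): newWorkLoop nudges the resubmit interval by
// intervalAdjustRatio of the distance to a target, with intervalAdjustBias (0.2s)
// keeping the min/max bounds reachable. Starting from recommit = 3s and an increase
// request targeting 6s:
//
//	next = 3s*(1-0.1) + 0.1*(6s+0.2s) = 2.7s + 0.62s = 3.32s
//
// and the result is clamped to [minRecommitInterval, maxRecommitInterval].
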
// environment is the worker's current environment and holds all of the current state information.
type environment struct {
	signer types.Signer

	state     *state.StateDB // apply state changes here
	ancestors mapset.Set     // ancestor set (used for checking uncle parent validity)
	family    mapset.Set     // family set (used for checking uncle invalidity)
	uncles    mapset.Set     // uncle set
	tcount    int            // tx count in cycle
	gasPool   *core.GasPool  // available gas used to pack transactions

	header   *types.Header
	txs      []*types.Transaction
	receipts []*types.Receipt
}

// task contains all information for consensus engine sealing and result submitting.
type task struct {
	receipts  []*types.Receipt
	state     *state.StateDB
	block     *types.Block
	createdAt time.Time
}

const (
	commitInterruptNone int32 = iota
	commitInterruptNewHead
	commitInterruptResubmit
)

// newWorkReq represents a request for new sealing work submitting with relative interrupt notifier.
type newWorkReq struct {
	interrupt *int32
	noempty   bool
	timestamp int64
}

// intervalAdjust represents a resubmitting interval adjustment.
type intervalAdjust struct {
	ratio float64
	inc   bool
}

// worker is the main object which takes care of submitting new work to consensus engine
// and gathering the sealing result.
type worker struct {
	config *params.ChainConfig
	engine consensus.Engine
	eth    Backend
	chain  *core.BlockChain

	gasFloor uint64
	gasCeil  uint64

	// Subscriptions
	mux          *event.TypeMux
	txsCh        chan core.NewTxsEvent
	txsSub       event.Subscription
	chainHeadCh  chan core.ChainHeadEvent
	chainHeadSub event.Subscription
	chainSideCh  chan core.ChainSideEvent
	chainSideSub event.Subscription

	// Channels
	newWorkCh          chan *newWorkReq
	taskCh             chan *task
	resultCh           chan *types.Block
	startCh            chan struct{}
	exitCh             chan struct{}
	resubmitIntervalCh chan time.Duration
	resubmitAdjustCh   chan *intervalAdjust

	current      *environment                 // An environment for current running cycle.
	localUncles  map[common.Hash]*types.Block // A set of side blocks generated locally as the possible uncle blocks.
	remoteUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks.
	unconfirmed  *unconfirmedBlocks           // A set of locally mined blocks pending canonicalness confirmations.

	mu       sync.RWMutex // The lock used to protect the coinbase and extra fields
	coinbase common.Address
	extra    []byte

	pendingMu    sync.RWMutex
	pendingTasks map[common.Hash]*task

	snapshotMu    sync.RWMutex // The lock used to protect the block snapshot and state snapshot
	snapshotBlock *types.Block
	snapshotState *state.StateDB

	// atomic status counters
	running int32 // The indicator whether the consensus engine is running or not.
	newTxs  int32 // New arrival transaction count since last sealing work submitting.

	// External functions
	isLocalBlock func(block *types.Block) bool // Function used to determine whether the specified block is mined by local miner.

	// Test hooks
	newTaskHook  func(*task)                        // Method to call upon receiving a new sealing task.
	skipSealHook func(*task) bool                   // Method to decide whether skipping the sealing.
	fullTaskHook func()                             // Method to call before pushing the full sealing task.
	resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
}

func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, recommit time.Duration, gasFloor, gasCeil uint64, isLocalBlock func(*types.Block) bool) *worker {
	worker := &worker{
		config:             config,
		engine:             engine,
		eth:                eth,
		mux:                mux,
		chain:              eth.BlockChain(),
		gasFloor:           gasFloor,
		gasCeil:            gasCeil,
		isLocalBlock:       isLocalBlock,
		localUncles:        make(map[common.Hash]*types.Block),
		remoteUncles:       make(map[common.Hash]*types.Block),
		unconfirmed:        newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth),
		pendingTasks:       make(map[common.Hash]*task),
		txsCh:              make(chan core.NewTxsEvent, txChanSize),
		chainHeadCh:        make(chan core.ChainHeadEvent, chainHeadChanSize),
		chainSideCh:        make(chan core.ChainSideEvent, chainSideChanSize),
		newWorkCh:          make(chan *newWorkReq),
		taskCh:             make(chan *task),
		resultCh:           make(chan *types.Block, resultQueueSize),
		exitCh:             make(chan struct{}),
		startCh:            make(chan struct{}, 1),
		resubmitIntervalCh: make(chan time.Duration),
		resubmitAdjustCh:   make(chan *intervalAdjust, resubmitAdjustChanSize),
	}
	// Subscribe NewTxsEvent for tx pool
	worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh)
	// Subscribe events for blockchain
	worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh)
	worker.chainSideSub = eth.BlockChain().SubscribeChainSideEvent(worker.chainSideCh)

	// Sanitize recommit interval if the user-specified one is too short.
	if recommit < minRecommitInterval {
		log.Warn("Sanitizing miner recommit interval", "provided", recommit, "updated", minRecommitInterval)
		recommit = minRecommitInterval
	}

	go worker.mainLoop()
	go worker.newWorkLoop(recommit)
	go worker.resultLoop()
	go worker.taskLoop()

	// Submit first work to initialize pending state.
	worker.startCh <- struct{}{}

	return worker
}

// setEtherbase sets the etherbase used to initialize the block coinbase field.
func (w *worker) setEtherbase(addr common.Address) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.coinbase = addr
}

// setExtra sets the content used to initialize the block extra field.
func (w *worker) setExtra(extra []byte) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.extra = extra
}

// setRecommitInterval updates the interval for miner sealing work recommitting.
func (w *worker) setRecommitInterval(interval time.Duration) {
	w.resubmitIntervalCh <- interval
}

// pending returns the pending state and corresponding block.
func (w *worker) pending() (*types.Block, *state.StateDB) {
	// return a snapshot to avoid contention on currentMu mutex
	w.snapshotMu.RLock()
	defer w.snapshotMu.RUnlock()
	if w.snapshotState == nil {
		return nil, nil
	}
	return w.snapshotBlock, w.snapshotState.Copy()
}

// pendingBlock returns pending block.
func (w *worker) pendingBlock() *types.Block {
	// return a snapshot to avoid contention on currentMu mutex
	w.snapshotMu.RLock()
	defer w.snapshotMu.RUnlock()
	return w.snapshotBlock
}

// start sets the running status as 1 and triggers new work submitting.
func (w *worker) start() {
	atomic.StoreInt32(&w.running, 1)
	w.startCh <- struct{}{}
}
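
// Lifecycle sketch (illustrative only; engine and backend stand in for a concrete
// consensus.Engine and Backend implementation, which this file does not construct):
//
//	w := newWorker(params.TestChainConfig, engine, backend, new(event.TypeMux), 3*time.Second, 8000000, 8000000, nil)
//	w.setEtherbase(common.HexToAddress("0x01"))
//	w.start()                   // begin assembling and sealing blocks
//	block, state := w.pending() // copy of the block/state currently being built
//	w.stop()                    // stop sealing; the background loops keep running
//	w.close()                   // tear down the loops; may only be called once
//
// newWorker already spawns mainLoop, newWorkLoop, resultLoop and taskLoop, so a
// caller only toggles start/stop and eventually closes the worker.
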
// stop sets the running status as 0.
func (w *worker) stop() {
	atomic.StoreInt32(&w.running, 0)
}

// isRunning returns an indicator whether worker is running or not.
func (w *worker) isRunning() bool {
	return atomic.LoadInt32(&w.running) == 1
}

// close terminates all background threads maintained by the worker.
// Note the worker does not support being closed multiple times.
func (w *worker) close() {
	close(w.exitCh)
}

// newWorkLoop is a standalone goroutine to submit new mining work upon received events.
func (w *worker) newWorkLoop(recommit time.Duration) {
	var (
		interrupt   *int32
		minRecommit = recommit // minimal resubmit interval specified by user.
		timestamp   int64      // timestamp for each round of mining.
	)

	timer := time.NewTimer(0)
	<-timer.C // discard the initial tick

	// commit aborts in-flight transaction execution with given signal and resubmits a new one.
	commit := func(noempty bool, s int32) {
		if interrupt != nil {
			atomic.StoreInt32(interrupt, s)
		}
		interrupt = new(int32)
		w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty, timestamp: timestamp}
		timer.Reset(recommit)
		atomic.StoreInt32(&w.newTxs, 0)
	}
	// recalcRecommit recalculates the resubmitting interval upon feedback.
	recalcRecommit := func(target float64, inc bool) {
		var (
			prev = float64(recommit.Nanoseconds())
			next float64
		)
		if inc {
			next = prev*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias)
			// Recap if interval is larger than the maximum time interval
			if next > float64(maxRecommitInterval.Nanoseconds()) {
				next = float64(maxRecommitInterval.Nanoseconds())
			}
		} else {
			next = prev*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias)
			// Recap if interval is less than the user specified minimum
			if next < float64(minRecommit.Nanoseconds()) {
				next = float64(minRecommit.Nanoseconds())
			}
		}
		recommit = time.Duration(int64(next))
	}
	// clearPending cleans the stale pending tasks.
	clearPending := func(number uint64) {
		w.pendingMu.Lock()
		for h, t := range w.pendingTasks {
			if t.block.NumberU64()+staleThreshold <= number {
				delete(w.pendingTasks, h)
			}
		}
		w.pendingMu.Unlock()
	}

	for {
		select {
		case <-w.startCh:
			clearPending(w.chain.CurrentBlock().NumberU64())
			timestamp = time.Now().Unix()
			commit(false, commitInterruptNewHead)

		case head := <-w.chainHeadCh:
			clearPending(head.Block.NumberU64())
			timestamp = time.Now().Unix()
			commit(false, commitInterruptNewHead)

		case <-timer.C:
			// If mining is running resubmit a new work cycle periodically to pull in
			// higher priced transactions. Disable this overhead for pending blocks.
			if w.isRunning() && (w.config.Clique == nil || w.config.Clique.Period > 0) {
				// Short circuit if no new transaction arrives.
				if atomic.LoadInt32(&w.newTxs) == 0 {
					timer.Reset(recommit)
					continue
				}
				commit(true, commitInterruptResubmit)
			}

		case interval := <-w.resubmitIntervalCh:
			// Adjust resubmit interval explicitly by user.
			if interval < minRecommitInterval {
				log.Warn("Sanitizing miner recommit interval", "provided", interval, "updated", minRecommitInterval)
				interval = minRecommitInterval
			}
			log.Info("Miner recommit interval update", "from", minRecommit, "to", interval)
			minRecommit, recommit = interval, interval

			if w.resubmitHook != nil {
				w.resubmitHook(minRecommit, recommit)
			}

		case adjust := <-w.resubmitAdjustCh:
			// Adjust resubmit interval by feedback.
			if adjust.inc {
				before := recommit
				recalcRecommit(float64(recommit.Nanoseconds())/adjust.ratio, true)
				log.Trace("Increase miner recommit interval", "from", before, "to", recommit)
			} else {
				before := recommit
				recalcRecommit(float64(minRecommit.Nanoseconds()), false)
				log.Trace("Decrease miner recommit interval", "from", before, "to", recommit)
			}

			if w.resubmitHook != nil {
				w.resubmitHook(minRecommit, recommit)
			}

		case <-w.exitCh:
			return
		}
	}
}

// mainLoop is a standalone goroutine to regenerate the sealing task based on the received event.
func (w *worker) mainLoop() {
	defer w.txsSub.Unsubscribe()
	defer w.chainHeadSub.Unsubscribe()
	defer w.chainSideSub.Unsubscribe()

	for {
		select {
		case req := <-w.newWorkCh:
			w.commitNewWork(req.interrupt, req.noempty, req.timestamp)

		case ev := <-w.chainSideCh:
			// Short circuit for duplicate side blocks
			if _, exist := w.localUncles[ev.Block.Hash()]; exist {
				continue
			}
			if _, exist := w.remoteUncles[ev.Block.Hash()]; exist {
				continue
			}
			// Add side block to possible uncle block set depending on the author.
			if w.isLocalBlock != nil && w.isLocalBlock(ev.Block) {
				w.localUncles[ev.Block.Hash()] = ev.Block
			} else {
				w.remoteUncles[ev.Block.Hash()] = ev.Block
			}
			// If our mining block contains less than 2 uncle blocks,
			// add the new uncle block if valid and regenerate a mining block.
			if w.isRunning() && w.current != nil && w.current.uncles.Cardinality() < 2 {
				start := time.Now()
				if err := w.commitUncle(w.current, ev.Block.Header()); err == nil {
					var uncles []*types.Header
					w.current.uncles.Each(func(item interface{}) bool {
						hash, ok := item.(common.Hash)
						if !ok {
							return false
						}
						uncle, exist := w.localUncles[hash]
						if !exist {
							uncle, exist = w.remoteUncles[hash]
						}
						if !exist {
							return false
						}
						uncles = append(uncles, uncle.Header())
						return false
					})
					w.commit(uncles, nil, true, start)
				}
			}

		case ev := <-w.txsCh:
			// Apply transactions to the pending state if we're not mining.
			//
			// Note all transactions received may not be continuous with transactions
			// already included in the current mining block. These transactions will
			// be automatically eliminated.
			if !w.isRunning() && w.current != nil {
				w.mu.RLock()
				coinbase := w.coinbase
				w.mu.RUnlock()

				txs := make(map[common.Address]types.Transactions)
				num := w.chain.CurrentHeader().Number
				num_now := num.Add(num, common.Big1)
				idx := w.chain.GetFutureIDX(num_now.Uint64())

				for ii := uint(0); ii < *idx; ii++ {
					tx := w.chain.GetFutureTxByContract(num_now.Uint64(), ii)
					txs[*tx.FutureFrom()] = append(txs[*tx.FutureFrom()], tx)

					log.Info("CommitNewWork future tx added success")
					// TODO: find out why this is not being executed
				}
				for _, tx := range ev.Txs {
					acc, _ := types.Sender(w.current.signer, tx)
					txs[acc] = append(txs[acc], tx)
				}
				txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs)

				w.commitTransactions(txset, coinbase, nil)
				w.updateSnapshot()
			} else {
				// If we're mining, but nothing is being processed, wake on new transactions
				if w.config.Clique != nil && w.config.Clique.Period == 0 {
					w.commitNewWork(nil, false, time.Now().Unix())
				}
			}
			atomic.AddInt32(&w.newTxs, int32(len(ev.Txs)))

		// System stopped
		case <-w.exitCh:
			return
		case <-w.txsSub.Err():
			return
		case <-w.chainHeadSub.Err():
			return
		case <-w.chainSideSub.Err():
			return
		}
	}
}

// taskLoop is a standalone goroutine to fetch sealing tasks from the generator and
// push them to the consensus engine.
func (w *worker) taskLoop() {
	var (
		stopCh chan struct{}
		prev   common.Hash
	)

	// interrupt aborts the in-flight sealing task.
	interrupt := func() {
		if stopCh != nil {
			close(stopCh)
			stopCh = nil
		}
	}
	for {
		select {
		case task := <-w.taskCh:
			if w.newTaskHook != nil {
				w.newTaskHook(task)
			}
			// Reject duplicate sealing work due to resubmitting.
			sealHash := w.engine.SealHash(task.block.Header())
			if sealHash == prev {
				continue
			}
			// Interrupt previous sealing operation
			interrupt()
			stopCh, prev = make(chan struct{}), sealHash

			if w.skipSealHook != nil && w.skipSealHook(task) {
				continue
			}
			w.pendingMu.Lock()
			w.pendingTasks[w.engine.SealHash(task.block.Header())] = task
			w.pendingMu.Unlock()

			if err := w.engine.Seal(w.chain, task.block, w.resultCh, stopCh); err != nil {
				log.Warn("Block sealing failed", "err", err)
			}
		case <-w.exitCh:
			interrupt()
			return
		}
	}
}

// resultLoop is a standalone goroutine to handle sealing result submitting
// and flush relative data to the database.
func (w *worker) resultLoop() {
	for {
		select {
		case block := <-w.resultCh:
			// Short circuit when receiving empty result.
			if block == nil {
				continue
			}
			// Short circuit when receiving duplicate result caused by resubmitting.
			if w.chain.HasBlock(block.Hash(), block.NumberU64()) {
				continue
			}
			var (
				sealhash = w.engine.SealHash(block.Header())
				hash     = block.Hash()
			)
			w.pendingMu.RLock()
			task, exist := w.pendingTasks[sealhash]
			w.pendingMu.RUnlock()
			if !exist {
				log.Error("Block found but no relative pending task", "number", block.Number(), "sealhash", sealhash, "hash", hash)
				continue
			}
			// Different block could share same sealhash, deep copy here to prevent write-write conflict.
			var (
				receipts = make([]*types.Receipt, len(task.receipts))
				logs     []*types.Log
			)
			for i, receipt := range task.receipts {
				// add block location fields
				receipt.BlockHash = hash
				receipt.BlockNumber = block.Number()
				receipt.TransactionIndex = uint(i)

				receipts[i] = new(types.Receipt)
				*receipts[i] = *receipt
				// Update the block hash in all logs since it is now available and not when the
				// receipt/log of individual transactions were created.
				for _, log := range receipt.Logs {
					log.BlockHash = hash
				}
				logs = append(logs, receipt.Logs...)
			}
			// Commit block and state to database.
			stat, err := w.chain.WriteBlockWithState(block, receipts, task.state)
			if err != nil {
				log.Error("Failed writing block to chain", "err", err)
				continue
			}
			log.Info("Successfully sealed new block", "number", block.Number(), "sealhash", sealhash, "hash", hash,
				"elapsed", common.PrettyDuration(time.Since(task.createdAt)))

			// Broadcast the block and announce chain insertion event
			w.mux.Post(core.NewMinedBlockEvent{Block: block})

			var events []interface{}
			switch stat {
			case core.CanonStatTy:
				events = append(events, core.ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
				events = append(events, core.ChainHeadEvent{Block: block})
			case core.SideStatTy:
				events = append(events, core.ChainSideEvent{Block: block})
			}
			w.chain.PostChainEvents(events, logs)

			// Insert the block into the set of pending ones to resultLoop for confirmations
			w.unconfirmed.Insert(block.NumberU64(), block.Hash())

		case <-w.exitCh:
			return
		}
	}
}

// makeCurrent creates a new environment for the current cycle.
func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error {
	state, err := w.chain.StateAt(parent.Root())
	if err != nil {
		return err
	}
	env := &environment{
		signer:    types.NewEIP155Signer(w.config.ChainID),
		state:     state,
		ancestors: mapset.NewSet(),
		family:    mapset.NewSet(),
		uncles:    mapset.NewSet(),
		header:    header,
	}

	// when 08 is processed ancestors contain 07 (quick block)
	for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) {
		for _, uncle := range ancestor.Uncles() {
			env.family.Add(uncle.Hash())
		}
		env.family.Add(ancestor.Hash())
		env.ancestors.Add(ancestor.Hash())
	}

	// Keep track of transactions which return errors so they can be removed
	env.tcount = 0
	w.current = env
	return nil
}

// commitUncle adds the given block to uncle block set, returns error if failed to add.
func (w *worker) commitUncle(env *environment, uncle *types.Header) error {
	hash := uncle.Hash()
	if env.uncles.Contains(hash) {
		return errors.New("uncle not unique")
	}
	if env.header.ParentHash == uncle.ParentHash {
		return errors.New("uncle is sibling")
	}
	if !env.ancestors.Contains(uncle.ParentHash) {
		return errors.New("uncle's parent unknown")
	}
	if env.family.Contains(hash) {
		return errors.New("uncle already included")
	}
	env.uncles.Add(uncle.Hash())
	return nil
}
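
// Uncle admission, as enforced by commitUncle above (informal summary): a candidate
// header U for the block being built on top of parent P is accepted only if
//
//	U is not already in env.uncles                     (otherwise "uncle not unique")
//	U.ParentHash != header.ParentHash                  (otherwise "uncle is sibling")
//	U.ParentHash is one of the last 7 ancestors of P   (otherwise "uncle's parent unknown")
//	U is not in the family set (ancestors + uncles)    (otherwise "uncle already included")
//
// The 7-block window corresponds to the GetBlocksFromHash(parent.Hash(), 7) scan
// performed in makeCurrent above.
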
// updateSnapshot updates pending snapshot block and state.
// Note this function assumes the current variable is thread safe.
func (w *worker) updateSnapshot() {
	w.snapshotMu.Lock()
	defer w.snapshotMu.Unlock()

	var uncles []*types.Header
	w.current.uncles.Each(func(item interface{}) bool {
		hash, ok := item.(common.Hash)
		if !ok {
			return false
		}
		uncle, exist := w.localUncles[hash]
		if !exist {
			uncle, exist = w.remoteUncles[hash]
		}
		if !exist {
			return false
		}
		uncles = append(uncles, uncle.Header())
		return false
	})

	w.snapshotBlock = types.NewBlock(
		w.current.header,
		w.current.txs,
		uncles,
		w.current.receipts,
	)

	w.snapshotState = w.current.state.Copy()
}

func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) {
	snap := w.current.state.Snapshot()

	receipt, _, err := core.ApplyTransaction(w.config, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, *w.chain.GetVMConfig())
	log.Info("future transaction commit 712")
	if err != nil {
		log.Info("revert 714", "err", err)
		w.current.state.RevertToSnapshot(snap)
		return nil, err
	}
	log.Info("future transaction commit 717")
	w.current.txs = append(w.current.txs, tx)
	w.current.receipts = append(w.current.receipts, receipt)

	if tx.IsFutureTx() {
		log.Info("future transaction commit 720")
	}
	return receipt.Logs, nil
}

func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coinbase common.Address, interrupt *int32) bool {
	// Short circuit if current is nil
	if w.current == nil {
		return true
	}

	if w.current.gasPool == nil {
		w.current.gasPool = new(core.GasPool).AddGas(w.current.header.GasLimit)
	}

	var coalescedLogs []*types.Log

	for {
		// In the following three cases, we will interrupt the execution of the transaction.
		// (1) new head block event arrival, the interrupt signal is 1
		// (2) worker start or restart, the interrupt signal is 1
		// (3) worker recreates the mining block with any newly arrived transactions, the interrupt signal is 2.
		// For the first two cases, the semi-finished work will be discarded.
		// For the third case, the semi-finished work will be submitted to the consensus engine.
		if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone {
			// Notify resubmit loop to increase resubmitting interval due to too frequent commits.
			if atomic.LoadInt32(interrupt) == commitInterruptResubmit {
				ratio := float64(w.current.header.GasLimit-w.current.gasPool.Gas()) / float64(w.current.header.GasLimit)
				if ratio < 0.1 {
					ratio = 0.1
				}
				w.resubmitAdjustCh <- &intervalAdjust{
					ratio: ratio,
					inc:   true,
				}
			}
			return atomic.LoadInt32(interrupt) == commitInterruptNewHead
		}
		// If we don't have enough gas for any further transactions then we're done
		if w.current.gasPool.Gas() < params.TxGas {
			log.Trace("Not enough gas for further transactions", "have", w.current.gasPool, "want", params.TxGas)
			break
		}
		// Retrieve the next transaction and abort if all done
		tx := txs.Peek()
		if tx == nil {
			break
		}
		// Error may be ignored here. The error has already been checked
		// during transaction acceptance in the transaction pool.
		//
		// We use the eip155 signer regardless of the current hf.

		// Future tx => ignore the normal sender recovery (use FutureFrom instead)
		from := new(common.Address)
		if tx.IsFutureTx() {
			log.Info("Commit 777 is future tx")
			from = tx.FutureFrom()
		} else {
			from2, _ := types.Sender(w.current.signer, tx)
			from = &from2
			if tx.Protected() && !w.config.IsEIP155(w.current.header.Number) {
				log.Trace("Ignoring replay protected transaction", "hash", tx.Hash(), "eip155", w.config.EIP155Block)

				txs.Pop()
				continue
			}
		}
		// Check whether the tx is replay protected. If we're not in the EIP155 hf
		// phase, start ignoring the sender until we do.

		// Start executing the transaction
		w.current.state.Prepare(tx.Hash(), common.Hash{}, w.current.tcount)
		// TODO: make sure the funds are actually deducted here...
		logs, err := w.commitTransaction(tx, coinbase)
		switch err {
		case core.ErrGasLimitReached:
			// Pop the current out-of-gas transaction without shifting in the next from the account
			log.Trace("Gas limit exceeded for current block", "sender", from)
			txs.Pop()

		case core.ErrNonceTooLow:
			// New head notification data race between the transaction pool and miner, shift
			log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
			txs.Shift()

		case core.ErrNonceTooHigh:
			// Reorg notification data race between the transaction pool and miner, skip account
			log.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce())
			txs.Pop()

		case nil:
			// Everything ok, collect the logs and shift in the next transaction from the same account
			log.Info("err nil")

			coalescedLogs = append(coalescedLogs, logs...)
			w.current.tcount++
			txs.Shift()

		default:
			// Strange error, discard the transaction and get the next in line (note, the
			// nonce-too-high clause will prevent us from executing in vain).
			log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
			txs.Shift()
		}
	}

	if !w.isRunning() && len(coalescedLogs) > 0 {
		// We don't push the pendingLogsEvent while we are mining. The reason is that
		// when we are mining, the worker will regenerate a mining block every 3 seconds.
		// In order to avoid pushing the repeated pendingLog, we disable the pending log pushing.

		// make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
		// logs by filling in the block hash when the block was mined by the local miner. This can
		// cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
		cpy := make([]*types.Log, len(coalescedLogs))
		for i, l := range coalescedLogs {
			cpy[i] = new(types.Log)
			*cpy[i] = *l
		}
		go w.mux.Post(core.PendingLogsEvent{Logs: cpy})
	}
	// Notify resubmit loop to decrease resubmitting interval if current interval is larger
	// than the user-specified one.
	if interrupt != nil {
		w.resubmitAdjustCh <- &intervalAdjust{inc: false}
	}
	return false
}
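
// A sketch of how the price-and-nonce ordered set consumed above behaves, using
// hypothetical accounts A and B with transactions a0, a1 and b0 (illustrative only):
//
//	set := types.NewTransactionsByPriceAndNonce(signer, map[common.Address]types.Transactions{A: {a0, a1}, B: {b0}})
//	tx := set.Peek() // lowest-nonce head of the best-priced account, e.g. a0
//	set.Shift()      // a0 handled: advance to A's next nonce (a1) and re-sort by price
//	set.Pop()        // give up on A for this block: drop a1 too, continue with b0
//
// commitTransactions uses Shift when a transaction was executed (or only that one
// transaction should be skipped) and Pop when the whole account is abandoned for
// this block (gas limit reached, nonce too high, replay-protection mismatch).
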
// commitNewWork generates several new sealing tasks based on the parent block.
func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) {
	w.mu.RLock()
	defer w.mu.RUnlock()

	tstart := time.Now()
	parent := w.chain.CurrentBlock()

	if parent.Time().Cmp(new(big.Int).SetInt64(timestamp)) >= 0 {
		timestamp = parent.Time().Int64() + 1
	}
	// this will ensure we're not going off too far in the future
	if now := time.Now().Unix(); timestamp > now+1 {
		wait := time.Duration(timestamp-now) * time.Second
		log.Info("Mining too far in the future", "wait", common.PrettyDuration(wait))
		time.Sleep(wait)
	}

	num := parent.Number()
	num_now := num.Add(num, common.Big1)
	header := &types.Header{
		ParentHash: parent.Hash(),
		Number:     num_now,
		GasLimit:   core.CalcGasLimit(parent, w.gasFloor, w.gasCeil),
		Extra:      w.extra,
		Time:       big.NewInt(timestamp),
	}
	// Only set the coinbase if our consensus engine is running (avoid spurious block rewards)
	if w.isRunning() {
		if w.coinbase == (common.Address{}) {
			log.Error("Refusing to mine without etherbase")
			return
		}
		header.Coinbase = w.coinbase
	}
	if err := w.engine.Prepare(w.chain, header); err != nil {
		log.Error("Failed to prepare header for mining", "err", err)
		return
	}
	// If we care about TheDAO hard-fork check whether to override the extra-data or not
	if daoBlock := w.config.DAOForkBlock; daoBlock != nil {
		// Check whether the block is among the fork extra-override range
		limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange)
		if header.Number.Cmp(daoBlock) >= 0 && header.Number.Cmp(limit) < 0 {
			// Depending whether we support or oppose the fork, override differently
			if w.config.DAOForkSupport {
				header.Extra = common.CopyBytes(params.DAOForkBlockExtra)
			} else if bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
				header.Extra = []byte{} // If miner opposes, don't let it use the reserved extra-data
			}
		}
	}
	// Could potentially happen if starting to mine in an odd state.
	err := w.makeCurrent(parent, header)
	if err != nil {
		log.Error("Failed to create mining context", "err", err)
		return
	}
	// Create the current work task and check any fork transitions needed
	env := w.current
	if w.config.DAOForkSupport && w.config.DAOForkBlock != nil && w.config.DAOForkBlock.Cmp(header.Number) == 0 {
		misc.ApplyDAOHardFork(env.state)
	}
	// Accumulate the uncles for the current block
	uncles := make([]*types.Header, 0, 2)
	commitUncles := func(blocks map[common.Hash]*types.Block) {
		// Clean up stale uncle blocks first
		for hash, uncle := range blocks {
			if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() {
				delete(blocks, hash)
			}
		}
		for hash, uncle := range blocks {
			if len(uncles) == 2 {
				break
			}
			if err := w.commitUncle(env, uncle.Header()); err != nil {
				log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
			} else {
				log.Debug("Committing new uncle to block", "hash", hash)
				uncles = append(uncles, uncle.Header())
			}
		}
	}
	// Prefer locally generated uncles
	commitUncles(w.localUncles)
	commitUncles(w.remoteUncles)

	if !noempty {
		// Create an empty block based on temporary copied state for sealing in advance without waiting block
		// execution finished.
		w.commit(uncles, nil, false, tstart)
	}

	// Fill the block with all available pending transactions.
	pending, err := w.eth.TxPool().Pending()
	if err != nil {
		log.Error("Failed to fetch pending transactions", "err", err)
		return
	}
	// Short circuit if there are no available pending transactions

	// Split the pending transactions into locals and remotes
	localTxs, remoteTxs := make(map[common.Address]types.Transactions), pending

	// Collect the future txs per contract

	idx := w.chain.GetFutureIDX(num_now.Uint64())
	if len(pending) == 0 && *idx == 0 {
		w.updateSnapshot()
		return
	}
	for ii := uint(0); ii < *idx; ii++ {
		tx := w.chain.GetFutureTxByContract(num_now.Uint64(), ii)
		localTxs[*tx.FutureFrom()] = append(localTxs[*tx.FutureFrom()], tx)

		log.Info("CommitNewWork future tx added success")
		// TODO: find out why this is not being executed
	}

	for _, account := range w.eth.TxPool().Locals() {
		if txs := remoteTxs[account]; len(txs) > 0 {
			delete(remoteTxs, account)
			localTxs[account] = txs
			// add to localTxs
		}
	}
	// TODO: make the funds be drawn from the contract for the localTxs accounts
	if len(localTxs) > 0 {
		txs := types.NewTransactionsByPriceAndNonce(w.current.signer, localTxs)
		// (handled inside the function above)
		if w.commitTransactions(txs, w.coinbase, interrupt) {
			return
		}
	}
	if len(remoteTxs) > 0 {
		txs := types.NewTransactionsByPriceAndNonce(w.current.signer, remoteTxs)
		if w.commitTransactions(txs, w.coinbase, interrupt) {
			return
		}
	}
	log.Info("CommitNewWork 997")
	w.commit(uncles, w.fullTaskHook, true, tstart)
}

// commit runs any post-transaction state modifications, assembles the final block
// and commits new work if consensus engine is running.
func (w *worker) commit(uncles []*types.Header, interval func(), update bool, start time.Time) error {
	// Deep copy receipts here to avoid interaction between different tasks.
	receipts := make([]*types.Receipt, len(w.current.receipts))
	for i, l := range w.current.receipts {
		receipts[i] = new(types.Receipt)
		*receipts[i] = *l
	}
	s := w.current.state.Copy()
	log.Info("Commit 1010")
	block, err := w.engine.Finalize(w.chain, w.current.header, s, w.current.txs, uncles, w.current.receipts)
	if err != nil {
		log.Info("Commit 1014 err", "err", err)
		return err
	}
	if w.isRunning() {
		if interval != nil {
			interval()
		}
		select {
		case w.taskCh <- &task{receipts: receipts, state: s, block: block, createdAt: time.Now()}:
			w.unconfirmed.Shift(block.NumberU64() - 1)

			feesWei := new(big.Int)
			for i, tx := range block.Transactions() {
				if tx.IsFutureTx() {
					log.Info("future added before commit 1018")
				}

				feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), tx.GasPrice()))
			}
			feesEth := new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether)))

			log.Info("Commit new mining work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
				"uncles", len(uncles), "txs", w.current.tcount, "gas", block.GasUsed(), "fees", feesEth, "elapsed", common.PrettyDuration(time.Since(start)))

		case <-w.exitCh:
			log.Info("Worker has exited")
		}
	}
	if update {
		w.updateSnapshot()
	}
	return nil
}
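
// totalBlockFees is an illustrative helper restating the fee accounting performed in
// commit above; it is a sketch added for clarity and is not called anywhere in this
// file. The miner's fee income is the sum of gasUsed*gasPrice over the block's
// transactions, converted from wei to ether (params.Ether == 10^18 wei).
func totalBlockFees(block *types.Block, receipts []*types.Receipt) *big.Float {
	feesWei := new(big.Int)
	for i, tx := range block.Transactions() {
		// receipts are assumed to be index-aligned with block.Transactions(),
		// exactly as in commit above.
		feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), tx.GasPrice()))
	}
	return new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether)))
}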