github.com/karalabe/go-ethereum@v0.8.5/eth/block_pool.go (about) 1 package eth 2 3 import ( 4 "bytes" 5 "fmt" 6 "math" 7 "math/big" 8 "math/rand" 9 "sort" 10 "sync" 11 "time" 12 13 "github.com/ethereum/go-ethereum/core/types" 14 "github.com/ethereum/go-ethereum/ethutil" 15 ethlogger "github.com/ethereum/go-ethereum/logger" 16 "github.com/ethereum/go-ethereum/pow" 17 ) 18 19 var poolLogger = ethlogger.NewLogger("Blockpool") 20 21 const ( 22 blockHashesBatchSize = 256 23 blockBatchSize = 64 24 blocksRequestInterval = 500 // ms 25 blocksRequestRepetition = 1 26 blockHashesRequestInterval = 500 // ms 27 blocksRequestMaxIdleRounds = 100 28 blockHashesTimeout = 60 // seconds 29 blocksTimeout = 120 // seconds 30 ) 31 32 type poolNode struct { 33 lock sync.RWMutex 34 hash []byte 35 td *big.Int 36 block *types.Block 37 parent *poolNode 38 peer string 39 blockBy string 40 } 41 42 type poolEntry struct { 43 node *poolNode 44 section *section 45 index int 46 } 47 48 type BlockPool struct { 49 lock sync.RWMutex 50 chainLock sync.RWMutex 51 52 pool map[string]*poolEntry 53 54 peersLock sync.RWMutex 55 peers map[string]*peerInfo 56 peer *peerInfo 57 58 quit chan bool 59 purgeC chan bool 60 flushC chan bool 61 wg sync.WaitGroup 62 procWg sync.WaitGroup 63 running bool 64 65 // the minimal interface with blockchain 66 hasBlock func(hash []byte) bool 67 insertChain func(types.Blocks) error 68 verifyPoW func(pow.Block) bool 69 } 70 71 type peerInfo struct { 72 lock sync.RWMutex 73 74 td *big.Int 75 currentBlockHash []byte 76 currentBlock *types.Block 77 currentBlockC chan *types.Block 78 parentHash []byte 79 headSection *section 80 headSectionC chan *section 81 id string 82 83 requestBlockHashes func([]byte) error 84 requestBlocks func([][]byte) error 85 peerError func(int, string, ...interface{}) 86 87 sections map[string]*section 88 89 quitC chan bool 90 } 91 92 // structure to store long range links on chain to skip along 93 type section struct { 94 lock sync.RWMutex 95 parent *section 96 child *section 97 top *poolNode 98 bottom *poolNode 99 nodes []*poolNode 100 controlC chan *peerInfo 101 suicideC chan bool 102 blockChainC chan bool 103 forkC chan chan bool 104 offC chan bool 105 } 106 107 func NewBlockPool(hasBlock func(hash []byte) bool, insertChain func(types.Blocks) error, verifyPoW func(pow.Block) bool, 108 ) *BlockPool { 109 return &BlockPool{ 110 hasBlock: hasBlock, 111 insertChain: insertChain, 112 verifyPoW: verifyPoW, 113 } 114 } 115 116 // allows restart 117 func (self *BlockPool) Start() { 118 self.lock.Lock() 119 if self.running { 120 self.lock.Unlock() 121 return 122 } 123 self.running = true 124 self.quit = make(chan bool) 125 self.flushC = make(chan bool) 126 self.pool = make(map[string]*poolEntry) 127 128 self.lock.Unlock() 129 130 self.peersLock.Lock() 131 self.peers = make(map[string]*peerInfo) 132 self.peersLock.Unlock() 133 134 poolLogger.Infoln("Started") 135 136 } 137 138 func (self *BlockPool) Stop() { 139 self.lock.Lock() 140 if !self.running { 141 self.lock.Unlock() 142 return 143 } 144 self.running = false 145 146 self.lock.Unlock() 147 148 poolLogger.Infoln("Stopping...") 149 150 close(self.quit) 151 //self.wg.Wait() 152 153 self.peersLock.Lock() 154 self.peers = nil 155 self.peer = nil 156 self.peersLock.Unlock() 157 158 self.lock.Lock() 159 self.pool = nil 160 self.lock.Unlock() 161 162 poolLogger.Infoln("Stopped") 163 } 164 165 func (self *BlockPool) Purge() { 166 self.lock.Lock() 167 if !self.running { 168 self.lock.Unlock() 169 return 170 } 171 self.lock.Unlock() 172 173 poolLogger.Infoln("Purging...") 174 175 close(self.purgeC) 176 self.wg.Wait() 177 178 self.purgeC = make(chan bool) 179 180 poolLogger.Infoln("Stopped") 181 182 } 183 184 func (self *BlockPool) Wait(t time.Duration) { 185 self.lock.Lock() 186 if !self.running { 187 self.lock.Unlock() 188 return 189 } 190 self.lock.Unlock() 191 192 poolLogger.Infoln("Waiting for processes to complete...") 193 close(self.flushC) 194 w := make(chan bool) 195 go func() { 196 self.procWg.Wait() 197 close(w) 198 }() 199 200 select { 201 case <-w: 202 poolLogger.Infoln("Processes complete") 203 case <-time.After(t): 204 poolLogger.Warnf("Timeout") 205 } 206 self.flushC = make(chan bool) 207 } 208 209 // AddPeer is called by the eth protocol instance running on the peer after 210 // the status message has been received with total difficulty and current block hash 211 // AddPeer can only be used once, RemovePeer needs to be called when the peer disconnects 212 func (self *BlockPool) AddPeer(td *big.Int, currentBlockHash []byte, peerId string, requestBlockHashes func([]byte) error, requestBlocks func([][]byte) error, peerError func(int, string, ...interface{})) (best bool) { 213 214 self.peersLock.Lock() 215 defer self.peersLock.Unlock() 216 peer, ok := self.peers[peerId] 217 if ok { 218 if bytes.Compare(peer.currentBlockHash, currentBlockHash) != 0 { 219 poolLogger.Debugf("Update peer %v with td %v and current block %s", peerId, td, name(currentBlockHash)) 220 peer.lock.Lock() 221 peer.td = td 222 peer.currentBlockHash = currentBlockHash 223 peer.currentBlock = nil 224 peer.parentHash = nil 225 peer.headSection = nil 226 peer.lock.Unlock() 227 } 228 } else { 229 peer = &peerInfo{ 230 td: td, 231 currentBlockHash: currentBlockHash, 232 id: peerId, //peer.Identity().Pubkey() 233 requestBlockHashes: requestBlockHashes, 234 requestBlocks: requestBlocks, 235 peerError: peerError, 236 sections: make(map[string]*section), 237 currentBlockC: make(chan *types.Block), 238 headSectionC: make(chan *section), 239 } 240 self.peers[peerId] = peer 241 poolLogger.Debugf("add new peer %v with td %v and current block %x", peerId, td, currentBlockHash[:4]) 242 } 243 // check peer current head 244 if self.hasBlock(currentBlockHash) { 245 // peer not ahead 246 return false 247 } 248 249 if self.peer == peer { 250 // new block update 251 // peer is already active best peer, request hashes 252 poolLogger.Debugf("[%s] already the best peer. Request new head section info from %s", peerId, name(currentBlockHash)) 253 peer.headSectionC <- nil 254 best = true 255 } else { 256 currentTD := ethutil.Big0 257 if self.peer != nil { 258 currentTD = self.peer.td 259 } 260 if td.Cmp(currentTD) > 0 { 261 poolLogger.Debugf("peer %v promoted best peer", peerId) 262 self.switchPeer(self.peer, peer) 263 self.peer = peer 264 best = true 265 } 266 } 267 return 268 } 269 270 func (self *BlockPool) requestHeadSection(peer *peerInfo) { 271 self.wg.Add(1) 272 self.procWg.Add(1) 273 poolLogger.Debugf("[%s] head section at [%s] requesting info", peer.id, name(peer.currentBlockHash)) 274 275 go func() { 276 var idle bool 277 peer.lock.RLock() 278 quitC := peer.quitC 279 currentBlockHash := peer.currentBlockHash 280 peer.lock.RUnlock() 281 blockHashesRequestTimer := time.NewTimer(0) 282 blocksRequestTimer := time.NewTimer(0) 283 suicide := time.NewTimer(blockHashesTimeout * time.Second) 284 blockHashesRequestTimer.Stop() 285 defer blockHashesRequestTimer.Stop() 286 defer blocksRequestTimer.Stop() 287 288 entry := self.get(currentBlockHash) 289 if entry != nil { 290 entry.node.lock.RLock() 291 currentBlock := entry.node.block 292 entry.node.lock.RUnlock() 293 if currentBlock != nil { 294 peer.lock.Lock() 295 peer.currentBlock = currentBlock 296 peer.parentHash = currentBlock.ParentHash() 297 poolLogger.Debugf("[%s] head block [%s] found", peer.id, name(currentBlockHash)) 298 peer.lock.Unlock() 299 blockHashesRequestTimer.Reset(0) 300 blocksRequestTimer.Stop() 301 } 302 } 303 304 LOOP: 305 for { 306 307 select { 308 case <-self.quit: 309 break LOOP 310 311 case <-quitC: 312 poolLogger.Debugf("[%s] head section at [%s] incomplete - quit request loop", peer.id, name(currentBlockHash)) 313 break LOOP 314 315 case headSection := <-peer.headSectionC: 316 peer.lock.Lock() 317 peer.headSection = headSection 318 if headSection == nil { 319 oldBlockHash := currentBlockHash 320 currentBlockHash = peer.currentBlockHash 321 poolLogger.Debugf("[%s] head section changed [%s] -> [%s]", peer.id, name(oldBlockHash), name(currentBlockHash)) 322 if idle { 323 idle = false 324 suicide.Reset(blockHashesTimeout * time.Second) 325 self.procWg.Add(1) 326 } 327 blocksRequestTimer.Reset(blocksRequestInterval * time.Millisecond) 328 } else { 329 poolLogger.DebugDetailf("[%s] head section at [%s] created", peer.id, name(currentBlockHash)) 330 if !idle { 331 idle = true 332 suicide.Stop() 333 self.procWg.Done() 334 } 335 } 336 peer.lock.Unlock() 337 blockHashesRequestTimer.Stop() 338 339 case <-blockHashesRequestTimer.C: 340 poolLogger.DebugDetailf("[%s] head section at [%s] not found, requesting block hashes", peer.id, name(currentBlockHash)) 341 peer.requestBlockHashes(currentBlockHash) 342 blockHashesRequestTimer.Reset(blockHashesRequestInterval * time.Millisecond) 343 344 case currentBlock := <-peer.currentBlockC: 345 peer.lock.Lock() 346 peer.currentBlock = currentBlock 347 peer.parentHash = currentBlock.ParentHash() 348 poolLogger.DebugDetailf("[%s] head block [%s] found", peer.id, name(currentBlockHash)) 349 peer.lock.Unlock() 350 if self.hasBlock(currentBlock.ParentHash()) { 351 if err := self.insertChain(types.Blocks([]*types.Block{currentBlock})); err != nil { 352 peer.peerError(ErrInvalidBlock, "%v", err) 353 } 354 if !idle { 355 idle = true 356 suicide.Stop() 357 self.procWg.Done() 358 } 359 } else { 360 blockHashesRequestTimer.Reset(0) 361 } 362 blocksRequestTimer.Stop() 363 364 case <-blocksRequestTimer.C: 365 peer.lock.RLock() 366 poolLogger.DebugDetailf("[%s] head block [%s] not found, requesting", peer.id, name(currentBlockHash)) 367 peer.requestBlocks([][]byte{peer.currentBlockHash}) 368 peer.lock.RUnlock() 369 blocksRequestTimer.Reset(blocksRequestInterval * time.Millisecond) 370 371 case <-suicide.C: 372 peer.peerError(ErrInsufficientChainInfo, "peer failed to provide block hashes or head block for block hash %x", currentBlockHash) 373 break LOOP 374 } 375 } 376 self.wg.Done() 377 if !idle { 378 self.procWg.Done() 379 } 380 }() 381 } 382 383 // RemovePeer is called by the eth protocol when the peer disconnects 384 func (self *BlockPool) RemovePeer(peerId string) { 385 self.peersLock.Lock() 386 defer self.peersLock.Unlock() 387 peer, ok := self.peers[peerId] 388 if !ok { 389 return 390 } 391 delete(self.peers, peerId) 392 poolLogger.Debugf("remove peer %v", peerId) 393 394 // if current best peer is removed, need find a better one 395 if self.peer == peer { 396 var newPeer *peerInfo 397 max := ethutil.Big0 398 // peer with the highest self-acclaimed TD is chosen 399 for _, info := range self.peers { 400 if info.td.Cmp(max) > 0 { 401 max = info.td 402 newPeer = info 403 } 404 } 405 if newPeer != nil { 406 poolLogger.Debugf("peer %v with td %v promoted to best peer", newPeer.id, newPeer.td) 407 } else { 408 poolLogger.Warnln("no peers") 409 } 410 self.peer = newPeer 411 self.switchPeer(peer, newPeer) 412 } 413 } 414 415 // Entry point for eth protocol to add block hashes received via BlockHashesMsg 416 // only hashes from the best peer is handled 417 // this method is always responsible to initiate further hash requests until 418 // a known parent is reached unless cancelled by a peerChange event 419 // this process also launches all request processes on each chain section 420 // this function needs to run asynchronously for one peer since the message is discarded??? 421 func (self *BlockPool) AddBlockHashes(next func() ([]byte, bool), peerId string) { 422 423 // register with peer manager loop 424 425 peer, best := self.getPeer(peerId) 426 if !best { 427 return 428 } 429 // peer is still the best 430 431 var size, n int 432 var hash []byte 433 var ok, headSection bool 434 var sec, child, parent *section 435 var entry *poolEntry 436 var nodes []*poolNode 437 bestPeer := peer 438 439 hash, ok = next() 440 peer.lock.Lock() 441 if bytes.Compare(peer.parentHash, hash) == 0 { 442 if self.hasBlock(peer.currentBlockHash) { 443 return 444 } 445 poolLogger.Debugf("adding hashes at chain head for best peer %s starting from [%s]", peerId, name(peer.currentBlockHash)) 446 headSection = true 447 448 if entry := self.get(peer.currentBlockHash); entry == nil { 449 node := &poolNode{ 450 hash: peer.currentBlockHash, 451 block: peer.currentBlock, 452 peer: peerId, 453 blockBy: peerId, 454 } 455 if size == 0 { 456 sec = newSection() 457 } 458 nodes = append(nodes, node) 459 size++ 460 n++ 461 } else { 462 child = entry.section 463 } 464 } else { 465 poolLogger.Debugf("adding hashes for best peer %s starting from [%s]", peerId, name(hash)) 466 } 467 quitC := peer.quitC 468 peer.lock.Unlock() 469 470 LOOP: 471 // iterate using next (rlp stream lazy decoder) feeding hashesC 472 for ; ok; hash, ok = next() { 473 n++ 474 select { 475 case <-self.quit: 476 return 477 case <-quitC: 478 // if the peer is demoted, no more hashes taken 479 bestPeer = nil 480 break LOOP 481 default: 482 } 483 if self.hasBlock(hash) { 484 // check if known block connecting the downloaded chain to our blockchain 485 poolLogger.DebugDetailf("[%s] known block", name(hash)) 486 // mark child as absolute pool root with parent known to blockchain 487 if sec != nil { 488 self.connectToBlockChain(sec) 489 } else { 490 if child != nil { 491 self.connectToBlockChain(child) 492 } 493 } 494 break LOOP 495 } 496 // look up node in pool 497 entry = self.get(hash) 498 if entry != nil { 499 // reached a known chain in the pool 500 if entry.node == entry.section.bottom && n == 1 { 501 // the first block hash received is an orphan in the pool, so rejoice and continue 502 poolLogger.DebugDetailf("[%s] connecting child section", sectionName(entry.section)) 503 child = entry.section 504 continue LOOP 505 } 506 poolLogger.DebugDetailf("[%s] reached blockpool chain", name(hash)) 507 parent = entry.section 508 break LOOP 509 } 510 // if node for block hash does not exist, create it and index in the pool 511 node := &poolNode{ 512 hash: hash, 513 peer: peerId, 514 } 515 if size == 0 { 516 sec = newSection() 517 } 518 nodes = append(nodes, node) 519 size++ 520 } //for 521 522 self.chainLock.Lock() 523 524 poolLogger.DebugDetailf("added %v hashes sent by %s", n, peerId) 525 526 if parent != nil && entry != nil && entry.node != parent.top { 527 poolLogger.DebugDetailf("[%s] split section at fork", sectionName(parent)) 528 parent.controlC <- nil 529 waiter := make(chan bool) 530 parent.forkC <- waiter 531 chain := parent.nodes 532 parent.nodes = chain[entry.index:] 533 parent.top = parent.nodes[0] 534 orphan := newSection() 535 self.link(orphan, parent.child) 536 self.processSection(orphan, chain[0:entry.index]) 537 orphan.controlC <- nil 538 close(waiter) 539 } 540 541 if size > 0 { 542 self.processSection(sec, nodes) 543 poolLogger.DebugDetailf("[%s]->[%s](%v)->[%s] new chain section", sectionName(parent), sectionName(sec), size, sectionName(child)) 544 self.link(parent, sec) 545 self.link(sec, child) 546 } else { 547 poolLogger.DebugDetailf("[%s]->[%s] connecting known sections", sectionName(parent), sectionName(child)) 548 self.link(parent, child) 549 } 550 551 self.chainLock.Unlock() 552 553 if parent != nil && bestPeer != nil { 554 self.activateChain(parent, peer) 555 poolLogger.Debugf("[%s] activate parent section [%s]", name(parent.top.hash), sectionName(parent)) 556 } 557 558 if sec != nil { 559 peer.addSection(sec.top.hash, sec) 560 // request next section here once, only repeat if bottom block arrives, 561 // otherwise no way to check if it arrived 562 peer.requestBlockHashes(sec.bottom.hash) 563 sec.controlC <- bestPeer 564 poolLogger.Debugf("[%s] activate new section", sectionName(sec)) 565 } 566 567 if headSection { 568 var headSec *section 569 switch { 570 case sec != nil: 571 headSec = sec 572 case child != nil: 573 headSec = child 574 default: 575 headSec = parent 576 } 577 peer.headSectionC <- headSec 578 } 579 } 580 581 func name(hash []byte) (name string) { 582 if hash == nil { 583 name = "" 584 } else { 585 name = fmt.Sprintf("%x", hash[:4]) 586 } 587 return 588 } 589 590 func sectionName(section *section) (name string) { 591 if section == nil { 592 name = "" 593 } else { 594 name = fmt.Sprintf("%x-%x", section.bottom.hash[:4], section.top.hash[:4]) 595 } 596 return 597 } 598 599 // AddBlock is the entry point for the eth protocol when blockmsg is received upon requests 600 // It has a strict interpretation of the protocol in that if the block received has not been requested, it results in an error (which can be ignored) 601 // block is checked for PoW 602 // only the first PoW-valid block for a hash is considered legit 603 func (self *BlockPool) AddBlock(block *types.Block, peerId string) { 604 hash := block.Hash() 605 self.peersLock.Lock() 606 peer := self.peer 607 self.peersLock.Unlock() 608 609 entry := self.get(hash) 610 if bytes.Compare(hash, peer.currentBlockHash) == 0 { 611 poolLogger.Debugf("add head block [%s] for peer %s", name(hash), peerId) 612 peer.currentBlockC <- block 613 } else { 614 if entry == nil { 615 poolLogger.Warnf("unrequested block [%s] by peer %s", name(hash), peerId) 616 self.peerError(peerId, ErrUnrequestedBlock, "%x", hash) 617 } 618 } 619 if entry == nil { 620 return 621 } 622 623 node := entry.node 624 node.lock.Lock() 625 defer node.lock.Unlock() 626 627 // check if block already present 628 if node.block != nil { 629 poolLogger.DebugDetailf("block [%s] already sent by %s", name(hash), node.blockBy) 630 return 631 } 632 633 if self.hasBlock(hash) { 634 poolLogger.DebugDetailf("block [%s] already known", name(hash)) 635 } else { 636 637 // validate block for PoW 638 if !self.verifyPoW(block) { 639 poolLogger.Warnf("invalid pow on block [%s %v] by peer %s", name(hash), block.Number(), peerId) 640 self.peerError(peerId, ErrInvalidPoW, "%x", hash) 641 return 642 } 643 } 644 poolLogger.DebugDetailf("added block [%s] sent by peer %s", name(hash), peerId) 645 node.block = block 646 node.blockBy = peerId 647 648 } 649 650 func (self *BlockPool) connectToBlockChain(section *section) { 651 select { 652 case <-section.offC: 653 self.addSectionToBlockChain(section) 654 case <-section.blockChainC: 655 default: 656 close(section.blockChainC) 657 } 658 } 659 660 func (self *BlockPool) addSectionToBlockChain(section *section) (rest int, err error) { 661 662 var blocks types.Blocks 663 var node *poolNode 664 var keys []string 665 rest = len(section.nodes) 666 for rest > 0 { 667 rest-- 668 node = section.nodes[rest] 669 node.lock.RLock() 670 block := node.block 671 node.lock.RUnlock() 672 if block == nil { 673 break 674 } 675 keys = append(keys, string(node.hash)) 676 blocks = append(blocks, block) 677 } 678 679 self.lock.Lock() 680 for _, key := range keys { 681 delete(self.pool, key) 682 } 683 self.lock.Unlock() 684 685 poolLogger.Infof("insert %v blocks into blockchain", len(blocks)) 686 err = self.insertChain(blocks) 687 if err != nil { 688 // TODO: not clear which peer we need to address 689 // peerError should dispatch to peer if still connected and disconnect 690 self.peerError(node.blockBy, ErrInvalidBlock, "%v", err) 691 poolLogger.Warnf("invalid block %x", node.hash) 692 poolLogger.Warnf("penalise peers %v (hash), %v (block)", node.peer, node.blockBy) 693 // penalise peer in node.blockBy 694 // self.disconnect() 695 } 696 return 697 } 698 699 func (self *BlockPool) activateChain(section *section, peer *peerInfo) { 700 poolLogger.DebugDetailf("[%s] activate known chain for peer %s", sectionName(section), peer.id) 701 i := 0 702 LOOP: 703 for section != nil { 704 // register this section with the peer and quit if registered 705 poolLogger.DebugDetailf("[%s] register section with peer %s", sectionName(section), peer.id) 706 if peer.addSection(section.top.hash, section) == section { 707 return 708 } 709 poolLogger.DebugDetailf("[%s] activate section process", sectionName(section)) 710 select { 711 case section.controlC <- peer: 712 case <-section.offC: 713 } 714 i++ 715 section = self.getParent(section) 716 select { 717 case <-peer.quitC: 718 break LOOP 719 case <-self.quit: 720 break LOOP 721 default: 722 } 723 } 724 } 725 726 // main worker thread on each section in the poolchain 727 // - kills the section if there are blocks missing after an absolute time 728 // - kills the section if there are maxIdleRounds of idle rounds of block requests with no response 729 // - periodically polls the chain section for missing blocks which are then requested from peers 730 // - registers the process controller on the peer so that if the peer is promoted as best peer the second time (after a disconnect of a better one), all active processes are switched back on unless they expire and killed () 731 // - when turned off (if peer disconnects and new peer connects with alternative chain), no blockrequests are made but absolute expiry timer is ticking 732 // - when turned back on it recursively calls itself on the root of the next chain section 733 // - when exits, signals to 734 func (self *BlockPool) processSection(sec *section, nodes []*poolNode) { 735 736 for i, node := range nodes { 737 entry := &poolEntry{node: node, section: sec, index: i} 738 self.set(node.hash, entry) 739 } 740 741 sec.bottom = nodes[len(nodes)-1] 742 sec.top = nodes[0] 743 sec.nodes = nodes 744 poolLogger.DebugDetailf("[%s] setup section process", sectionName(sec)) 745 746 self.wg.Add(1) 747 go func() { 748 749 // absolute time after which sub-chain is killed if not complete (some blocks are missing) 750 suicideTimer := time.After(blocksTimeout * time.Second) 751 752 var peer, newPeer *peerInfo 753 754 var blocksRequestTimer, blockHashesRequestTimer <-chan time.Time 755 var blocksRequestTime, blockHashesRequestTime bool 756 var blocksRequests, blockHashesRequests int 757 var blocksRequestsComplete, blockHashesRequestsComplete bool 758 759 // node channels for the section 760 var missingC, processC, offC chan *poolNode 761 // container for missing block hashes 762 var hashes [][]byte 763 764 var i, missing, lastMissing, depth int 765 var idle int 766 var init, done, same, ready bool 767 var insertChain bool 768 var quitC chan bool 769 770 var blockChainC = sec.blockChainC 771 772 var parentHash []byte 773 774 LOOP: 775 for { 776 777 if insertChain { 778 insertChain = false 779 rest, err := self.addSectionToBlockChain(sec) 780 if err != nil { 781 close(sec.suicideC) 782 continue LOOP 783 } 784 if rest == 0 { 785 blocksRequestsComplete = true 786 child := self.getChild(sec) 787 if child != nil { 788 self.connectToBlockChain(child) 789 } 790 } 791 } 792 793 if blockHashesRequestsComplete && blocksRequestsComplete { 794 // not waiting for hashes any more 795 poolLogger.Debugf("[%s] section complete %v blocks retrieved (%v attempts), hash requests complete on root (%v attempts)", sectionName(sec), depth, blocksRequests, blockHashesRequests) 796 break LOOP 797 } // otherwise suicide if no hashes coming 798 799 if done { 800 // went through all blocks in section 801 if missing == 0 { 802 // no missing blocks 803 poolLogger.DebugDetailf("[%s] got all blocks. process complete (%v total blocksRequests): missing %v/%v/%v", sectionName(sec), blocksRequests, missing, lastMissing, depth) 804 blocksRequestsComplete = true 805 blocksRequestTimer = nil 806 blocksRequestTime = false 807 } else { 808 poolLogger.DebugDetailf("[%s] section checked: missing %v/%v/%v", sectionName(sec), missing, lastMissing, depth) 809 // some missing blocks 810 blocksRequests++ 811 if len(hashes) > 0 { 812 // send block requests to peers 813 self.requestBlocks(blocksRequests, hashes) 814 hashes = nil 815 } 816 if missing == lastMissing { 817 // idle round 818 if same { 819 // more than once 820 idle++ 821 // too many idle rounds 822 if idle >= blocksRequestMaxIdleRounds { 823 poolLogger.DebugDetailf("[%s] block requests had %v idle rounds (%v total attempts): missing %v/%v/%v\ngiving up...", sectionName(sec), idle, blocksRequests, missing, lastMissing, depth) 824 close(sec.suicideC) 825 } 826 } else { 827 idle = 0 828 } 829 same = true 830 } else { 831 same = false 832 } 833 } 834 lastMissing = missing 835 ready = true 836 done = false 837 // save a new processC (blocks still missing) 838 offC = missingC 839 missingC = processC 840 // put processC offline 841 processC = nil 842 } 843 // 844 845 if ready && blocksRequestTime && !blocksRequestsComplete { 846 poolLogger.DebugDetailf("[%s] check if new blocks arrived (attempt %v): missing %v/%v/%v", sectionName(sec), blocksRequests, missing, lastMissing, depth) 847 blocksRequestTimer = time.After(blocksRequestInterval * time.Millisecond) 848 blocksRequestTime = false 849 processC = offC 850 } 851 852 if blockHashesRequestTime { 853 var parentSection = self.getParent(sec) 854 if parentSection == nil { 855 if parent := self.get(parentHash); parent != nil { 856 parentSection = parent.section 857 self.chainLock.Lock() 858 self.link(parentSection, sec) 859 self.chainLock.Unlock() 860 } else { 861 if self.hasBlock(parentHash) { 862 insertChain = true 863 blockHashesRequestTime = false 864 blockHashesRequestTimer = nil 865 blockHashesRequestsComplete = true 866 continue LOOP 867 } 868 } 869 } 870 if parentSection != nil { 871 // if not root of chain, switch off 872 poolLogger.DebugDetailf("[%s] parent found, hash requests deactivated (after %v total attempts)\n", sectionName(sec), blockHashesRequests) 873 blockHashesRequestTimer = nil 874 blockHashesRequestsComplete = true 875 } else { 876 blockHashesRequests++ 877 poolLogger.Debugf("[%s] hash request on root (%v total attempts)\n", sectionName(sec), blockHashesRequests) 878 peer.requestBlockHashes(sec.bottom.hash) 879 blockHashesRequestTimer = time.After(blockHashesRequestInterval * time.Millisecond) 880 } 881 blockHashesRequestTime = false 882 } 883 884 select { 885 case <-self.quit: 886 break LOOP 887 888 case <-quitC: 889 // peer quit or demoted, put section in idle mode 890 quitC = nil 891 go func() { 892 sec.controlC <- nil 893 }() 894 895 case <-self.purgeC: 896 suicideTimer = time.After(0) 897 898 case <-suicideTimer: 899 close(sec.suicideC) 900 poolLogger.Debugf("[%s] timeout. (%v total attempts): missing %v/%v/%v", sectionName(sec), blocksRequests, missing, lastMissing, depth) 901 902 case <-sec.suicideC: 903 poolLogger.Debugf("[%s] suicide", sectionName(sec)) 904 905 // first delink from child and parent under chainlock 906 self.chainLock.Lock() 907 self.link(nil, sec) 908 self.link(sec, nil) 909 self.chainLock.Unlock() 910 // delete node entries from pool index under pool lock 911 self.lock.Lock() 912 for _, node := range sec.nodes { 913 delete(self.pool, string(node.hash)) 914 } 915 self.lock.Unlock() 916 917 break LOOP 918 919 case <-blocksRequestTimer: 920 poolLogger.DebugDetailf("[%s] block request time", sectionName(sec)) 921 blocksRequestTime = true 922 923 case <-blockHashesRequestTimer: 924 poolLogger.DebugDetailf("[%s] hash request time", sectionName(sec)) 925 blockHashesRequestTime = true 926 927 case newPeer = <-sec.controlC: 928 929 // active -> idle 930 if peer != nil && newPeer == nil { 931 self.procWg.Done() 932 if init { 933 poolLogger.Debugf("[%s] idle mode (%v total attempts): missing %v/%v/%v", sectionName(sec), blocksRequests, missing, lastMissing, depth) 934 } 935 blocksRequestTime = false 936 blocksRequestTimer = nil 937 blockHashesRequestTime = false 938 blockHashesRequestTimer = nil 939 if processC != nil { 940 offC = processC 941 processC = nil 942 } 943 } 944 945 // idle -> active 946 if peer == nil && newPeer != nil { 947 self.procWg.Add(1) 948 949 poolLogger.Debugf("[%s] active mode", sectionName(sec)) 950 if !blocksRequestsComplete { 951 blocksRequestTime = true 952 } 953 if !blockHashesRequestsComplete && parentHash != nil { 954 blockHashesRequestTime = true 955 } 956 if !init { 957 processC = make(chan *poolNode, blockHashesBatchSize) 958 missingC = make(chan *poolNode, blockHashesBatchSize) 959 i = 0 960 missing = 0 961 self.wg.Add(1) 962 self.procWg.Add(1) 963 depth = len(sec.nodes) 964 lastMissing = depth 965 // if not run at least once fully, launch iterator 966 go func() { 967 var node *poolNode 968 IT: 969 for _, node = range sec.nodes { 970 select { 971 case processC <- node: 972 case <-self.quit: 973 break IT 974 } 975 } 976 close(processC) 977 self.wg.Done() 978 self.procWg.Done() 979 }() 980 } else { 981 poolLogger.Debugf("[%s] restore earlier state", sectionName(sec)) 982 processC = offC 983 } 984 } 985 // reset quitC to current best peer 986 if newPeer != nil { 987 quitC = newPeer.quitC 988 } 989 peer = newPeer 990 991 case waiter := <-sec.forkC: 992 // this case just blocks the process until section is split at the fork 993 <-waiter 994 init = false 995 done = false 996 ready = false 997 998 case node, ok := <-processC: 999 if !ok && !init { 1000 // channel closed, first iteration finished 1001 init = true 1002 done = true 1003 processC = make(chan *poolNode, missing) 1004 poolLogger.DebugDetailf("[%s] section initalised: missing %v/%v/%v", sectionName(sec), missing, lastMissing, depth) 1005 continue LOOP 1006 } 1007 if ready { 1008 i = 0 1009 missing = 0 1010 ready = false 1011 } 1012 i++ 1013 // if node has no block 1014 node.lock.RLock() 1015 block := node.block 1016 node.lock.RUnlock() 1017 if block == nil { 1018 missing++ 1019 hashes = append(hashes, node.hash) 1020 if len(hashes) == blockBatchSize { 1021 poolLogger.Debugf("[%s] request %v missing blocks", sectionName(sec), len(hashes)) 1022 self.requestBlocks(blocksRequests, hashes) 1023 hashes = nil 1024 } 1025 missingC <- node 1026 } else { 1027 if i == lastMissing { 1028 if blockChainC == nil { 1029 insertChain = true 1030 } else { 1031 if parentHash == nil { 1032 parentHash = block.ParentHash() 1033 poolLogger.Debugf("[%s] found root block [%s]", sectionName(sec), name(parentHash)) 1034 blockHashesRequestTime = true 1035 } 1036 } 1037 } 1038 } 1039 if i == lastMissing && init { 1040 done = true 1041 } 1042 1043 case <-blockChainC: 1044 // closed blockChain channel indicates that the blockpool is reached 1045 // connected to the blockchain, insert the longest chain of blocks 1046 poolLogger.Debugf("[%s] reached blockchain", sectionName(sec)) 1047 blockChainC = nil 1048 // switch off hash requests in case they were on 1049 blockHashesRequestTime = false 1050 blockHashesRequestTimer = nil 1051 blockHashesRequestsComplete = true 1052 // section root has block 1053 if len(sec.nodes) > 0 && sec.nodes[len(sec.nodes)-1].block != nil { 1054 insertChain = true 1055 } 1056 continue LOOP 1057 1058 } // select 1059 } // for 1060 1061 close(sec.offC) 1062 1063 self.wg.Done() 1064 if peer != nil { 1065 self.procWg.Done() 1066 } 1067 }() 1068 return 1069 } 1070 1071 func (self *BlockPool) peerError(peerId string, code int, format string, params ...interface{}) { 1072 self.peersLock.RLock() 1073 defer self.peersLock.RUnlock() 1074 peer, ok := self.peers[peerId] 1075 if ok { 1076 peer.peerError(code, format, params...) 1077 } 1078 } 1079 1080 func (self *BlockPool) requestBlocks(attempts int, hashes [][]byte) { 1081 self.wg.Add(1) 1082 self.procWg.Add(1) 1083 go func() { 1084 // distribute block request among known peers 1085 self.peersLock.Lock() 1086 defer self.peersLock.Unlock() 1087 peerCount := len(self.peers) 1088 // on first attempt use the best peer 1089 if attempts == 0 { 1090 poolLogger.Debugf("request %v missing blocks from best peer %s", len(hashes), self.peer.id) 1091 self.peer.requestBlocks(hashes) 1092 return 1093 } 1094 repetitions := int(math.Min(float64(peerCount), float64(blocksRequestRepetition))) 1095 i := 0 1096 indexes := rand.Perm(peerCount)[0:repetitions] 1097 sort.Ints(indexes) 1098 poolLogger.Debugf("request %v missing blocks from %v/%v peers: chosen %v", len(hashes), repetitions, peerCount, indexes) 1099 for _, peer := range self.peers { 1100 if i == indexes[0] { 1101 poolLogger.Debugf("request %v missing blocks [%x/%x] from peer %s", len(hashes), hashes[0][:4], hashes[len(hashes)-1][:4], peer.id) 1102 peer.requestBlocks(hashes) 1103 indexes = indexes[1:] 1104 if len(indexes) == 0 { 1105 break 1106 } 1107 } 1108 i++ 1109 } 1110 self.wg.Done() 1111 self.procWg.Done() 1112 }() 1113 } 1114 1115 func (self *BlockPool) getPeer(peerId string) (*peerInfo, bool) { 1116 self.peersLock.RLock() 1117 defer self.peersLock.RUnlock() 1118 if self.peer != nil && self.peer.id == peerId { 1119 return self.peer, true 1120 } 1121 info, ok := self.peers[peerId] 1122 if !ok { 1123 return nil, false 1124 } 1125 return info, false 1126 } 1127 1128 func (self *peerInfo) addSection(hash []byte, section *section) (found *section) { 1129 self.lock.Lock() 1130 defer self.lock.Unlock() 1131 key := string(hash) 1132 found = self.sections[key] 1133 poolLogger.DebugDetailf("[%s] section process stored for %s", sectionName(section), self.id) 1134 self.sections[key] = section 1135 return 1136 } 1137 1138 func (self *BlockPool) switchPeer(oldPeer, newPeer *peerInfo) { 1139 if newPeer != nil { 1140 newPeer.quitC = make(chan bool) 1141 poolLogger.DebugDetailf("[%s] activate section processes", newPeer.id) 1142 var addSections []*section 1143 for hash, section := range newPeer.sections { 1144 // split sections get reorganised here 1145 if string(section.top.hash) != hash { 1146 addSections = append(addSections, section) 1147 if entry := self.get([]byte(hash)); entry != nil { 1148 addSections = append(addSections, entry.section) 1149 } 1150 } 1151 } 1152 for _, section := range addSections { 1153 newPeer.sections[string(section.top.hash)] = section 1154 } 1155 for hash, section := range newPeer.sections { 1156 // this will block if section process is waiting for peer lock 1157 select { 1158 case <-section.offC: 1159 poolLogger.DebugDetailf("[%s][%x] section process complete - remove", newPeer.id, hash[:4]) 1160 delete(newPeer.sections, hash) 1161 case section.controlC <- newPeer: 1162 poolLogger.DebugDetailf("[%s][%x] activates section [%s]", newPeer.id, hash[:4], sectionName(section)) 1163 } 1164 } 1165 newPeer.lock.Lock() 1166 headSection := newPeer.headSection 1167 currentBlockHash := newPeer.currentBlockHash 1168 newPeer.lock.Unlock() 1169 if headSection == nil { 1170 poolLogger.DebugDetailf("[%s] head section for [%s] not created, requesting info", newPeer.id, name(currentBlockHash)) 1171 self.requestHeadSection(newPeer) 1172 } else { 1173 if entry := self.get(currentBlockHash); entry != nil { 1174 headSection = entry.section 1175 } 1176 poolLogger.DebugDetailf("[%s] activate chain at head section [%s] for current head [%s]", newPeer.id, sectionName(headSection), name(currentBlockHash)) 1177 self.activateChain(headSection, newPeer) 1178 } 1179 } 1180 if oldPeer != nil { 1181 poolLogger.DebugDetailf("[%s] quit section processes", oldPeer.id) 1182 close(oldPeer.quitC) 1183 } 1184 } 1185 1186 func (self *BlockPool) getParent(sec *section) *section { 1187 self.chainLock.RLock() 1188 defer self.chainLock.RUnlock() 1189 return sec.parent 1190 } 1191 1192 func (self *BlockPool) getChild(sec *section) *section { 1193 self.chainLock.RLock() 1194 defer self.chainLock.RUnlock() 1195 return sec.child 1196 } 1197 1198 func newSection() (sec *section) { 1199 sec = §ion{ 1200 controlC: make(chan *peerInfo), 1201 suicideC: make(chan bool), 1202 blockChainC: make(chan bool), 1203 offC: make(chan bool), 1204 forkC: make(chan chan bool), 1205 } 1206 return 1207 } 1208 1209 // link should only be called under chainLock 1210 func (self *BlockPool) link(parent *section, child *section) { 1211 if parent != nil { 1212 exChild := parent.child 1213 parent.child = child 1214 if exChild != nil && exChild != child { 1215 poolLogger.Debugf("[%s] chain fork [%s] -> [%s]", sectionName(parent), sectionName(exChild), sectionName(child)) 1216 exChild.parent = nil 1217 } 1218 } 1219 if child != nil { 1220 exParent := child.parent 1221 if exParent != nil && exParent != parent { 1222 poolLogger.Debugf("[%s] chain reverse fork [%s] -> [%s]", sectionName(child), sectionName(exParent), sectionName(parent)) 1223 exParent.child = nil 1224 } 1225 child.parent = parent 1226 } 1227 } 1228 1229 func (self *BlockPool) get(hash []byte) (node *poolEntry) { 1230 self.lock.RLock() 1231 defer self.lock.RUnlock() 1232 return self.pool[string(hash)] 1233 } 1234 1235 func (self *BlockPool) set(hash []byte, node *poolEntry) { 1236 self.lock.Lock() 1237 defer self.lock.Unlock() 1238 self.pool[string(hash)] = node 1239 }