github.com/XinFinOrg/xdcchain@v1.1.0/les/fetcher.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package les implements the Light Ethereum Subprotocol.
package les

import (
	"math/big"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/log"
)

const (
	blockDelayTimeout    = time.Second * 10 // timeout for a peer to announce a head that has already been confirmed by others
	maxNodeCount         = 20               // maximum number of fetcherTreeNode entries remembered for each peer
	serverStateAvailable = 100              // number of recent blocks where state availability is assumed
)

// lightFetcher implements retrieval of newly announced headers. It also provides a peerHasBlock function for the
// ODR system to ensure that we only request data related to a certain block from peers who have already processed
// and announced that block.
type lightFetcher struct {
	pm    *ProtocolManager
	odr   *LesOdr
	chain *light.LightChain

	lock            sync.Mutex // lock protects access to the fetcher's internal state variables except sent requests
	maxConfirmedTd  *big.Int
	peers           map[*peer]*fetcherPeerInfo
	lastUpdateStats *updateStatsEntry
	syncing         bool
	syncDone        chan *peer

	reqMu      sync.RWMutex // reqMu protects access to sent header fetch requests
	requested  map[uint64]fetchRequest
	deliverChn chan fetchResponse
	timeoutChn chan uint64
	requestChn chan bool // true if initiated from outside
}

// fetcherPeerInfo holds fetcher-specific information about each active peer
type fetcherPeerInfo struct {
	root, lastAnnounced *fetcherTreeNode
	nodeCnt             int
	confirmedTd         *big.Int
	bestConfirmed       *fetcherTreeNode
	nodeByHash          map[common.Hash]*fetcherTreeNode
	firstUpdateStats    *updateStatsEntry
}

// fetcherTreeNode is a node of a tree that holds information about blocks recently
// announced and confirmed by a certain peer. Each new announce message from a peer
// adds nodes to the tree, based on the previously announced head and the reorg depth.
// There are three possible states for a tree node:
// - announced: not downloaded (known) yet, but we know its hash, number and td
// - intermediate: not known, hash and td are empty, they are filled out when it becomes known
// - known: both announced by this peer and downloaded (from any peer).
// This structure makes it possible to always know which peer has a certain block,
// which is necessary for selecting a suitable peer for ODR requests and also for
// canonizing new heads. It also helps to always download the minimum necessary
// amount of headers with a single request.
type fetcherTreeNode struct {
	hash             common.Hash
	number           uint64
	td               *big.Int
	known, requested bool
	parent           *fetcherTreeNode
	children         []*fetcherTreeNode
}
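// The sketch below is illustrative only (a hypothetical helper, not part of
// the protocol logic): it shows how an announce with reorg depth zero extends
// a peer's block tree, creating an empty intermediate node for each skipped
// block height and filling in the announced head with its hash and td.
func exampleAnnounceTree() *fetcherTreeNode {
	// known node: already announced and downloaded
	root := &fetcherTreeNode{hash: common.HexToHash("0x01"), number: 100, td: big.NewInt(1000), known: true}
	// intermediate node: hash and td stay empty until the header becomes known
	mid := &fetcherTreeNode{number: 101, parent: root}
	root.children = append(root.children, mid)
	// announced node: hash, number and td are taken from the announce message
	head := &fetcherTreeNode{hash: common.HexToHash("0x02"), number: 102, td: big.NewInt(1020), parent: mid}
	mid.children = append(mid.children, head)
	return head
}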
// fetchRequest represents a header download request
type fetchRequest struct {
	hash    common.Hash
	amount  uint64
	peer    *peer
	sent    mclock.AbsTime
	timeout bool
}

// fetchResponse represents a header download response
type fetchResponse struct {
	reqID   uint64
	headers []*types.Header
	peer    *peer
}

// newLightFetcher creates a new light fetcher
func newLightFetcher(pm *ProtocolManager) *lightFetcher {
	f := &lightFetcher{
		pm:             pm,
		chain:          pm.blockchain.(*light.LightChain),
		odr:            pm.odr,
		peers:          make(map[*peer]*fetcherPeerInfo),
		deliverChn:     make(chan fetchResponse, 100),
		requested:      make(map[uint64]fetchRequest),
		timeoutChn:     make(chan uint64),
		requestChn:     make(chan bool, 100),
		syncDone:       make(chan *peer),
		maxConfirmedTd: big.NewInt(0),
	}
	pm.peers.notify(f)

	f.pm.wg.Add(1)
	go f.syncLoop()
	return f
}

// syncLoop is the main event loop of the light fetcher
func (f *lightFetcher) syncLoop() {
	requesting := false
	defer f.pm.wg.Done()
	for {
		select {
		case <-f.pm.quitSync:
			return
		// when a new announce is received, request loop keeps running until
		// no further requests are necessary or possible
		case newAnnounce := <-f.requestChn:
			f.lock.Lock()
			s := requesting
			requesting = false
			var (
				rq      *distReq
				reqID   uint64
				syncing bool
			)
			if !f.syncing && !(newAnnounce && s) {
				rq, reqID, syncing = f.nextRequest()
			}
			f.lock.Unlock()

			if rq != nil {
				requesting = true
				if _, ok := <-f.pm.reqDist.queue(rq); ok {
					if syncing {
						f.lock.Lock()
						f.syncing = true
						f.lock.Unlock()
					} else {
						go func() {
							time.Sleep(softRequestTimeout)
							f.reqMu.Lock()
							req, ok := f.requested[reqID]
							if ok {
								req.timeout = true
								f.requested[reqID] = req
							}
							f.reqMu.Unlock()
							// keep starting new requests while possible
							f.requestChn <- false
						}()
					}
				} else {
					f.requestChn <- false
				}
			}
		case reqID := <-f.timeoutChn:
			f.reqMu.Lock()
			req, ok := f.requested[reqID]
			if ok {
				delete(f.requested, reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)
				req.peer.Log().Debug("Fetching data timed out hard")
				go f.pm.removePeer(req.peer.id)
			}
		case resp := <-f.deliverChn:
			f.reqMu.Lock()
			req, ok := f.requested[resp.reqID]
			if ok && req.peer != resp.peer {
				ok = false
			}
			if ok {
				delete(f.requested, resp.reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), req.timeout)
			}
			f.lock.Lock()
			if !ok || !(f.syncing || f.processResponse(req, resp)) {
				resp.peer.Log().Debug("Failed processing response")
				go f.pm.removePeer(resp.peer.id)
			}
			f.lock.Unlock()
		case p := <-f.syncDone:
			f.lock.Lock()
			p.Log().Debug("Done synchronising with peer")
			f.checkSyncedHeaders(p)
			f.syncing = false
			f.lock.Unlock()
			f.requestChn <- false
		}
	}
}
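// The helper below is a hypothetical, self-contained sketch of the two-stage
// timeout scheme used by syncLoop: after softRequestTimeout a request is only
// marked as timed out (a late response is still accepted, but the peer's
// response-time statistics are penalised), while the hard timeout delivered
// on timeoutChn causes the peer to be dropped. It is not used by the fetcher.
func exampleTimeoutPattern(deliver <-chan struct{}) (timedOut, dropped bool) {
	soft := time.After(softRequestTimeout)
	hard := time.After(hardRequestTimeout)
	for {
		select {
		case <-deliver:
			return timedOut, false // response arrived, possibly after the soft timeout
		case <-soft:
			timedOut = true // keep waiting, but remember that the response is late
		case <-hard:
			return timedOut, true // give up and drop the peer
		}
	}
}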
// registerPeer adds a new peer to the fetcher's peer set
func (f *lightFetcher) registerPeer(p *peer) {
	p.lock.Lock()
	p.hasBlock = func(hash common.Hash, number uint64, hasState bool) bool {
		return f.peerHasBlock(p, hash, number, hasState)
	}
	p.lock.Unlock()

	f.lock.Lock()
	defer f.lock.Unlock()

	f.peers[p] = &fetcherPeerInfo{nodeByHash: make(map[common.Hash]*fetcherTreeNode)}
}

// unregisterPeer removes a peer from the fetcher's peer set
func (f *lightFetcher) unregisterPeer(p *peer) {
	p.lock.Lock()
	p.hasBlock = nil
	p.lock.Unlock()

	f.lock.Lock()
	defer f.lock.Unlock()

	// check for potential timed out block delay statistics
	f.checkUpdateStats(p, nil)
	delete(f.peers, p)
}

// announce processes a new announcement message received from a peer, adding new
// nodes to the peer's block tree and removing old nodes if necessary
func (f *lightFetcher) announce(p *peer, head *announceData) {
	f.lock.Lock()
	defer f.lock.Unlock()
	p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth)

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Announcement from unknown peer")
		return
	}

	if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {
		// announced tds should be strictly monotonic
		p.Log().Debug("Received non-monotonic td", "current", head.Td, "previous", fp.lastAnnounced.td)
		go f.pm.removePeer(p.id)
		return
	}

	n := fp.lastAnnounced
	for i := uint64(0); i < head.ReorgDepth; i++ {
		if n == nil {
			break
		}
		n = n.parent
	}
	// n is now the reorg common ancestor, add a new branch of nodes
	if n != nil && (head.Number >= n.number+maxNodeCount || head.Number <= n.number) {
		// if the announced head block height is lower than or equal to n's, or too far
		// from it to add intermediate nodes, then discard the previous announcement
		// info and trigger a resync
		n = nil
		fp.nodeCnt = 0
		fp.nodeByHash = make(map[common.Hash]*fetcherTreeNode)
	}
	if n != nil {
		// check if the node count is too high to add new nodes, discard oldest ones if necessary
		locked := false
		for uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil {
			if !locked {
				f.chain.LockChain()
				defer f.chain.UnlockChain()
				locked = true
			}
			// if one of root's children is canonical, keep it, delete other branches and root itself
			var newRoot *fetcherTreeNode
			for i, nn := range fp.root.children {
				if rawdb.ReadCanonicalHash(f.pm.chainDb, nn.number) == nn.hash {
					fp.root.children = append(fp.root.children[:i], fp.root.children[i+1:]...)
					nn.parent = nil
					newRoot = nn
					break
				}
			}
			fp.deleteNode(fp.root)
			if n == fp.root {
				n = newRoot
			}
			fp.root = newRoot
			if newRoot == nil || !f.checkKnownNode(p, newRoot) {
				fp.bestConfirmed = nil
				fp.confirmedTd = nil
			}

			if n == nil {
				break
			}
		}
		if n != nil {
			for n.number < head.Number {
				nn := &fetcherTreeNode{number: n.number + 1, parent: n}
				n.children = append(n.children, nn)
				n = nn
				fp.nodeCnt++
			}
			n.hash = head.Hash
			n.td = head.Td
			fp.nodeByHash[n.hash] = n
		}
	}
	if n == nil {
		// could not find the reorg common ancestor or had to delete the entire tree,
		// a new root and a resync are needed
		if fp.root != nil {
			fp.deleteNode(fp.root)
		}
		n = &fetcherTreeNode{hash: head.Hash, number: head.Number, td: head.Td}
		fp.root = n
		fp.nodeCnt++
		fp.nodeByHash[n.hash] = n
		fp.bestConfirmed = nil
		fp.confirmedTd = nil
	}

	f.checkKnownNode(p, n)
	p.lock.Lock()
	p.headInfo = head
	fp.lastAnnounced = n
	p.lock.Unlock()
	f.checkUpdateStats(p, nil)
	f.requestChn <- true
}
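// exampleReorgAncestor is an illustrative sketch (a hypothetical helper, not
// used by the fetcher) of the ancestor walk at the top of announce: starting
// from the last announced node, ReorgDepth parent links are followed to find
// the common ancestor of the old and new head, which is where the new branch
// of tree nodes is attached.
func exampleReorgAncestor(last *fetcherTreeNode, reorgDepth uint64) *fetcherTreeNode {
	n := last
	for i := uint64(0); i < reorgDepth && n != nil; i++ {
		n = n.parent
	}
	return n // nil means no common ancestor is tracked and a resync is needed
}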
// peerHasBlock returns true if we can assume the peer knows the given block
// based on its announcements
func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64, hasState bool) bool {
	f.lock.Lock()
	defer f.lock.Unlock()

	fp := f.peers[p]
	if fp == nil || fp.root == nil {
		return false
	}

	if hasState {
		if fp.lastAnnounced == nil || fp.lastAnnounced.number > number+serverStateAvailable {
			return false
		}
	}

	if f.syncing {
		// always return true when syncing
		// false positives are acceptable, a more sophisticated condition can be implemented later
		return true
	}

	if number >= fp.root.number {
		// it is recent enough that if it is known, it should be in the peer's block tree
		return fp.nodeByHash[hash] != nil
	}
	f.chain.LockChain()
	defer f.chain.UnlockChain()
	// if it's older than the peer's block tree root but it's in the same canonical chain
	// as the root, we can still be sure the peer knows it
	//
	// when syncing, just check if it is part of the known chain, there is nothing better we
	// can do since we do not know the most recent block hash yet
	return rawdb.ReadCanonicalHash(f.pm.chainDb, fp.root.number) == fp.root.hash && rawdb.ReadCanonicalHash(f.pm.chainDb, number) == hash
}

// requestAmount calculates the number of headers to be downloaded starting
// from a certain head backwards
func (f *lightFetcher) requestAmount(p *peer, n *fetcherTreeNode) uint64 {
	amount := uint64(0)
	nn := n
	for nn != nil && !f.checkKnownNode(p, nn) {
		nn = nn.parent
		amount++
	}
	if nn == nil {
		amount = n.number
	}
	return amount
}

// requestedID tells if a certain reqID has been requested by the fetcher
func (f *lightFetcher) requestedID(reqID uint64) bool {
	f.reqMu.RLock()
	_, ok := f.requested[reqID]
	f.reqMu.RUnlock()
	return ok
}
// nextRequest selects the peer and announced head to be requested next; the
// amount to be downloaded starting from the head backwards is also returned
func (f *lightFetcher) nextRequest() (*distReq, uint64, bool) {
	var (
		bestHash   common.Hash
		bestAmount uint64
	)
	bestTd := f.maxConfirmedTd
	bestSyncing := false

	for p, fp := range f.peers {
		for hash, n := range fp.nodeByHash {
			if !f.checkKnownNode(p, n) && !n.requested && (bestTd == nil || n.td.Cmp(bestTd) >= 0) {
				amount := f.requestAmount(p, n)
				if bestTd == nil || n.td.Cmp(bestTd) > 0 || amount < bestAmount {
					bestHash = hash
					bestAmount = amount
					bestTd = n.td
					bestSyncing = fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root)
				}
			}
		}
	}
	if bestTd == f.maxConfirmedTd {
		return nil, 0, false
	}

	var rq *distReq
	reqID := genReqID()
	if bestSyncing {
		rq = &distReq{
			getCost: func(dp distPeer) uint64 {
				return 0
			},
			canSend: func(dp distPeer) bool {
				p := dp.(*peer)
				f.lock.Lock()
				defer f.lock.Unlock()

				fp := f.peers[p]
				return fp != nil && fp.nodeByHash[bestHash] != nil
			},
			request: func(dp distPeer) func() {
				go func() {
					p := dp.(*peer)
					p.Log().Debug("Synchronisation started")
					f.pm.synchronise(p)
					f.syncDone <- p
				}()
				return nil
			},
		}
	} else {
		rq = &distReq{
			getCost: func(dp distPeer) uint64 {
				p := dp.(*peer)
				return p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
			},
			canSend: func(dp distPeer) bool {
				p := dp.(*peer)
				f.lock.Lock()
				defer f.lock.Unlock()

				fp := f.peers[p]
				if fp == nil {
					return false
				}
				n := fp.nodeByHash[bestHash]
				return n != nil && !n.requested
			},
			request: func(dp distPeer) func() {
				p := dp.(*peer)
				f.lock.Lock()
				fp := f.peers[p]
				if fp != nil {
					n := fp.nodeByHash[bestHash]
					if n != nil {
						n.requested = true
					}
				}
				f.lock.Unlock()

				cost := p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
				p.fcServer.QueueRequest(reqID, cost)
				f.reqMu.Lock()
				f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()}
				f.reqMu.Unlock()
				go func() {
					time.Sleep(hardRequestTimeout)
					f.timeoutChn <- reqID
				}()
				return func() { p.RequestHeadersByHash(reqID, cost, bestHash, int(bestAmount), 0, true) }
			},
		}
	}
	return rq, reqID, bestSyncing
}

// deliverHeaders delivers header download request responses for processing
func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types.Header) {
	f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer}
}

// processResponse processes header download request responses, returns true if successful
func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {
	// a mismatched length or head hash means the response does not answer this request;
	// the log fields avoid indexing resp.headers, which may be empty here
	if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {
		req.peer.Log().Debug("Response content mismatch", "requested", req.amount, "reqfrom", req.hash, "delivered", len(resp.headers))
		return false
	}
	headers := make([]*types.Header, req.amount)
	for i, header := range resp.headers {
		headers[int(req.amount)-1-i] = header
	}
	if _, err := f.chain.InsertHeaderChain(headers, 1); err != nil {
		if err == consensus.ErrFutureBlock {
			return true
		}
		log.Debug("Failed to insert header chain", "err", err)
		return false
	}
	tds := make([]*big.Int, len(headers))
	for i, header := range headers {
		td := f.chain.GetTd(header.Hash(), header.Number.Uint64())
		if td == nil {
			log.Debug("Total difficulty not found for header", "index", i+1, "number", header.Number, "hash", header.Hash())
			return false
		}
		tds[i] = td
	}
	f.newHeaders(headers, tds)
	return true
}
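// exampleReverseHeaders is an illustrative sketch (a hypothetical helper, not
// called by the fetcher) of the reordering done in processResponse: headers
// are delivered newest-first, starting at the requested hash and walking
// backwards, while InsertHeaderChain expects them oldest-first.
func exampleReverseHeaders(resp []*types.Header) []*types.Header {
	ordered := make([]*types.Header, len(resp))
	for i, header := range resp {
		ordered[len(resp)-1-i] = header
	}
	return ordered
}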
// newHeaders updates the block trees of all active peers according to a newly
// downloaded and validated batch of headers
func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) {
	var maxTd *big.Int
	for p, fp := range f.peers {
		if !f.checkAnnouncedHeaders(fp, headers, tds) {
			p.Log().Debug("Inconsistent announcement")
			go f.pm.removePeer(p.id)
		}
		if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) {
			maxTd = fp.confirmedTd
		}
	}
	if maxTd != nil {
		f.updateMaxConfirmedTd(maxTd)
	}
}

// checkAnnouncedHeaders updates the peer's block tree if necessary after validating
// a batch of headers. It searches for the latest header in the batch that has a
// matching tree node (if any), and if it has not been marked as known already,
// sets it and its parents to known (even those which are older than the currently
// validated ones). The return value shows whether all hashes, numbers and tds matched
// correctly to the announced values (otherwise the peer should be dropped).
func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*types.Header, tds []*big.Int) bool {
	var (
		n      *fetcherTreeNode
		header *types.Header
		td     *big.Int
	)

	for i := len(headers) - 1; ; i-- {
		if i < 0 {
			if n == nil {
				// no more headers and nothing to match
				return true
			}
			// we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching
			hash, number := header.ParentHash, header.Number.Uint64()-1
			td = f.chain.GetTd(hash, number)
			header = f.chain.GetHeader(hash, number)
			if header == nil || td == nil {
				log.Error("Missing parent of validated header", "hash", hash, "number", number)
				return false
			}
		} else {
			header = headers[i]
			td = tds[i]
		}
		hash := header.Hash()
		number := header.Number.Uint64()
		if n == nil {
			n = fp.nodeByHash[hash]
		}
		if n != nil {
			if n.td == nil {
				// node was unannounced
				if nn := fp.nodeByHash[hash]; nn != nil {
					// if there was already a node with the same hash, continue there and drop this one
					nn.children = append(nn.children, n.children...)
					n.children = nil
					fp.deleteNode(n)
					n = nn
				} else {
					n.hash = hash
					n.td = td
					fp.nodeByHash[hash] = n
				}
			}
			// check if it matches the header
			if n.hash != hash || n.number != number || n.td.Cmp(td) != 0 {
				// peer has previously made an invalid announcement
				return false
			}
			if n.known {
				// we reached a known node that matched our expectations, return with success
				return true
			}
			n.known = true
			if fp.confirmedTd == nil || td.Cmp(fp.confirmedTd) > 0 {
				fp.confirmedTd = td
				fp.bestConfirmed = n
			}
			n = n.parent
			if n == nil {
				return true
			}
		}
	}
}
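// exampleMarkKnown is an illustrative sketch (a hypothetical helper) of the
// core effect checkAnnouncedHeaders has on a matching branch: the matched
// node and all of its ancestors are flagged as known, stopping early at the
// first node that is already known. The real function additionally fills in
// unannounced nodes and updates confirmedTd/bestConfirmed, which is omitted here.
func exampleMarkKnown(n *fetcherTreeNode) {
	for n != nil && !n.known {
		n.known = true
		n = n.parent
	}
}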
// checkSyncedHeaders updates the peer's block tree after synchronisation by marking
// downloaded headers as known. If none of the announced headers are found after
// syncing, the peer is dropped.
func (f *lightFetcher) checkSyncedHeaders(p *peer) {
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check sync headers")
		return
	}
	n := fp.lastAnnounced
	var td *big.Int
	for n != nil {
		if td = f.chain.GetTd(n.hash, n.number); td != nil {
			break
		}
		n = n.parent
	}
	// now n is the latest downloaded header after syncing
	if n == nil {
		p.Log().Debug("Synchronisation failed")
		go f.pm.removePeer(p.id)
	} else {
		header := f.chain.GetHeader(n.hash, n.number)
		f.newHeaders([]*types.Header{header}, []*big.Int{td})
	}
}

// checkKnownNode checks if a block tree node is known (downloaded and validated).
// If it was not known previously but is found in the database, its known flag is set.
func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
	if n.known {
		return true
	}
	td := f.chain.GetTd(n.hash, n.number)
	if td == nil {
		return false
	}
	header := f.chain.GetHeader(n.hash, n.number)
	// check the availability of both header and td because reads are not protected by chain db mutex
	// Note: returning false is always safe here
	if header == nil {
		return false
	}

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check known nodes")
		return false
	}
	if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
		p.Log().Debug("Inconsistent announcement")
		go f.pm.removePeer(p.id)
	}
	if fp.confirmedTd != nil {
		f.updateMaxConfirmedTd(fp.confirmedTd)
	}
	return n.known
}

// deleteNode deletes a node and its child subtrees from a peer's block tree
func (fp *fetcherPeerInfo) deleteNode(n *fetcherTreeNode) {
	if n.parent != nil {
		for i, nn := range n.parent.children {
			if nn == n {
				n.parent.children = append(n.parent.children[:i], n.parent.children[i+1:]...)
				break
			}
		}
	}
	for {
		if n.td != nil {
			delete(fp.nodeByHash, n.hash)
		}
		fp.nodeCnt--
		if len(n.children) == 0 {
			return
		}
		for i, nn := range n.children {
			if i == 0 {
				n = nn
			} else {
				fp.deleteNode(nn)
			}
		}
	}
}

// updateStatsEntry items form a linked list that is expanded with a new item every time a new head with a higher Td
// than the previous one has been downloaded and validated. The list contains a series of maximum confirmed Td values
// and the times these values were confirmed, both increasing monotonically. A maximum confirmed Td is calculated
// both globally for all peers and also for each individual peer (meaning that the given peer has announced the head
// and it has also been downloaded from any peer, either before or after the given announcement).
// The linked list has a global tail where new confirmed Td entries are added and a separate head for each peer,
// pointing to the next Td entry that is higher than the peer's max confirmed Td (nil if it has already confirmed
// the current global head).
type updateStatsEntry struct {
	time mclock.AbsTime
	td   *big.Int
	next *updateStatsEntry
}
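// exampleAppendStatsEntry is an illustrative sketch (a hypothetical helper,
// not used by the fetcher) of how the linked list described above grows at
// its global tail: each newly confirmed maximum Td is recorded together with
// its confirmation time, as updateMaxConfirmedTd does below.
func exampleAppendStatsEntry(tail *updateStatsEntry, td *big.Int) *updateStatsEntry {
	newEntry := &updateStatsEntry{time: mclock.Now(), td: td}
	if tail != nil {
		tail.next = newEntry // link the previous tail to the new entry
	}
	return newEntry // the new global tail
}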
// updateMaxConfirmedTd updates the block delay statistics of active peers. Whenever a new highest Td is confirmed,
// adds it to the end of a linked list together with the time it has been confirmed. Then checks which peers have
// already confirmed a head with the same or higher Td (which counts as zero block delay) and updates their statistics.
// Those who have not confirmed such a head by now will be updated by a subsequent checkUpdateStats call with a
// positive block delay value.
func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) {
	if f.maxConfirmedTd == nil || td.Cmp(f.maxConfirmedTd) > 0 {
		f.maxConfirmedTd = td
		newEntry := &updateStatsEntry{
			time: mclock.Now(),
			td:   td,
		}
		if f.lastUpdateStats != nil {
			f.lastUpdateStats.next = newEntry
		}
		f.lastUpdateStats = newEntry
		for p := range f.peers {
			f.checkUpdateStats(p, newEntry)
		}
	}
}

// checkUpdateStats checks those peers who have not confirmed a certain highest Td (or a larger one) by the time it
// has been confirmed by another peer. If they have confirmed such a head by now, their stats are updated with the
// block delay which is (this peer's confirmation time)-(first confirmation time). After blockDelayTimeout has passed,
// the stats are updated with the blockDelayTimeout value. In either case, the confirmed or timed out updateStatsEntry
// items are removed from the head of the linked list.
// If a new entry has been added to the global tail, it is passed as a parameter here even though this function
// assumes that it has already been added, so that if the peer's list is empty (all heads confirmed, head is nil),
// it can set the new head to newEntry.
func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) {
	now := mclock.Now()
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check update stats")
		return
	}
	if newEntry != nil && fp.firstUpdateStats == nil {
		fp.firstUpdateStats = newEntry
	}
	for fp.firstUpdateStats != nil && fp.firstUpdateStats.time <= now-mclock.AbsTime(blockDelayTimeout) {
		f.pm.serverPool.adjustBlockDelay(p.poolEntry, blockDelayTimeout)
		fp.firstUpdateStats = fp.firstUpdateStats.next
	}
	if fp.confirmedTd != nil {
		for fp.firstUpdateStats != nil && fp.firstUpdateStats.td.Cmp(fp.confirmedTd) <= 0 {
			f.pm.serverPool.adjustBlockDelay(p.poolEntry, time.Duration(now-fp.firstUpdateStats.time))
			fp.firstUpdateStats = fp.firstUpdateStats.next
		}
	}
}
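// exampleBlockDelay is an illustrative sketch (a hypothetical helper, not
// used by checkUpdateStats itself) of the delay bookkeeping above: the delay
// credited to a peer is the time between the first confirmation of a head and
// this peer's own confirmation, capped at blockDelayTimeout.
func exampleBlockDelay(firstConfirmed, peerConfirmed mclock.AbsTime) time.Duration {
	delay := time.Duration(peerConfirmed - firstConfirmed)
	if delay > blockDelayTimeout {
		delay = blockDelayTimeout // timed out entries are charged the full timeout
	}
	return delay
}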