// Copyright 2016 The Spectrum Authors
// This file is part of the Spectrum library.
//
// The Spectrum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Spectrum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Spectrum library. If not, see <http://www.gnu.org/licenses/>.

// Package les implements the Light Ethereum Subprotocol.
package les

import (
	"math/big"
	"sync"
	"time"

	"github.com/SmartMeshFoundation/Spectrum/common"
	"github.com/SmartMeshFoundation/Spectrum/common/mclock"
	"github.com/SmartMeshFoundation/Spectrum/consensus"
	"github.com/SmartMeshFoundation/Spectrum/core"
	"github.com/SmartMeshFoundation/Spectrum/core/types"
	"github.com/SmartMeshFoundation/Spectrum/light"
	"github.com/SmartMeshFoundation/Spectrum/log"
)

const (
	blockDelayTimeout = time.Second * 10 // timeout for a peer to announce a head that has already been confirmed by others
	maxNodeCount      = 20               // maximum number of fetcherTreeNode entries remembered for each peer
)

// lightFetcher tracks the announced heads of all connected peers, schedules
// header download requests for the best announced head and falls back to a
// full synchronisation when an announced chain cannot be connected locally.
type lightFetcher struct {
	pm    *ProtocolManager  // protocol manager; used for peer removal, request distribution and sync
	odr   *LesOdr           // on-demand retrieval backend
	chain *light.LightChain // local header chain that downloaded headers are inserted into

	maxConfirmedTd  *big.Int                   // highest total difficulty downloaded and validated so far
	peers           map[*peer]*fetcherPeerInfo // per-peer announcement trees and statistics
	lastUpdateStats *updateStatsEntry          // tail of the global block delay statistics list

	lock       sync.Mutex              // protects peers, syncing, maxConfirmedTd and the per-peer trees
	deliverChn chan fetchResponse      // incoming header download responses
	reqMu      sync.RWMutex            // protects the requested map
	requested  map[uint64]fetchRequest // in-flight header requests, keyed by request ID
	timeoutChn chan uint64             // request IDs whose hard timeout has expired
	requestChn chan bool               // signals that new requests may be possible; true if initiated from outside
	syncing    bool                    // true while a full synchronisation is in progress
	syncDone   chan *peer              // signals that synchronisation with the given peer has finished
}

// fetcherPeerInfo holds fetcher-specific information about each active peer
type fetcherPeerInfo struct {
	root, lastAnnounced *fetcherTreeNode // tree root and the node of the most recent announcement
	nodeCnt             int              // number of nodes currently stored in the tree
	confirmedTd         *big.Int         // highest Td announced by this peer that has also been validated locally
	bestConfirmed       *fetcherTreeNode // tree node belonging to confirmedTd
	nodeByHash          map[common.Hash]*fetcherTreeNode // announced nodes indexed by block hash
	firstUpdateStats    *updateStatsEntry                // head of this peer's pending block delay statistics list
}

// fetcherTreeNode is a node of a tree that holds information about blocks recently
// announced and confirmed by a certain peer. Each new announce message from a peer
// adds nodes to the tree, based on the previous announced head and the reorg depth.
// There are three possible states for a tree node:
// - announced: not downloaded (known) yet, but we know its head, number and td
// - intermediate: not known, hash and td are empty, they are filled out when it becomes known
// - known: both announced by this peer and downloaded (from any peer).
// This structure makes it possible to always know which peer has a certain block,
// which is necessary for selecting a suitable peer for ODR requests and also for
// canonizing new heads. It also helps to always download the minimum necessary
// amount of headers with a single request.
80 type fetcherTreeNode struct { 81 hash common.Hash 82 number uint64 83 td *big.Int 84 known, requested bool 85 parent *fetcherTreeNode 86 children []*fetcherTreeNode 87 } 88 89 // fetchRequest represents a header download request 90 type fetchRequest struct { 91 hash common.Hash 92 amount uint64 93 peer *peer 94 sent mclock.AbsTime 95 timeout bool 96 } 97 98 // fetchResponse represents a header download response 99 type fetchResponse struct { 100 reqID uint64 101 headers []*types.Header 102 peer *peer 103 } 104 105 // newLightFetcher creates a new light fetcher 106 func newLightFetcher(pm *ProtocolManager) *lightFetcher { 107 f := &lightFetcher{ 108 pm: pm, 109 chain: pm.blockchain.(*light.LightChain), 110 odr: pm.odr, 111 peers: make(map[*peer]*fetcherPeerInfo), 112 deliverChn: make(chan fetchResponse, 100), 113 requested: make(map[uint64]fetchRequest), 114 timeoutChn: make(chan uint64), 115 requestChn: make(chan bool, 100), 116 syncDone: make(chan *peer), 117 maxConfirmedTd: big.NewInt(0), 118 } 119 pm.peers.notify(f) 120 121 f.pm.wg.Add(1) 122 go f.syncLoop() 123 return f 124 } 125 126 // syncLoop is the main event loop of the light fetcher 127 func (f *lightFetcher) syncLoop() { 128 requesting := false 129 defer f.pm.wg.Done() 130 for { 131 select { 132 case <-f.pm.quitSync: 133 return 134 // when a new announce is received, request loop keeps running until 135 // no further requests are necessary or possible 136 case newAnnounce := <-f.requestChn: 137 f.lock.Lock() 138 s := requesting 139 requesting = false 140 var ( 141 rq *distReq 142 reqID uint64 143 ) 144 if !f.syncing && !(newAnnounce && s) { 145 rq, reqID = f.nextRequest() 146 } 147 syncing := f.syncing 148 f.lock.Unlock() 149 150 if rq != nil { 151 requesting = true 152 _, ok := <-f.pm.reqDist.queue(rq) 153 if !ok { 154 f.requestChn <- false 155 } 156 157 if !syncing { 158 go func() { 159 time.Sleep(softRequestTimeout) 160 f.reqMu.Lock() 161 req, ok := f.requested[reqID] 162 if ok { 163 req.timeout = 
true 164 f.requested[reqID] = req 165 } 166 f.reqMu.Unlock() 167 // keep starting new requests while possible 168 f.requestChn <- false 169 }() 170 } 171 } 172 case reqID := <-f.timeoutChn: 173 f.reqMu.Lock() 174 req, ok := f.requested[reqID] 175 if ok { 176 delete(f.requested, reqID) 177 } 178 f.reqMu.Unlock() 179 if ok { 180 f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true) 181 req.peer.Log().Debug("Fetching data timed out hard") 182 go f.pm.removePeer(req.peer.id) 183 } 184 case resp := <-f.deliverChn: 185 f.reqMu.Lock() 186 req, ok := f.requested[resp.reqID] 187 if ok && req.peer != resp.peer { 188 ok = false 189 } 190 if ok { 191 delete(f.requested, resp.reqID) 192 } 193 f.reqMu.Unlock() 194 if ok { 195 f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), req.timeout) 196 } 197 f.lock.Lock() 198 if !ok || !(f.syncing || f.processResponse(req, resp)) { 199 resp.peer.Log().Debug("Failed processing response") 200 go f.pm.removePeer(resp.peer.id) 201 } 202 f.lock.Unlock() 203 case p := <-f.syncDone: 204 f.lock.Lock() 205 p.Log().Debug("Done synchronising with peer") 206 f.checkSyncedHeaders(p) 207 f.syncing = false 208 f.lock.Unlock() 209 } 210 } 211 } 212 213 // registerPeer adds a new peer to the fetcher's peer set 214 func (f *lightFetcher) registerPeer(p *peer) { 215 p.lock.Lock() 216 p.hasBlock = func(hash common.Hash, number uint64) bool { 217 return f.peerHasBlock(p, hash, number) 218 } 219 p.lock.Unlock() 220 221 f.lock.Lock() 222 defer f.lock.Unlock() 223 224 f.peers[p] = &fetcherPeerInfo{nodeByHash: make(map[common.Hash]*fetcherTreeNode)} 225 } 226 227 // unregisterPeer removes a new peer from the fetcher's peer set 228 func (f *lightFetcher) unregisterPeer(p *peer) { 229 p.lock.Lock() 230 p.hasBlock = nil 231 p.lock.Unlock() 232 233 f.lock.Lock() 234 defer f.lock.Unlock() 235 236 // check for potential timed out block delay statistics 237 f.checkUpdateStats(p, nil) 
238 delete(f.peers, p) 239 } 240 241 // announce processes a new announcement message received from a peer, adding new 242 // nodes to the peer's block tree and removing old nodes if necessary 243 func (f *lightFetcher) announce(p *peer, head *announceData) { 244 f.lock.Lock() 245 defer f.lock.Unlock() 246 p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth) 247 248 fp := f.peers[p] 249 if fp == nil { 250 p.Log().Debug("Announcement from unknown peer") 251 return 252 } 253 254 if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 { 255 // announced tds should be strictly monotonic 256 p.Log().Debug("Received non-monotonic td", "current", head.Td, "previous", fp.lastAnnounced.td) 257 go f.pm.removePeer(p.id) 258 return 259 } 260 261 n := fp.lastAnnounced 262 for i := uint64(0); i < head.ReorgDepth; i++ { 263 if n == nil { 264 break 265 } 266 n = n.parent 267 } 268 if n != nil { 269 // n is now the reorg common ancestor, add a new branch of nodes 270 // check if the node count is too high to add new nodes 271 locked := false 272 for uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil { 273 if !locked { 274 f.chain.LockChain() 275 defer f.chain.UnlockChain() 276 locked = true 277 } 278 // if one of root's children is canonical, keep it, delete other branches and root itself 279 var newRoot *fetcherTreeNode 280 for i, nn := range fp.root.children { 281 if core.GetCanonicalHash(f.pm.chainDb, nn.number) == nn.hash { 282 fp.root.children = append(fp.root.children[:i], fp.root.children[i+1:]...) 
283 nn.parent = nil 284 newRoot = nn 285 break 286 } 287 } 288 fp.deleteNode(fp.root) 289 if n == fp.root { 290 n = newRoot 291 } 292 fp.root = newRoot 293 if newRoot == nil || !f.checkKnownNode(p, newRoot) { 294 fp.bestConfirmed = nil 295 fp.confirmedTd = nil 296 } 297 298 if n == nil { 299 break 300 } 301 } 302 if n != nil { 303 for n.number < head.Number { 304 nn := &fetcherTreeNode{number: n.number + 1, parent: n} 305 n.children = append(n.children, nn) 306 n = nn 307 fp.nodeCnt++ 308 } 309 n.hash = head.Hash 310 n.td = head.Td 311 fp.nodeByHash[n.hash] = n 312 } 313 } 314 if n == nil { 315 // could not find reorg common ancestor or had to delete entire tree, a new root and a resync is needed 316 if fp.root != nil { 317 fp.deleteNode(fp.root) 318 } 319 n = &fetcherTreeNode{hash: head.Hash, number: head.Number, td: head.Td} 320 fp.root = n 321 fp.nodeCnt++ 322 fp.nodeByHash[n.hash] = n 323 fp.bestConfirmed = nil 324 fp.confirmedTd = nil 325 } 326 327 f.checkKnownNode(p, n) 328 p.lock.Lock() 329 p.headInfo = head 330 fp.lastAnnounced = n 331 p.lock.Unlock() 332 f.checkUpdateStats(p, nil) 333 f.requestChn <- true 334 } 335 336 // peerHasBlock returns true if we can assume the peer knows the given block 337 // based on its announcements 338 func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64) bool { 339 f.lock.Lock() 340 defer f.lock.Unlock() 341 342 if f.syncing { 343 // always return true when syncing 344 // false positives are acceptable, a more sophisticated condition can be implemented later 345 return true 346 } 347 348 fp := f.peers[p] 349 if fp == nil || fp.root == nil { 350 return false 351 } 352 353 if number >= fp.root.number { 354 // it is recent enough that if it is known, is should be in the peer's block tree 355 return fp.nodeByHash[hash] != nil 356 } 357 f.chain.LockChain() 358 defer f.chain.UnlockChain() 359 // if it's older than the peer's block tree root but it's in the same canonical chain 360 // as the root, we can 
still be sure the peer knows it 361 // 362 // when syncing, just check if it is part of the known chain, there is nothing better we 363 // can do since we do not know the most recent block hash yet 364 return core.GetCanonicalHash(f.pm.chainDb, fp.root.number) == fp.root.hash && core.GetCanonicalHash(f.pm.chainDb, number) == hash 365 } 366 367 // requestAmount calculates the amount of headers to be downloaded starting 368 // from a certain head backwards 369 func (f *lightFetcher) requestAmount(p *peer, n *fetcherTreeNode) uint64 { 370 amount := uint64(0) 371 nn := n 372 for nn != nil && !f.checkKnownNode(p, nn) { 373 nn = nn.parent 374 amount++ 375 } 376 if nn == nil { 377 amount = n.number 378 } 379 return amount 380 } 381 382 // requestedID tells if a certain reqID has been requested by the fetcher 383 func (f *lightFetcher) requestedID(reqID uint64) bool { 384 f.reqMu.RLock() 385 _, ok := f.requested[reqID] 386 f.reqMu.RUnlock() 387 return ok 388 } 389 390 // nextRequest selects the peer and announced head to be requested next, amount 391 // to be downloaded starting from the head backwards is also returned 392 func (f *lightFetcher) nextRequest() (*distReq, uint64) { 393 var ( 394 bestHash common.Hash 395 bestAmount uint64 396 ) 397 bestTd := f.maxConfirmedTd 398 bestSyncing := false 399 400 for p, fp := range f.peers { 401 for hash, n := range fp.nodeByHash { 402 if !f.checkKnownNode(p, n) && !n.requested && (bestTd == nil || n.td.Cmp(bestTd) >= 0) { 403 amount := f.requestAmount(p, n) 404 if bestTd == nil || n.td.Cmp(bestTd) > 0 || amount < bestAmount { 405 bestHash = hash 406 bestAmount = amount 407 bestTd = n.td 408 bestSyncing = fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root) 409 } 410 } 411 } 412 } 413 if bestTd == f.maxConfirmedTd { 414 return nil, 0 415 } 416 417 f.syncing = bestSyncing 418 419 var rq *distReq 420 reqID := genReqID() 421 if f.syncing { 422 rq = &distReq{ 423 getCost: func(dp distPeer) uint64 { 424 return 0 
425 }, 426 canSend: func(dp distPeer) bool { 427 p := dp.(*peer) 428 fp := f.peers[p] 429 return fp != nil && fp.nodeByHash[bestHash] != nil 430 }, 431 request: func(dp distPeer) func() { 432 go func() { 433 p := dp.(*peer) 434 p.Log().Debug("Synchronisation started") 435 f.pm.synchronise(p) 436 f.syncDone <- p 437 }() 438 return nil 439 }, 440 } 441 } else { 442 rq = &distReq{ 443 getCost: func(dp distPeer) uint64 { 444 p := dp.(*peer) 445 return p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount)) 446 }, 447 canSend: func(dp distPeer) bool { 448 p := dp.(*peer) 449 f.lock.Lock() 450 defer f.lock.Unlock() 451 452 fp := f.peers[p] 453 if fp == nil { 454 return false 455 } 456 n := fp.nodeByHash[bestHash] 457 return n != nil && !n.requested 458 }, 459 request: func(dp distPeer) func() { 460 p := dp.(*peer) 461 f.lock.Lock() 462 fp := f.peers[p] 463 if fp != nil { 464 n := fp.nodeByHash[bestHash] 465 if n != nil { 466 n.requested = true 467 } 468 } 469 f.lock.Unlock() 470 471 cost := p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount)) 472 p.fcServer.QueueRequest(reqID, cost) 473 f.reqMu.Lock() 474 f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()} 475 f.reqMu.Unlock() 476 go func() { 477 time.Sleep(hardRequestTimeout) 478 f.timeoutChn <- reqID 479 }() 480 return func() { p.RequestHeadersByHash(reqID, cost, bestHash, int(bestAmount), 0, true) } 481 }, 482 } 483 } 484 return rq, reqID 485 } 486 487 // deliverHeaders delivers header download request responses for processing 488 func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types.Header) { 489 f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer} 490 } 491 492 // processResponse processes header download request responses, returns true if successful 493 func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool { 494 if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash { 
495 req.peer.Log().Debug("Response content mismatch", "requested", len(resp.headers), "reqfrom", resp.headers[0], "delivered", req.amount, "delfrom", req.hash) 496 return false 497 } 498 headers := make([]*types.Header, req.amount) 499 for i, header := range resp.headers { 500 headers[int(req.amount)-1-i] = header 501 } 502 if _, err := f.chain.InsertHeaderChain(headers, 1); err != nil { 503 if err == consensus.ErrFutureBlock { 504 return true 505 } 506 log.Debug("Failed to insert header chain", "err", err) 507 return false 508 } 509 tds := make([]*big.Int, len(headers)) 510 for i, header := range headers { 511 td := f.chain.GetTd(header.Hash(), header.Number.Uint64()) 512 if td == nil { 513 log.Debug("Total difficulty not found for header", "index", i+1, "number", header.Number, "hash", header.Hash()) 514 return false 515 } 516 tds[i] = td 517 } 518 f.newHeaders(headers, tds) 519 return true 520 } 521 522 // newHeaders updates the block trees of all active peers according to a newly 523 // downloaded and validated batch or headers 524 func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) { 525 var maxTd *big.Int 526 for p, fp := range f.peers { 527 if !f.checkAnnouncedHeaders(fp, headers, tds) { 528 p.Log().Debug("Inconsistent announcement") 529 go f.pm.removePeer(p.id) 530 } 531 if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) { 532 maxTd = fp.confirmedTd 533 } 534 } 535 if maxTd != nil { 536 f.updateMaxConfirmedTd(maxTd) 537 } 538 } 539 540 // checkAnnouncedHeaders updates peer's block tree if necessary after validating 541 // a batch of headers. It searches for the latest header in the batch that has a 542 // matching tree node (if any), and if it has not been marked as known already, 543 // sets it and its parents to known (even those which are older than the currently 544 // validated ones). 
// Return value shows if all hashes, numbers and Tds matched
// correctly to the announced values (otherwise the peer should be dropped).
func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*types.Header, tds []*big.Int) bool {
	var (
		n      *fetcherTreeNode // current tree node being matched against a header
		header *types.Header    // header currently being matched
		td     *big.Int         // total difficulty belonging to header
	)

	// iterate the batch newest-first; once the batch is exhausted, keep walking
	// the local chain backwards through parent hashes
	for i := len(headers) - 1; ; i-- {
		if i < 0 {
			if n == nil {
				// no more headers and nothing to match
				return true
			}
			// we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching
			// (fetch td before header: both lookups use the still-current header's parent)
			td = f.chain.GetTd(header.ParentHash, header.Number.Uint64()-1)
			header = f.chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
		} else {
			header = headers[i]
			td = tds[i]
		}
		hash := header.Hash()
		number := header.Number.Uint64()
		if n == nil {
			// try to anchor the walk at a node this peer has announced
			n = fp.nodeByHash[hash]
		}
		if n != nil {
			if n.td == nil {
				// node was unannounced (intermediate), fill in its identity now
				if nn := fp.nodeByHash[hash]; nn != nil {
					// if there was already a node with the same hash, continue there and drop this one
					nn.children = append(nn.children, n.children...)
					n.children = nil
					fp.deleteNode(n)
					n = nn
				} else {
					n.hash = hash
					n.td = td
					fp.nodeByHash[hash] = n
				}
			}
			// check if it matches the header
			if n.hash != hash || n.number != number || n.td.Cmp(td) != 0 {
				// peer has previously made an invalid announcement
				return false
			}
			if n.known {
				// we reached a known node that matched our expectations, return with success
				return true
			}
			n.known = true
			if fp.confirmedTd == nil || td.Cmp(fp.confirmedTd) > 0 {
				// this is the best confirmed announcement from this peer so far
				fp.confirmedTd = td
				fp.bestConfirmed = n
			}
			n = n.parent
			if n == nil {
				// reached the tree root, everything matched
				return true
			}
		}
	}
}

// checkSyncedHeaders updates peer's block tree after synchronisation by marking
// downloaded headers as known.
// If none of the announced headers are found after
// syncing, the peer is dropped.
func (f *lightFetcher) checkSyncedHeaders(p *peer) {
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check sync headers")
		return
	}
	// walk back from the last announced node until one that exists in the
	// local chain (has a stored Td) is found
	n := fp.lastAnnounced
	var td *big.Int
	for n != nil {
		if td = f.chain.GetTd(n.hash, n.number); td != nil {
			break
		}
		n = n.parent
	}
	// now n is the latest downloaded header after syncing
	if n == nil {
		p.Log().Debug("Synchronisation failed")
		go f.pm.removePeer(p.id)
	} else {
		header := f.chain.GetHeader(n.hash, n.number)
		f.newHeaders([]*types.Header{header}, []*big.Int{td})
	}
}

// checkKnownNode checks if a block tree node is known (downloaded and validated)
// If it was not known previously but found in the database, sets its known flag
func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
	if n.known {
		return true
	}
	// no stored Td means the header has not been downloaded and validated yet
	td := f.chain.GetTd(n.hash, n.number)
	if td == nil {
		return false
	}

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check known nodes")
		return false
	}
	header := f.chain.GetHeader(n.hash, n.number)
	// checkAnnouncedHeaders sets n.known (and the peer's confirmed Td) if the
	// stored header matches what the peer announced
	if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
		p.Log().Debug("Inconsistent announcement")
		go f.pm.removePeer(p.id)
	}
	if fp.confirmedTd != nil {
		f.updateMaxConfirmedTd(fp.confirmedTd)
	}
	return n.known
}

// deleteNode deletes a node and its child subtrees from a peer's block tree
func (fp *fetcherPeerInfo) deleteNode(n *fetcherTreeNode) {
	// unlink the node from its parent's child list first
	if n.parent != nil {
		for i, nn := range n.parent.children {
			if nn == n {
				n.parent.children = append(n.parent.children[:i], n.parent.children[i+1:]...)
				break
			}
		}
	}
	// iteratively descend into the first child, recursing only for siblings
	for {
		if n.td != nil {
			// only announced nodes (td set) are indexed by hash
			delete(fp.nodeByHash, n.hash)
		}
		fp.nodeCnt--
		if len(n.children) == 0 {
			return
		}
		for i, nn := range n.children {
			if i == 0 {
				n = nn
			} else {
				fp.deleteNode(nn)
			}
		}
	}
}

// updateStatsEntry items form a linked list that is expanded with a new item every time a new head with a higher Td
// than the previous one has been downloaded and validated. The list contains a series of maximum confirmed Td values
// and the time these values have been confirmed, both increasing monotonically. A maximum confirmed Td is calculated
// both globally for all peers and also for each individual peer (meaning that the given peer has announced the head
// and it has also been downloaded from any peer, either before or after the given announcement).
// The linked list has a global tail where new confirmed Td entries are added and a separate head for each peer,
// pointing to the next Td entry that is higher than the peer's max confirmed Td (nil if it has already confirmed
// the current global head).
type updateStatsEntry struct {
	time mclock.AbsTime    // time the Td value was first confirmed
	td   *big.Int          // confirmed total difficulty
	next *updateStatsEntry // next (higher) confirmed Td entry
}

// updateMaxConfirmedTd updates the block delay statistics of active peers. Whenever a new highest Td is confirmed,
// adds it to the end of a linked list together with the time it has been confirmed. Then checks which peers have
// already confirmed a head with the same or higher Td (which counts as zero block delay) and updates their statistics.
// Those who have not confirmed such a head by now will be updated by a subsequent checkUpdateStats call with a
// positive block delay value.
709 func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) { 710 if f.maxConfirmedTd == nil || td.Cmp(f.maxConfirmedTd) > 0 { 711 f.maxConfirmedTd = td 712 newEntry := &updateStatsEntry{ 713 time: mclock.Now(), 714 td: td, 715 } 716 if f.lastUpdateStats != nil { 717 f.lastUpdateStats.next = newEntry 718 } 719 f.lastUpdateStats = newEntry 720 for p := range f.peers { 721 f.checkUpdateStats(p, newEntry) 722 } 723 } 724 } 725 726 // checkUpdateStats checks those peers who have not confirmed a certain highest Td (or a larger one) by the time it 727 // has been confirmed by another peer. If they have confirmed such a head by now, their stats are updated with the 728 // block delay which is (this peer's confirmation time)-(first confirmation time). After blockDelayTimeout has passed, 729 // the stats are updated with blockDelayTimeout value. In either case, the confirmed or timed out updateStatsEntry 730 // items are removed from the head of the linked list. 731 // If a new entry has been added to the global tail, it is passed as a parameter here even though this function 732 // assumes that it has already been added, so that if the peer's list is empty (all heads confirmed, head is nil), 733 // it can set the new head to newEntry. 
734 func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) { 735 now := mclock.Now() 736 fp := f.peers[p] 737 if fp == nil { 738 p.Log().Debug("Unknown peer to check update stats") 739 return 740 } 741 if newEntry != nil && fp.firstUpdateStats == nil { 742 fp.firstUpdateStats = newEntry 743 } 744 for fp.firstUpdateStats != nil && fp.firstUpdateStats.time <= now-mclock.AbsTime(blockDelayTimeout) { 745 f.pm.serverPool.adjustBlockDelay(p.poolEntry, blockDelayTimeout) 746 fp.firstUpdateStats = fp.firstUpdateStats.next 747 } 748 if fp.confirmedTd != nil { 749 for fp.firstUpdateStats != nil && fp.firstUpdateStats.td.Cmp(fp.confirmedTd) <= 0 { 750 f.pm.serverPool.adjustBlockDelay(p.poolEntry, time.Duration(now-fp.firstUpdateStats.time)) 751 fp.firstUpdateStats = fp.firstUpdateStats.next 752 } 753 } 754 }