// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package les implements the Light Ethereum Subprotocol.
package les

import (
	"math/big"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/log"
)

const (
	blockDelayTimeout = time.Second * 10 // timeout for a peer to announce a head that has already been confirmed by others
	maxNodeCount      = 20               // maximum number of fetcherTreeNode entries remembered for each peer
)

// lightFetcher tracks new block head announcements received from LES server
// peers, schedules header download requests for the best announced heads and
// triggers a full synchronisation when a simple header fetch is not enough.
type lightFetcher struct {
	pm    *ProtocolManager
	odr   *LesOdr
	chain *light.LightChain

	maxConfirmedTd  *big.Int                   // highest total difficulty downloaded and validated so far
	peers           map[*peer]*fetcherPeerInfo // per-peer announcement block trees and delay stats
	lastUpdateStats *updateStatsEntry          // tail of the global block delay stats list

	// lock protects the fields above and the per-peer block trees; it is held
	// while processing announcements, responses and request selection
	lock       sync.Mutex
	deliverChn chan fetchResponse
	reqMu      sync.RWMutex // protects requested
	requested  map[uint64]fetchRequest
	timeoutChn chan uint64
	requestChn chan bool // true if initiated from outside
	syncing    bool
	syncDone   chan *peer
}

// fetcherPeerInfo holds fetcher-specific information about each active peer
type fetcherPeerInfo struct {
	root, lastAnnounced *fetcherTreeNode // root of the block tree and the most recently announced node
	nodeCnt             int              // number of nodes in the tree, capped by maxNodeCount
	confirmedTd         *big.Int         // highest Td announced by this peer and also downloaded/validated
	bestConfirmed       *fetcherTreeNode // node belonging to confirmedTd
	nodeByHash          map[common.Hash]*fetcherTreeNode
	firstUpdateStats    *updateStatsEntry // head of this peer's pending block delay stats entries
}

// fetcherTreeNode is a node of a tree that holds information about blocks recently
// announced and confirmed by a certain peer. Each new announce message from a peer
// adds nodes to the tree, based on the previous announced head and the reorg depth.
// There are three possible states for a tree node:
// - announced: not downloaded (known) yet, but we know its head, number and td
// - intermediate: not known, hash and td are empty, they are filled out when it becomes known
// - known: both announced by this peer and downloaded (from any peer).
// This structure makes it possible to always know which peer has a certain block,
// which is necessary for selecting a suitable peer for ODR requests and also for
// canonizing new heads. It also helps to always download the minimum necessary
// amount of headers with a single request.
80 type fetcherTreeNode struct { 81 hash common.Hash 82 number uint64 83 td *big.Int 84 known, requested bool 85 parent *fetcherTreeNode 86 children []*fetcherTreeNode 87 } 88 89 // fetchRequest represents a header download request 90 type fetchRequest struct { 91 hash common.Hash 92 amount uint64 93 peer *peer 94 sent mclock.AbsTime 95 timeout bool 96 } 97 98 // fetchResponse represents a header download response 99 type fetchResponse struct { 100 reqID uint64 101 headers []*types.Header 102 peer *peer 103 } 104 105 // newLightFetcher creates a new light fetcher 106 func newLightFetcher(pm *ProtocolManager) *lightFetcher { 107 f := &lightFetcher{ 108 pm: pm, 109 chain: pm.blockchain.(*light.LightChain), 110 odr: pm.odr, 111 peers: make(map[*peer]*fetcherPeerInfo), 112 deliverChn: make(chan fetchResponse, 100), 113 requested: make(map[uint64]fetchRequest), 114 timeoutChn: make(chan uint64), 115 requestChn: make(chan bool, 100), 116 syncDone: make(chan *peer), 117 maxConfirmedTd: big.NewInt(0), 118 } 119 go f.syncLoop() 120 return f 121 } 122 123 // syncLoop is the main event loop of the light fetcher 124 func (f *lightFetcher) syncLoop() { 125 f.pm.wg.Add(1) 126 defer f.pm.wg.Done() 127 128 requesting := false 129 for { 130 select { 131 case <-f.pm.quitSync: 132 return 133 // when a new announce is received, request loop keeps running until 134 // no further requests are necessary or possible 135 case newAnnounce := <-f.requestChn: 136 f.lock.Lock() 137 s := requesting 138 requesting = false 139 var ( 140 rq *distReq 141 reqID uint64 142 ) 143 if !f.syncing && !(newAnnounce && s) { 144 rq, reqID = f.nextRequest() 145 } 146 syncing := f.syncing 147 f.lock.Unlock() 148 149 if rq != nil { 150 requesting = true 151 _, ok := <-f.pm.reqDist.queue(rq) 152 if !ok { 153 f.requestChn <- false 154 } 155 156 if !syncing { 157 go func() { 158 time.Sleep(softRequestTimeout) 159 f.reqMu.Lock() 160 req, ok := f.requested[reqID] 161 if ok { 162 req.timeout = true 163 
f.requested[reqID] = req 164 } 165 f.reqMu.Unlock() 166 // keep starting new requests while possible 167 f.requestChn <- false 168 }() 169 } 170 } 171 case reqID := <-f.timeoutChn: 172 f.reqMu.Lock() 173 req, ok := f.requested[reqID] 174 if ok { 175 delete(f.requested, reqID) 176 } 177 f.reqMu.Unlock() 178 if ok { 179 f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true) 180 req.peer.Log().Debug("Fetching data timed out hard") 181 go f.pm.removePeer(req.peer.id) 182 } 183 case resp := <-f.deliverChn: 184 f.reqMu.Lock() 185 req, ok := f.requested[resp.reqID] 186 if ok && req.peer != resp.peer { 187 ok = false 188 } 189 if ok { 190 delete(f.requested, resp.reqID) 191 } 192 f.reqMu.Unlock() 193 if ok { 194 f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), req.timeout) 195 } 196 f.lock.Lock() 197 if !ok || !(f.syncing || f.processResponse(req, resp)) { 198 resp.peer.Log().Debug("Failed processing response") 199 go f.pm.removePeer(resp.peer.id) 200 } 201 f.lock.Unlock() 202 case p := <-f.syncDone: 203 f.lock.Lock() 204 p.Log().Debug("Done synchronising with peer") 205 f.checkSyncedHeaders(p) 206 f.syncing = false 207 f.lock.Unlock() 208 } 209 } 210 } 211 212 // addPeer adds a new peer to the fetcher's peer set 213 func (f *lightFetcher) addPeer(p *peer) { 214 p.lock.Lock() 215 p.hasBlock = func(hash common.Hash, number uint64) bool { 216 return f.peerHasBlock(p, hash, number) 217 } 218 p.lock.Unlock() 219 220 f.lock.Lock() 221 defer f.lock.Unlock() 222 223 f.peers[p] = &fetcherPeerInfo{nodeByHash: make(map[common.Hash]*fetcherTreeNode)} 224 } 225 226 // removePeer removes a new peer from the fetcher's peer set 227 func (f *lightFetcher) removePeer(p *peer) { 228 p.lock.Lock() 229 p.hasBlock = nil 230 p.lock.Unlock() 231 232 f.lock.Lock() 233 defer f.lock.Unlock() 234 235 // check for potential timed out block delay statistics 236 f.checkUpdateStats(p, nil) 237 delete(f.peers, p) 238 
} 239 240 // announce processes a new announcement message received from a peer, adding new 241 // nodes to the peer's block tree and removing old nodes if necessary 242 func (f *lightFetcher) announce(p *peer, head *announceData) { 243 f.lock.Lock() 244 defer f.lock.Unlock() 245 p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth) 246 247 fp := f.peers[p] 248 if fp == nil { 249 p.Log().Debug("Announcement from unknown peer") 250 return 251 } 252 253 if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 { 254 // announced tds should be strictly monotonic 255 p.Log().Debug("Received non-monotonic td", "current", head.Td, "previous", fp.lastAnnounced.td) 256 go f.pm.removePeer(p.id) 257 return 258 } 259 260 n := fp.lastAnnounced 261 for i := uint64(0); i < head.ReorgDepth; i++ { 262 if n == nil { 263 break 264 } 265 n = n.parent 266 } 267 if n != nil { 268 // n is now the reorg common ancestor, add a new branch of nodes 269 // check if the node count is too high to add new nodes 270 locked := false 271 for uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil { 272 if !locked { 273 f.chain.LockChain() 274 defer f.chain.UnlockChain() 275 locked = true 276 } 277 // if one of root's children is canonical, keep it, delete other branches and root itself 278 var newRoot *fetcherTreeNode 279 for i, nn := range fp.root.children { 280 if core.GetCanonicalHash(f.pm.chainDb, nn.number) == nn.hash { 281 fp.root.children = append(fp.root.children[:i], fp.root.children[i+1:]...) 
282 nn.parent = nil 283 newRoot = nn 284 break 285 } 286 } 287 fp.deleteNode(fp.root) 288 if n == fp.root { 289 n = newRoot 290 } 291 fp.root = newRoot 292 if newRoot == nil || !f.checkKnownNode(p, newRoot) { 293 fp.bestConfirmed = nil 294 fp.confirmedTd = nil 295 } 296 297 if n == nil { 298 break 299 } 300 } 301 if n != nil { 302 for n.number < head.Number { 303 nn := &fetcherTreeNode{number: n.number + 1, parent: n} 304 n.children = append(n.children, nn) 305 n = nn 306 fp.nodeCnt++ 307 } 308 n.hash = head.Hash 309 n.td = head.Td 310 fp.nodeByHash[n.hash] = n 311 } 312 } 313 if n == nil { 314 // could not find reorg common ancestor or had to delete entire tree, a new root and a resync is needed 315 if fp.root != nil { 316 fp.deleteNode(fp.root) 317 } 318 n = &fetcherTreeNode{hash: head.Hash, number: head.Number, td: head.Td} 319 fp.root = n 320 fp.nodeCnt++ 321 fp.nodeByHash[n.hash] = n 322 fp.bestConfirmed = nil 323 fp.confirmedTd = nil 324 } 325 326 f.checkKnownNode(p, n) 327 p.lock.Lock() 328 p.headInfo = head 329 fp.lastAnnounced = n 330 p.lock.Unlock() 331 f.checkUpdateStats(p, nil) 332 f.requestChn <- true 333 } 334 335 // peerHasBlock returns true if we can assume the peer knows the given block 336 // based on its announcements 337 func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64) bool { 338 f.lock.Lock() 339 defer f.lock.Unlock() 340 341 if f.syncing { 342 // always return true when syncing 343 // false positives are acceptable, a more sophisticated condition can be implemented later 344 return true 345 } 346 347 fp := f.peers[p] 348 if fp == nil || fp.root == nil { 349 return false 350 } 351 352 if number >= fp.root.number { 353 // it is recent enough that if it is known, is should be in the peer's block tree 354 return fp.nodeByHash[hash] != nil 355 } 356 f.chain.LockChain() 357 defer f.chain.UnlockChain() 358 // if it's older than the peer's block tree root but it's in the same canonical chain 359 // as the root, we can 
still be sure the peer knows it 360 // 361 // when syncing, just check if it is part of the known chain, there is nothing better we 362 // can do since we do not know the most recent block hash yet 363 return core.GetCanonicalHash(f.pm.chainDb, fp.root.number) == fp.root.hash && core.GetCanonicalHash(f.pm.chainDb, number) == hash 364 } 365 366 // requestAmount calculates the amount of headers to be downloaded starting 367 // from a certain head backwards 368 func (f *lightFetcher) requestAmount(p *peer, n *fetcherTreeNode) uint64 { 369 amount := uint64(0) 370 nn := n 371 for nn != nil && !f.checkKnownNode(p, nn) { 372 nn = nn.parent 373 amount++ 374 } 375 if nn == nil { 376 amount = n.number 377 } 378 return amount 379 } 380 381 // requestedID tells if a certain reqID has been requested by the fetcher 382 func (f *lightFetcher) requestedID(reqID uint64) bool { 383 f.reqMu.RLock() 384 _, ok := f.requested[reqID] 385 f.reqMu.RUnlock() 386 return ok 387 } 388 389 // nextRequest selects the peer and announced head to be requested next, amount 390 // to be downloaded starting from the head backwards is also returned 391 func (f *lightFetcher) nextRequest() (*distReq, uint64) { 392 var ( 393 bestHash common.Hash 394 bestAmount uint64 395 ) 396 bestTd := f.maxConfirmedTd 397 bestSyncing := false 398 399 for p, fp := range f.peers { 400 for hash, n := range fp.nodeByHash { 401 if !f.checkKnownNode(p, n) && !n.requested && (bestTd == nil || n.td.Cmp(bestTd) >= 0) { 402 amount := f.requestAmount(p, n) 403 if bestTd == nil || n.td.Cmp(bestTd) > 0 || amount < bestAmount { 404 bestHash = hash 405 bestAmount = amount 406 bestTd = n.td 407 bestSyncing = fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root) 408 } 409 } 410 } 411 } 412 if bestTd == f.maxConfirmedTd { 413 return nil, 0 414 } 415 416 f.syncing = bestSyncing 417 418 var rq *distReq 419 reqID := getNextReqID() 420 if f.syncing { 421 rq = &distReq{ 422 getCost: func(dp distPeer) uint64 { 423 return 
0 424 }, 425 canSend: func(dp distPeer) bool { 426 p := dp.(*peer) 427 fp := f.peers[p] 428 return fp != nil && fp.nodeByHash[bestHash] != nil 429 }, 430 request: func(dp distPeer) func() { 431 go func() { 432 p := dp.(*peer) 433 p.Log().Debug("Synchronisation started") 434 f.pm.synchronise(p) 435 f.syncDone <- p 436 }() 437 return nil 438 }, 439 } 440 } else { 441 rq = &distReq{ 442 getCost: func(dp distPeer) uint64 { 443 p := dp.(*peer) 444 return p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount)) 445 }, 446 canSend: func(dp distPeer) bool { 447 p := dp.(*peer) 448 f.lock.Lock() 449 defer f.lock.Unlock() 450 451 fp := f.peers[p] 452 if fp == nil { 453 return false 454 } 455 n := fp.nodeByHash[bestHash] 456 return n != nil && !n.requested 457 }, 458 request: func(dp distPeer) func() { 459 p := dp.(*peer) 460 f.lock.Lock() 461 fp := f.peers[p] 462 if fp != nil { 463 n := fp.nodeByHash[bestHash] 464 if n != nil { 465 n.requested = true 466 } 467 } 468 f.lock.Unlock() 469 470 cost := p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount)) 471 p.fcServer.QueueRequest(reqID, cost) 472 f.reqMu.Lock() 473 f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()} 474 f.reqMu.Unlock() 475 go func() { 476 time.Sleep(hardRequestTimeout) 477 f.timeoutChn <- reqID 478 }() 479 return func() { p.RequestHeadersByHash(reqID, cost, bestHash, int(bestAmount), 0, true) } 480 }, 481 } 482 } 483 return rq, reqID 484 } 485 486 // deliverHeaders delivers header download request responses for processing 487 func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types.Header) { 488 f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer} 489 } 490 491 // processResponse processes header download request responses, returns true if successful 492 func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool { 493 if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash { 
494 req.peer.Log().Debug("Response content mismatch", "requested", len(resp.headers), "reqfrom", resp.headers[0], "delivered", req.amount, "delfrom", req.hash) 495 return false 496 } 497 headers := make([]*types.Header, req.amount) 498 for i, header := range resp.headers { 499 headers[int(req.amount)-1-i] = header 500 } 501 if _, err := f.chain.InsertHeaderChain(headers, 1); err != nil { 502 if err == consensus.ErrFutureBlock { 503 return true 504 } 505 log.Debug("Failed to insert header chain", "err", err) 506 return false 507 } 508 tds := make([]*big.Int, len(headers)) 509 for i, header := range headers { 510 td := f.chain.GetTd(header.Hash(), header.Number.Uint64()) 511 if td == nil { 512 log.Debug("Total difficulty not found for header", "index", i+1, "number", header.Number, "hash", header.Hash()) 513 return false 514 } 515 tds[i] = td 516 } 517 f.newHeaders(headers, tds) 518 return true 519 } 520 521 // newHeaders updates the block trees of all active peers according to a newly 522 // downloaded and validated batch or headers 523 func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) { 524 var maxTd *big.Int 525 for p, fp := range f.peers { 526 if !f.checkAnnouncedHeaders(fp, headers, tds) { 527 p.Log().Debug("Inconsistent announcement") 528 go f.pm.removePeer(p.id) 529 } 530 if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) { 531 maxTd = fp.confirmedTd 532 } 533 } 534 if maxTd != nil { 535 f.updateMaxConfirmedTd(maxTd) 536 } 537 } 538 539 // checkAnnouncedHeaders updates peer's block tree if necessary after validating 540 // a batch of headers. It searches for the latest header in the batch that has a 541 // matching tree node (if any), and if it has not been marked as known already, 542 // sets it and its parents to known (even those which are older than the currently 543 // validated ones). 
// checkAnnouncedHeaders updates peer's block tree if necessary after validating
// a batch of headers. It searches for the latest header in the batch that has a
// matching tree node (if any), and if it has not been marked as known already,
// sets it and its parents to known (even those which are older than the currently
// validated ones). Return value shows if all hashes, numbers and Tds matched
// correctly to the announced values (otherwise the peer should be dropped).
func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*types.Header, tds []*big.Int) bool {
	var (
		n      *fetcherTreeNode
		header *types.Header
		td     *big.Int
	)

	// iterate from the newest delivered header backwards; once the batch is
	// exhausted, keep walking back through the local chain (i < 0 branch)
	for i := len(headers) - 1; ; i-- {
		if i < 0 {
			if n == nil {
				// no more headers and nothing to match
				return true
			}
			// we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching
			td = f.chain.GetTd(header.ParentHash, header.Number.Uint64()-1)
			header = f.chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
		} else {
			header = headers[i]
			td = tds[i]
		}
		hash := header.Hash()
		number := header.Number.Uint64()
		if n == nil {
			// look for the first tree node matching a delivered header
			n = fp.nodeByHash[hash]
		}
		if n != nil {
			if n.td == nil {
				// node was unannounced
				if nn := fp.nodeByHash[hash]; nn != nil {
					// if there was already a node with the same hash, continue there and drop this one
					nn.children = append(nn.children, n.children...)
					n.children = nil
					fp.deleteNode(n)
					n = nn
				} else {
					// fill out the intermediate node with the validated values
					n.hash = hash
					n.td = td
					fp.nodeByHash[hash] = n
				}
			}
			// check if it matches the header
			if n.hash != hash || n.number != number || n.td.Cmp(td) != 0 {
				// peer has previously made an invalid announcement
				return false
			}
			if n.known {
				// we reached a known node that matched our expectations, return with success
				return true
			}
			n.known = true
			if fp.confirmedTd == nil || td.Cmp(fp.confirmedTd) > 0 {
				// this is the peer's new highest confirmed (announced+validated) head
				fp.confirmedTd = td
				fp.bestConfirmed = n
			}
			n = n.parent
			if n == nil {
				return true
			}
		}
	}
}
// checkSyncedHeaders updates peer's block tree after synchronisation by marking
// downloaded headers as known. If none of the announced headers are found after
// syncing, the peer is dropped.
func (f *lightFetcher) checkSyncedHeaders(p *peer) {
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check sync headers")
		return
	}
	// walk back from the last announced node until one that is present in the
	// local chain (GetTd succeeds) is found
	n := fp.lastAnnounced
	var td *big.Int
	for n != nil {
		if td = f.chain.GetTd(n.hash, n.number); td != nil {
			break
		}
		n = n.parent
	}
	// now n is the latest downloaded header after syncing
	if n == nil {
		p.Log().Debug("Synchronisation failed")
		go f.pm.removePeer(p.id)
	} else {
		header := f.chain.GetHeader(n.hash, n.number)
		f.newHeaders([]*types.Header{header}, []*big.Int{td})
	}
}

// checkKnownNode checks if a block tree node is known (downloaded and validated)
// If it was not known previously but found in the database, sets its known flag
func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
	if n.known {
		return true
	}
	td := f.chain.GetTd(n.hash, n.number)
	if td == nil {
		// header not in the local database yet, node stays unknown
		return false
	}

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check known nodes")
		return false
	}
	header := f.chain.GetHeader(n.hash, n.number)
	// checkAnnouncedHeaders marks the node and its ancestors known if the
	// stored header matches the announced values
	if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
		p.Log().Debug("Inconsistent announcement")
		go f.pm.removePeer(p.id)
	}
	if fp.confirmedTd != nil {
		f.updateMaxConfirmedTd(fp.confirmedTd)
	}
	return n.known
}

// deleteNode deletes a node and its child subtrees from a peer's block tree
func (fp *fetcherPeerInfo) deleteNode(n *fetcherTreeNode) {
	if n.parent != nil {
		// unlink the node from its parent's child list
		for i, nn := range n.parent.children {
			if nn == n {
				n.parent.children = append(n.parent.children[:i], n.parent.children[i+1:]...)
				break
			}
		}
	}
	for {
		if n.td != nil {
			// only announced nodes (td set) are indexed in nodeByHash
			delete(fp.nodeByHash, n.hash)
		}
		fp.nodeCnt--
		if len(n.children) == 0 {
			return
		}
		// descend iteratively into the first child, recurse into the rest
		for i, nn := range n.children {
			if i == 0 {
				n = nn
			} else {
				fp.deleteNode(nn)
			}
		}
	}
}

// updateStatsEntry items form a linked list that is expanded with a new item every time a new head with a higher Td
// than the previous one has been downloaded and validated. The list contains a series of maximum confirmed Td values
// and the time these values have been confirmed, both increasing monotonically. A maximum confirmed Td is calculated
// both globally for all peers and also for each individual peer (meaning that the given peer has announced the head
// and it has also been downloaded from any peer, either before or after the given announcement).
// The linked list has a global tail where new confirmed Td entries are added and a separate head for each peer,
// pointing to the next Td entry that is higher than the peer's max confirmed Td (nil if it has already confirmed
// the current global head).
type updateStatsEntry struct {
	time mclock.AbsTime
	td   *big.Int
	next *updateStatsEntry
}

// updateMaxConfirmedTd updates the block delay statistics of active peers. Whenever a new highest Td is confirmed,
// adds it to the end of a linked list together with the time it has been confirmed. Then checks which peers have
// already confirmed a head with the same or higher Td (which counts as zero block delay) and updates their statistics.
// Those who have not confirmed such a head by now will be updated by a subsequent checkUpdateStats call with a
// positive block delay value.
708 func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) { 709 if f.maxConfirmedTd == nil || td.Cmp(f.maxConfirmedTd) > 0 { 710 f.maxConfirmedTd = td 711 newEntry := &updateStatsEntry{ 712 time: mclock.Now(), 713 td: td, 714 } 715 if f.lastUpdateStats != nil { 716 f.lastUpdateStats.next = newEntry 717 } 718 f.lastUpdateStats = newEntry 719 for p := range f.peers { 720 f.checkUpdateStats(p, newEntry) 721 } 722 } 723 } 724 725 // checkUpdateStats checks those peers who have not confirmed a certain highest Td (or a larger one) by the time it 726 // has been confirmed by another peer. If they have confirmed such a head by now, their stats are updated with the 727 // block delay which is (this peer's confirmation time)-(first confirmation time). After blockDelayTimeout has passed, 728 // the stats are updated with blockDelayTimeout value. In either case, the confirmed or timed out updateStatsEntry 729 // items are removed from the head of the linked list. 730 // If a new entry has been added to the global tail, it is passed as a parameter here even though this function 731 // assumes that it has already been added, so that if the peer's list is empty (all heads confirmed, head is nil), 732 // it can set the new head to newEntry. 
733 func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) { 734 now := mclock.Now() 735 fp := f.peers[p] 736 if fp == nil { 737 p.Log().Debug("Unknown peer to check update stats") 738 return 739 } 740 if newEntry != nil && fp.firstUpdateStats == nil { 741 fp.firstUpdateStats = newEntry 742 } 743 for fp.firstUpdateStats != nil && fp.firstUpdateStats.time <= now-mclock.AbsTime(blockDelayTimeout) { 744 f.pm.serverPool.adjustBlockDelay(p.poolEntry, blockDelayTimeout) 745 fp.firstUpdateStats = fp.firstUpdateStats.next 746 } 747 if fp.confirmedTd != nil { 748 for fp.firstUpdateStats != nil && fp.firstUpdateStats.td.Cmp(fp.confirmedTd) <= 0 { 749 f.pm.serverPool.adjustBlockDelay(p.poolEntry, time.Duration(now-fp.firstUpdateStats.time)) 750 fp.firstUpdateStats = fp.firstUpdateStats.next 751 } 752 } 753 }