github.com/m3shine/gochain@v2.2.26+incompatible/les/fetcher.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package les implements the Light Ethereum Subprotocol.
package les

import (
	"context"
	"math/big"
	"sync"
	"time"

	"github.com/gochain-io/gochain/common"
	"github.com/gochain-io/gochain/common/mclock"
	"github.com/gochain-io/gochain/consensus"
	"github.com/gochain-io/gochain/core/rawdb"
	"github.com/gochain-io/gochain/core/types"
	"github.com/gochain-io/gochain/light"
	"github.com/gochain-io/gochain/log"
)

const (
	blockDelayTimeout = time.Second * 10 // timeout for a peer to announce a head that has already been confirmed by others
	maxNodeCount      = 20               // maximum number of fetcherTreeNode entries remembered for each peer
)

// lightFetcher implements retrieval of newly announced block headers: it keeps
// a block tree for each active peer, selects the next best announced head to
// download, and feeds validated headers back into the peers' trees.
type lightFetcher struct {
	pm    *ProtocolManager
	odr   *LesOdr
	chain *light.LightChain

	maxConfirmedTd  *big.Int
	peers           map[*peer]*fetcherPeerInfo
	lastUpdateStats *updateStatsEntry

	lock       sync.Mutex // protects the peer set, the block trees and the sync state
	deliverChn chan fetchResponse
	reqMu      sync.RWMutex
	requested  map[uint64]fetchRequest
	timeoutChn chan uint64
	requestChn chan bool // true if initiated from outside
	syncing    bool
	syncDone   chan *peer
}

// fetcherPeerInfo holds fetcher-specific information about each active peer
type fetcherPeerInfo struct {
	root, lastAnnounced *fetcherTreeNode
	nodeCnt             int
	confirmedTd         *big.Int
	bestConfirmed       *fetcherTreeNode
	nodeByHash          map[common.Hash]*fetcherTreeNode
	firstUpdateStats    *updateStatsEntry
}

// fetcherTreeNode is a node of a tree that holds information about blocks recently
// announced and confirmed by a certain peer. Each new announce message from a peer
// adds nodes to the tree, based on the previous announced head and the reorg depth.
// There are three possible states for a tree node:
//   - announced: not downloaded (known) yet, but we know its hash, number and td
//   - intermediate: not known, hash and td are empty; they are filled out when it becomes known
//   - known: both announced by this peer and downloaded (from any peer)
// This structure makes it possible to always know which peer has a certain block,
// which is necessary for selecting a suitable peer for ODR requests and also for
// canonizing new heads. It also helps to always download the minimum necessary
// amount of headers with a single request.
type fetcherTreeNode struct {
	hash             common.Hash
	number           uint64
	td               *big.Int
	known, requested bool
	parent           *fetcherTreeNode
	children         []*fetcherTreeNode
}
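
// nodeState is an illustrative sketch (a hypothetical helper, not used by the
// fetcher itself) that makes the three node states described above concrete:
// the known flag wins, an empty td marks an intermediate placeholder, and
// everything else is an announced-but-not-yet-downloaded node.
func nodeState(n *fetcherTreeNode) string {
	switch {
	case n.known:
		return "known" // announced by this peer and downloaded from any peer
	case n.td == nil:
		return "intermediate" // placeholder; hash and td are filled in later
	default:
		return "announced" // hash, number and td known, header not yet downloaded
	}
}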

// fetchRequest represents a header download request
type fetchRequest struct {
	hash    common.Hash
	amount  uint64
	peer    *peer
	sent    mclock.AbsTime
	timeout bool
}

// fetchResponse represents a header download response
type fetchResponse struct {
	reqID   uint64
	headers []*types.Header
	peer    *peer
}

// newLightFetcher creates a new light fetcher
func newLightFetcher(pm *ProtocolManager) *lightFetcher {
	f := &lightFetcher{
		pm:             pm,
		chain:          pm.blockchain.(*light.LightChain),
		odr:            pm.odr,
		peers:          make(map[*peer]*fetcherPeerInfo),
		deliverChn:     make(chan fetchResponse, 100),
		requested:      make(map[uint64]fetchRequest),
		timeoutChn:     make(chan uint64),
		requestChn:     make(chan bool, 100),
		syncDone:       make(chan *peer),
		maxConfirmedTd: big.NewInt(0),
	}
	pm.peers.notify(f)

	f.pm.wg.Add(1)
	go f.syncLoop()
	return f
}

// syncLoop is the main event loop of the light fetcher
func (f *lightFetcher) syncLoop() {
	ctx := context.TODO()
	requesting := false
	defer f.pm.wg.Done()
	for {
		select {
		case <-f.pm.quitSync:
			return
		// when a new announce is received, the request loop keeps running until
		// no further requests are necessary or possible
		case newAnnounce := <-f.requestChn:
			f.lock.Lock()
			s := requesting
			requesting = false
			var (
				rq    *distReq
				reqID uint64
			)
			if !f.syncing && !(newAnnounce && s) {
				rq, reqID = f.nextRequest()
			}
			syncing := f.syncing
			f.lock.Unlock()

			if rq != nil {
				requesting = true
				_, ok := <-f.pm.reqDist.queue(rq)
				if !ok {
					f.requestChn <- false
				}

				if !syncing {
					go func() {
						time.Sleep(softRequestTimeout)
						f.reqMu.Lock()
						req, ok := f.requested[reqID]
						if ok {
							req.timeout = true
							f.requested[reqID] = req
						}
						f.reqMu.Unlock()
						// keep starting new requests while possible
						f.requestChn <- false
					}()
				}
			}
		case reqID := <-f.timeoutChn:
			f.reqMu.Lock()
			req, ok := f.requested[reqID]
			if ok {
				delete(f.requested, reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)
				req.peer.Log().Debug("Fetching data timed out hard")
				go f.pm.removePeer(req.peer.id)
			}
		case resp := <-f.deliverChn:
			f.reqMu.Lock()
			req, ok := f.requested[resp.reqID]
			if ok && req.peer != resp.peer {
				ok = false
			}
			if ok {
				delete(f.requested, resp.reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), req.timeout)
			}
			f.lock.Lock()
			if !ok || !(f.syncing || f.processResponse(ctx, req, resp)) {
				resp.peer.Log().Debug("Failed processing response")
				go f.pm.removePeer(resp.peer.id)
			}
			f.lock.Unlock()
		case p := <-f.syncDone:
			f.lock.Lock()
			p.Log().Debug("Done synchronising with peer")
			f.checkSyncedHeaders(p)
			f.syncing = false
			f.lock.Unlock()
		}
	}
}
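
// The loop above applies a two-tier timeout to every header request: after
// softRequestTimeout the request is merely marked as timed out (a late
// response is still processed, but the peer's response-time statistics are
// penalised), while the hardRequestTimeout goroutine started in nextRequest
// feeds timeoutChn and drops the peer entirely. A minimal sketch of the
// pattern in isolation (a hypothetical helper, not used by the fetcher; the
// real loop additionally checks that the request is still pending before
// acting on either timer):
func softHardTimeout(soft, hard time.Duration, onSoft, onHard func()) {
	go func() {
		time.Sleep(soft)
		onSoft() // degrade: mark the request as timed out, keep waiting
	}()
	go func() {
		time.Sleep(hard)
		onHard() // give up: treat the peer as unresponsive
	}()
}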

// registerPeer adds a new peer to the fetcher's peer set
func (f *lightFetcher) registerPeer(p *peer) {
	p.lock.Lock()
	p.hasBlock = func(hash common.Hash, number uint64) bool {
		return f.peerHasBlock(p, hash, number)
	}
	p.lock.Unlock()

	f.lock.Lock()
	defer f.lock.Unlock()

	f.peers[p] = &fetcherPeerInfo{nodeByHash: make(map[common.Hash]*fetcherTreeNode)}
}

// unregisterPeer removes a peer from the fetcher's peer set
func (f *lightFetcher) unregisterPeer(p *peer) {
	p.lock.Lock()
	p.hasBlock = nil
	p.lock.Unlock()

	f.lock.Lock()
	defer f.lock.Unlock()

	// check for potentially timed out block delay statistics
	f.checkUpdateStats(p, nil)
	delete(f.peers, p)
}

// announce processes a new announcement message received from a peer, adding new
// nodes to the peer's block tree and removing old nodes if necessary
func (f *lightFetcher) announce(p *peer, head *announceData) {
	f.lock.Lock()
	defer f.lock.Unlock()
	p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth)

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Announcement from unknown peer")
		return
	}

	if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {
		// announced tds should be strictly monotonic
		p.Log().Debug("Received non-monotonic td", "current", head.Td, "previous", fp.lastAnnounced.td)
		go f.pm.removePeer(p.id)
		return
	}

	n := fp.lastAnnounced
	for i := uint64(0); i < head.ReorgDepth; i++ {
		if n == nil {
			break
		}
		n = n.parent
	}
	if n != nil {
		// n is now the reorg common ancestor, add a new branch of nodes
		// check if the node count is too high to add new nodes
		locked := false
		for uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil {
			if !locked {
				f.chain.LockChain()
				defer f.chain.UnlockChain()
				locked = true
			}
			// if one of root's children is canonical, keep it, delete the other branches and the root itself
			var newRoot *fetcherTreeNode
			for i, nn := range fp.root.children {
				if rawdb.ReadCanonicalHash(f.pm.chainDb, nn.number) == nn.hash {
					fp.root.children = append(fp.root.children[:i], fp.root.children[i+1:]...)
					nn.parent = nil
					newRoot = nn
					break
				}
			}
			fp.deleteNode(fp.root)
			if n == fp.root {
				n = newRoot
			}
			fp.root = newRoot
			if newRoot == nil || !f.checkKnownNode(p, newRoot) {
				fp.bestConfirmed = nil
				fp.confirmedTd = nil
			}

			if n == nil {
				break
			}
		}
		if n != nil {
			for n.number < head.Number {
				nn := &fetcherTreeNode{number: n.number + 1, parent: n}
				n.children = append(n.children, nn)
				n = nn
				fp.nodeCnt++
			}
			n.hash = head.Hash
			n.td = head.Td
			fp.nodeByHash[n.hash] = n
		}
	}
	if n == nil {
		// could not find the reorg common ancestor or had to delete the entire tree; a new root and a resync are needed
		if fp.root != nil {
			fp.deleteNode(fp.root)
		}
		n = &fetcherTreeNode{hash: head.Hash, number: head.Number, td: head.Td}
		fp.root = n
		fp.nodeCnt++
		fp.nodeByHash[n.hash] = n
		fp.bestConfirmed = nil
		fp.confirmedTd = nil
	}

	f.checkKnownNode(p, n)
	p.lock.Lock()
	p.headInfo = head
	fp.lastAnnounced = n
	p.lock.Unlock()
	f.checkUpdateStats(p, nil)
	f.requestChn <- true
}
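
// As a worked example of the tree update in announce (block numbers assumed
// purely for illustration): if the peer's last announced head was block 100
// and it now announces block 102 with ReorgDepth 1, the common ancestor is
// node 99 (one parent step back from 100). Two intermediate placeholder nodes
// for heights 100' and 101' are appended as a new branch under 99, and the
// branch tip receives the announced hash and td of 102. With ReorgDepth 0 and
// head 101, only a single announced child of node 100 would be created.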

// peerHasBlock returns true if we can assume the peer knows the given block
// based on its announcements
func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64) bool {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.syncing {
		// always return true when syncing
		// false positives are acceptable, a more sophisticated condition can be implemented later
		return true
	}

	fp := f.peers[p]
	if fp == nil || fp.root == nil {
		return false
	}

	if number >= fp.root.number {
		// it is recent enough that if it is known, it should be in the peer's block tree
		return fp.nodeByHash[hash] != nil
	}
	f.chain.LockChain()
	defer f.chain.UnlockChain()
	// if it's older than the peer's block tree root but it's in the same canonical chain
	// as the root, we can still be sure the peer knows it
	return rawdb.ReadCanonicalHash(f.pm.chainDb, fp.root.number) == fp.root.hash && rawdb.ReadCanonicalHash(f.pm.chainDb, number) == hash
}

// requestAmount calculates the number of headers to be downloaded starting
// from a certain head backwards
func (f *lightFetcher) requestAmount(p *peer, n *fetcherTreeNode) uint64 {
	amount := uint64(0)
	nn := n
	for nn != nil && !f.checkKnownNode(p, nn) {
		nn = nn.parent
		amount++
	}
	if nn == nil {
		amount = n.number
	}
	return amount
}

// requestedID reports whether a certain reqID has been requested by the fetcher
func (f *lightFetcher) requestedID(reqID uint64) bool {
	f.reqMu.RLock()
	_, ok := f.requested[reqID]
	f.reqMu.RUnlock()
	return ok
}
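
// requestAmount determines how many headers a candidate head would cost in
// the request selection below. As a worked example (values assumed for
// illustration): if block 99 in a peer's tree is already known and the peer
// has announced block 102, requestAmount walks 102 -> 101 -> 100 and stops at
// 99, returning 3, so a single request for three headers backwards from the
// announced hash covers exactly the unknown range. If no known ancestor
// exists, the whole chain up to the announced number is requested.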

// nextRequest selects the peer and announced head to be requested next; the
// amount to be downloaded, starting from the head backwards, is also returned
func (f *lightFetcher) nextRequest() (*distReq, uint64) {
	var (
		bestHash   common.Hash
		bestAmount uint64
	)
	bestTd := f.maxConfirmedTd
	bestSyncing := false

	for p, fp := range f.peers {
		for hash, n := range fp.nodeByHash {
			if !f.checkKnownNode(p, n) && !n.requested && (bestTd == nil || n.td.Cmp(bestTd) >= 0) {
				amount := f.requestAmount(p, n)
				if bestTd == nil || n.td.Cmp(bestTd) > 0 || amount < bestAmount {
					bestHash = hash
					bestAmount = amount
					bestTd = n.td
					bestSyncing = fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root)
				}
			}
		}
	}
	if bestTd == f.maxConfirmedTd {
		return nil, 0
	}

	f.syncing = bestSyncing

	var rq *distReq
	reqID := genReqID()
	if f.syncing {
		rq = &distReq{
			getCost: func(dp distPeer) uint64 {
				return 0
			},
			canSend: func(dp distPeer) bool {
				p := dp.(*peer)
				f.lock.Lock()
				defer f.lock.Unlock()

				fp := f.peers[p]
				return fp != nil && fp.nodeByHash[bestHash] != nil
			},
			request: func(dp distPeer) func(context.Context) {
				go func() {
					p := dp.(*peer)
					p.Log().Debug("Synchronisation started")
					f.pm.synchronise(p)
					f.syncDone <- p
				}()
				return nil
			},
		}
	} else {
		rq = &distReq{
			getCost: func(dp distPeer) uint64 {
				p := dp.(*peer)
				return p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
			},
			canSend: func(dp distPeer) bool {
				p := dp.(*peer)
				f.lock.Lock()
				defer f.lock.Unlock()

				fp := f.peers[p]
				if fp == nil {
					return false
				}
				n := fp.nodeByHash[bestHash]
				return n != nil && !n.requested
			},
			request: func(dp distPeer) func(context.Context) {
				p := dp.(*peer)
				f.lock.Lock()
				fp := f.peers[p]
				if fp != nil {
					n := fp.nodeByHash[bestHash]
					if n != nil {
						n.requested = true
					}
				}
				f.lock.Unlock()

				cost := p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
				p.fcServer.QueueRequest(reqID, cost)
				f.reqMu.Lock()
				f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()}
				f.reqMu.Unlock()
				go func() {
					time.Sleep(hardRequestTimeout)
					f.timeoutChn <- reqID
				}()
				return func(ctx context.Context) {
					if err := p.RequestHeadersByHash(ctx, reqID, cost, bestHash, int(bestAmount), 0, true); err != nil {
						log.Error("Cannot request headers by hash", "req_id", reqID, "err", err)
					}
				}
			},
		}
	}
	return rq, reqID
}

// deliverHeaders delivers header download request responses for processing
func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types.Header) {
	f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer}
}
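
// Headers for a fetch request are retrieved backwards from the announced hash
// (the reverse flag of RequestHeadersByHash above is true), so responses
// arrive newest-first, while InsertHeaderChain expects ascending order.
// reverseHeaders is an illustrative sketch of that flip (a hypothetical
// helper; processResponse below performs the same reordering inline):
func reverseHeaders(in []*types.Header) []*types.Header {
	out := make([]*types.Header, len(in))
	for i, h := range in {
		out[len(in)-1-i] = h
	}
	return out
}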

// processResponse processes header download request responses and returns true if successful
func (f *lightFetcher) processResponse(ctx context.Context, req fetchRequest, resp fetchResponse) bool {
	if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {
		req.peer.Log().Debug("Response content mismatch", "requested", req.amount, "reqfrom", req.hash, "delivered", len(resp.headers))
		return false
	}
	headers := make([]*types.Header, req.amount)
	for i, header := range resp.headers {
		headers[int(req.amount)-1-i] = header
	}
	if _, err := f.chain.InsertHeaderChain(ctx, headers, 1); err != nil {
		if err == consensus.ErrFutureBlock {
			return true
		}
		log.Debug("Failed to insert header chain", "err", err)
		return false
	}
	tds := make([]*big.Int, len(headers))
	for i, header := range headers {
		td := f.chain.GetTd(header.Hash(), header.Number.Uint64())
		if td == nil {
			log.Debug("Total difficulty not found for header", "index", i+1, "number", header.Number, "hash", header.Hash())
			return false
		}
		tds[i] = td
	}
	f.newHeaders(headers, tds)
	return true
}

// newHeaders updates the block trees of all active peers according to a newly
// downloaded and validated batch of headers
func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) {
	var maxTd *big.Int
	for p, fp := range f.peers {
		if !f.checkAnnouncedHeaders(fp, headers, tds) {
			p.Log().Debug("Inconsistent announcement")
			go f.pm.removePeer(p.id)
		}
		if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) {
			maxTd = fp.confirmedTd
		}
	}
	if maxTd != nil {
		f.updateMaxConfirmedTd(maxTd)
	}
}

// checkAnnouncedHeaders updates the peer's block tree if necessary after validating
// a batch of headers. It searches for the latest header in the batch that has a
// matching tree node (if any), and if it has not been marked as known already,
// sets it and its parents to known (even those which are older than the currently
// validated ones). The return value shows whether all hashes, numbers and Tds
// matched the announced values (otherwise the peer should be dropped).
func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*types.Header, tds []*big.Int) bool {
	var (
		n      *fetcherTreeNode
		header *types.Header
		td     *big.Int
	)

	for i := len(headers) - 1; ; i-- {
		if i < 0 {
			if n == nil {
				// no more headers and nothing to match
				return true
			}
			// we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching
			td = f.chain.GetTd(header.ParentHash, header.Number.Uint64()-1)
			header = f.chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
		} else {
			header = headers[i]
			td = tds[i]
		}
		hash := header.Hash()
		number := header.Number.Uint64()
		if n == nil {
			n = fp.nodeByHash[hash]
		}
		if n != nil {
			if n.td == nil {
				// node was unannounced
				if nn := fp.nodeByHash[hash]; nn != nil {
					// if there was already a node with the same hash, continue there and drop this one
					nn.children = append(nn.children, n.children...)
					n.children = nil
					fp.deleteNode(n)
					n = nn
				} else {
					n.hash = hash
					n.td = td
					fp.nodeByHash[hash] = n
				}
			}
			// check if it matches the header
			if n.hash != hash || n.number != number || n.td.Cmp(td) != 0 {
				// the peer has previously made an invalid announcement
				return false
			}
			if n.known {
				// we reached a known node that matched our expectations, return with success
				return true
			}
			n.known = true
			if fp.confirmedTd == nil || td.Cmp(fp.confirmedTd) > 0 {
				fp.confirmedTd = td
				fp.bestConfirmed = n
			}
			n = n.parent
			if n == nil {
				return true
			}
		}
	}
}
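
// As a worked example of the matching loop above (values assumed for
// illustration): for a validated batch covering blocks 100-102, the loop
// starts at 102 and looks it up in nodeByHash. Once a tree node matches, the
// loop follows parent pointers, marking 102, 101 and 100 as known; if an
// ancestor such as 99 is still unmatched when the batch runs out (i < 0), the
// loop keeps following parent hashes through the local chain database until
// it reaches an already-known node or the root's nil parent.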

// checkSyncedHeaders updates the peer's block tree after synchronisation by marking
// downloaded headers as known. If none of the announced headers are found after
// syncing, the peer is dropped.
func (f *lightFetcher) checkSyncedHeaders(p *peer) {
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check sync headers")
		return
	}
	n := fp.lastAnnounced
	var td *big.Int
	for n != nil {
		if td = f.chain.GetTd(n.hash, n.number); td != nil {
			break
		}
		n = n.parent
	}
	// now n is the latest downloaded header after syncing
	if n == nil {
		p.Log().Debug("Synchronisation failed")
		go f.pm.removePeer(p.id)
	} else {
		header := f.chain.GetHeader(n.hash, n.number)
		f.newHeaders([]*types.Header{header}, []*big.Int{td})
	}
}

// checkKnownNode checks if a block tree node is known (downloaded and validated).
// If it was not known previously but is found in the database, its known flag is set.
func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
	if n.known {
		return true
	}
	td := f.chain.GetTd(n.hash, n.number)
	if td == nil {
		return false
	}

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check known nodes")
		return false
	}
	header := f.chain.GetHeader(n.hash, n.number)
	if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
		p.Log().Debug("Inconsistent announcement")
		go f.pm.removePeer(p.id)
	}
	if fp.confirmedTd != nil {
		f.updateMaxConfirmedTd(fp.confirmedTd)
	}
	return n.known
}

// deleteNode deletes a node and its child subtrees from a peer's block tree
func (fp *fetcherPeerInfo) deleteNode(n *fetcherTreeNode) {
	if n.parent != nil {
		for i, nn := range n.parent.children {
			if nn == n {
				n.parent.children = append(n.parent.children[:i], n.parent.children[i+1:]...)
				break
			}
		}
	}
	for {
		if n.td != nil {
			delete(fp.nodeByHash, n.hash)
		}
		fp.nodeCnt--
		if len(n.children) == 0 {
			return
		}
		for i, nn := range n.children {
			if i == 0 {
				n = nn
			} else {
				fp.deleteNode(nn)
			}
		}
	}
}

// updateStatsEntry items form a linked list that is expanded with a new item every time a new head with a higher Td
// than the previous one has been downloaded and validated. The list contains a series of maximum confirmed Td values
// and the times at which these values were confirmed, both increasing monotonically. A maximum confirmed Td is calculated
// both globally for all peers and also for each individual peer (meaning that the given peer has announced the head
// and it has also been downloaded from any peer, either before or after the given announcement).
// The linked list has a global tail where new confirmed Td entries are added and a separate head for each peer,
// pointing to the next Td entry that is higher than the peer's max confirmed Td (nil if it has already confirmed
// the current global head).
type updateStatsEntry struct {
	time mclock.AbsTime
	td   *big.Int
	next *updateStatsEntry
}
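
// pendingEntries is an illustrative sketch (a hypothetical helper, not used
// by the fetcher) of how the list is consumed: starting from a peer's
// firstUpdateStats head and following next until the global tail yields
// exactly the confirmed Td values this peer has not caught up with yet.
func pendingEntries(first *updateStatsEntry) []*big.Int {
	var tds []*big.Int
	for e := first; e != nil; e = e.next {
		tds = append(tds, e.td)
	}
	return tds
}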

// updateMaxConfirmedTd updates the block delay statistics of active peers. Whenever a new highest Td is confirmed,
// it is added to the end of a linked list together with the time it was confirmed. Then the function checks which peers
// have already confirmed a head with the same or higher Td (which counts as zero block delay) and updates their statistics.
// Those who have not confirmed such a head by now will be updated by a subsequent checkUpdateStats call with a
// positive block delay value.
func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) {
	if f.maxConfirmedTd == nil || td.Cmp(f.maxConfirmedTd) > 0 {
		f.maxConfirmedTd = td
		newEntry := &updateStatsEntry{
			time: mclock.Now(),
			td:   td,
		}
		if f.lastUpdateStats != nil {
			f.lastUpdateStats.next = newEntry
		}
		f.lastUpdateStats = newEntry
		for p := range f.peers {
			f.checkUpdateStats(p, newEntry)
		}
	}
}

// checkUpdateStats checks those peers who have not confirmed a certain highest Td (or a larger one) by the time it
// has been confirmed by another peer. If they have confirmed such a head by now, their stats are updated with the
// block delay, which is (this peer's confirmation time)-(first confirmation time). After blockDelayTimeout has passed,
// the stats are updated with the blockDelayTimeout value instead. In either case, the confirmed or timed out
// updateStatsEntry items are removed from the head of the linked list.
// If a new entry has been added to the global tail, it is passed as a parameter here even though this function
// assumes that it has already been added, so that if the peer's list is empty (all heads confirmed, head is nil),
// it can set the new head to newEntry.
func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) {
	now := mclock.Now()
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check update stats")
		return
	}
	if newEntry != nil && fp.firstUpdateStats == nil {
		fp.firstUpdateStats = newEntry
	}
	for fp.firstUpdateStats != nil && fp.firstUpdateStats.time <= now-mclock.AbsTime(blockDelayTimeout) {
		f.pm.serverPool.adjustBlockDelay(p.poolEntry, blockDelayTimeout)
		fp.firstUpdateStats = fp.firstUpdateStats.next
	}
	if fp.confirmedTd != nil {
		for fp.firstUpdateStats != nil && fp.firstUpdateStats.td.Cmp(fp.confirmedTd) <= 0 {
			f.pm.serverPool.adjustBlockDelay(p.poolEntry, time.Duration(now-fp.firstUpdateStats.time))
			fp.firstUpdateStats = fp.firstUpdateStats.next
		}
	}
}
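
// As a worked example of the statistics above (times assumed for
// illustration): if peer A confirms a new highest Td at t=0s and peer B
// confirms the same head at t=2s, checkUpdateStats records a 2s block delay
// for B and advances B's firstUpdateStats pointer past that entry. If B has
// still not confirmed the head after blockDelayTimeout (10s), the entry is
// dropped anyway and the full timeout value is recorded as B's delay.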