// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package les implements the Light Ethereum Subprotocol.
package les

import (
	"math/big"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/log"
)

const (
	// blockDelayTimeout is the timeout for a peer to announce a head that has
	// already been confirmed by others; peers slower than this are penalized
	// with the full timeout value in their block delay statistics.
	blockDelayTimeout = time.Second * 10
	// maxNodeCount is the maximum number of fetcherTreeNode entries remembered
	// for each peer; announce prunes the tree when this limit is exceeded.
	maxNodeCount = 20
)

// lightFetcher implements retrieval of newly announced headers. It also provides a peerHasBlock function for the
// ODR system to ensure that we only request data related to a certain block from peers who have already processed
// and announced that block.
type lightFetcher struct {
	pm    *ProtocolManager   // owning protocol manager (peer set, request distributor, sync)
	odr   *LesOdr            // ODR backend served by peerHasBlock
	chain *light.LightChain  // light header chain used to validate and store headers

	lock            sync.Mutex // lock protects access to the fetcher's internal state variables except sent requests
	maxConfirmedTd  *big.Int   // highest total difficulty confirmed so far (announced by a peer and downloaded from any peer)
	peers           map[*peer]*fetcherPeerInfo
	lastUpdateStats *updateStatsEntry // global tail of the block delay statistics linked list
	syncing         bool              // true while a full synchronisation (rather than a single header fetch) is in progress
	syncDone        chan *peer        // signals completion of a synchronisation started by nextRequest

	reqMu      sync.RWMutex // reqMu protects access to sent header fetch requests
	requested  map[uint64]fetchRequest
	deliverChn chan fetchResponse
	timeoutChn chan uint64
	requestChn chan bool // true if initiated from outside (i.e. a new announcement), false for internal retriggers
}

// fetcherPeerInfo holds fetcher-specific information about each active peer
type fetcherPeerInfo struct {
	root, lastAnnounced *fetcherTreeNode // root of this peer's block tree and its most recently announced head
	nodeCnt             int              // number of tree nodes currently held for this peer (bounded by maxNodeCount)
	confirmedTd         *big.Int         // highest td this peer announced that has also been downloaded and validated
	bestConfirmed       *fetcherTreeNode // node corresponding to confirmedTd
	nodeByHash          map[common.Hash]*fetcherTreeNode
	firstUpdateStats    *updateStatsEntry // head of this peer's view into the global block delay statistics list
}

// fetcherTreeNode is a node of a tree that holds information about blocks recently
// announced and confirmed by a certain peer. Each new announce message from a peer
// adds nodes to the tree, based on the previous announced head and the reorg depth.
// There are three possible states for a tree node:
//  - announced: not downloaded (known) yet, but we know its head, number and td
//  - intermediate: not known, hash and td are empty, they are filled out when it becomes known
//  - known: both announced by this peer and downloaded (from any peer).
// This structure makes it possible to always know which peer has a certain block,
// which is necessary for selecting a suitable peer for ODR requests and also for
// canonizing new heads. It also helps to always download the minimum necessary
// amount of headers with a single request.
type fetcherTreeNode struct {
	hash             common.Hash
	number           uint64
	td               *big.Int
	known, requested bool // known: downloaded and validated; requested: a header fetch for it is in flight
	parent           *fetcherTreeNode
	children         []*fetcherTreeNode
}

// fetchRequest represents a header download request
type fetchRequest struct {
	hash    common.Hash   // head of the requested header range
	amount  uint64        // number of headers requested, counting backwards from hash
	peer    *peer         // peer the request was sent to
	sent    mclock.AbsTime // time the request was sent (monotonic clock)
	timeout bool          // set once softRequestTimeout has elapsed without a response
}

// fetchResponse represents a header download response
type fetchResponse struct {
	reqID   uint64
	headers []*types.Header
	peer    *peer
}

// newLightFetcher creates a new light fetcher and starts its event loop.
// The loop is registered with pm.wg so ProtocolManager shutdown waits for it.
func newLightFetcher(pm *ProtocolManager) *lightFetcher {
	f := &lightFetcher{
		pm:             pm,
		chain:          pm.blockchain.(*light.LightChain),
		odr:            pm.odr,
		peers:          make(map[*peer]*fetcherPeerInfo),
		deliverChn:     make(chan fetchResponse, 100),
		requested:      make(map[uint64]fetchRequest),
		timeoutChn:     make(chan uint64),
		requestChn:     make(chan bool, 100),
		syncDone:       make(chan *peer),
		maxConfirmedTd: big.NewInt(0),
	}
	// subscribe to peer registration/unregistration events
	pm.peers.notify(f)

	f.pm.wg.Add(1)
	go f.syncLoop()
	return f
}

// syncLoop is the main event loop of the light fetcher. It serializes request
// scheduling, soft/hard timeouts, response delivery and sync completion.
func (f *lightFetcher) syncLoop() {
	requesting := false
	defer f.pm.wg.Done()
	for {
		select {
		case <-f.pm.quitSync:
			return
		// when a new announce is received, request loop keeps running until
		// no further requests are necessary or possible
		case newAnnounce := <-f.requestChn:
			f.lock.Lock()
			s := requesting
			requesting = false
			var (
				rq    *distReq
				reqID uint64
			)
			// skip scheduling if already syncing, or if this is a fresh
			// announcement while a request round is still in progress
			if !f.syncing && !(newAnnounce && s) {
				rq, reqID = f.nextRequest()
			}
			syncing := f.syncing
			f.lock.Unlock()

			if rq != nil {
				requesting = true
				// wait until the distributor has sent (or dropped) the request;
				// a closed channel (!ok) means it could not be sent, so retrigger
				_, ok := <-f.pm.reqDist.queue(rq)
				if !ok {
					f.requestChn <- false
				}

				if !syncing {
					// soft timeout: mark the request as timed out for response
					// time statistics, but keep waiting for the hard timeout
					go func() {
						time.Sleep(softRequestTimeout)
						f.reqMu.Lock()
						req, ok := f.requested[reqID]
						if ok {
							req.timeout = true
							f.requested[reqID] = req
						}
						f.reqMu.Unlock()
						// keep starting new requests while possible
						f.requestChn <- false
					}()
				}
			}
		case reqID := <-f.timeoutChn:
			// hard timeout: give up on the request and drop the peer
			f.reqMu.Lock()
			req, ok := f.requested[reqID]
			if ok {
				delete(f.requested, reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)
				req.peer.Log().Debug("Fetching data timed out hard")
				go f.pm.removePeer(req.peer.id)
			}
		case resp := <-f.deliverChn:
			f.reqMu.Lock()
			req, ok := f.requested[resp.reqID]
			// a response is only valid if it comes from the peer the request was sent to
			if ok && req.peer != resp.peer {
				ok = false
			}
			if ok {
				delete(f.requested, resp.reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), req.timeout)
			}
			f.lock.Lock()
			// during sync, responses are accepted without processing; otherwise
			// a failed validation drops the responding peer
			if !ok || !(f.syncing || f.processResponse(req, resp)) {
				resp.peer.Log().Debug("Failed processing response")
				go f.pm.removePeer(resp.peer.id)
			}
			f.lock.Unlock()
		case p := <-f.syncDone:
			f.lock.Lock()
			p.Log().Debug("Done synchronising with peer")
			f.checkSyncedHeaders(p)
			f.syncing = false
			f.lock.Unlock()
		}
	}
}

// registerPeer adds a new peer to the fetcher's peer set
func (f *lightFetcher) registerPeer(p *peer) {
	p.lock.Lock()
	// install the ODR callback used to decide if this peer can serve a block
	p.hasBlock = func(hash common.Hash, number uint64) bool {
		return f.peerHasBlock(p, hash, number)
	}
	p.lock.Unlock()

	f.lock.Lock()
	defer f.lock.Unlock()

	f.peers[p] = &fetcherPeerInfo{nodeByHash: make(map[common.Hash]*fetcherTreeNode)}
}

// unregisterPeer removes a new peer from the fetcher's peer set
func (f *lightFetcher) unregisterPeer(p *peer) {
	p.lock.Lock()
	p.hasBlock = nil
	p.lock.Unlock()

	f.lock.Lock()
	defer f.lock.Unlock()

	// check for potential timed out block delay statistics
	f.checkUpdateStats(p, nil)
	delete(f.peers, p)
}

// announce processes a new announcement message received from a peer, adding new
// nodes to the peer's block tree and removing old nodes if necessary
func (f *lightFetcher) announce(p *peer, head *announceData) {
	f.lock.Lock()
	defer f.lock.Unlock()
	p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth)

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Announcement from unknown peer")
		return
	}

	if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {
		// announced tds should be strictly monotonic; a violation indicates a
		// misbehaving peer, which is dropped
		p.Log().Debug("Received non-monotonic td", "current", head.Td, "previous", fp.lastAnnounced.td)
		go f.pm.removePeer(p.id)
		return
	}

	// walk back ReorgDepth ancestors from the last announced head to find the
	// reorg common ancestor of the new head
	n := fp.lastAnnounced
	for i := uint64(0); i < head.ReorgDepth; i++ {
		if n == nil {
			break
		}
		n = n.parent
	}
	if n != nil {
		// n is now the reorg common ancestor, add a new branch of nodes
		// check if the node count is too high to add new nodes
		locked := false
		for uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil {
			if !locked {
				// chain lock is taken once and held (via defer) until announce
				// returns, protecting the canonical hash reads below
				f.chain.LockChain()
				defer f.chain.UnlockChain()
				locked = true
			}
			// if one of root's children is canonical, keep it, delete other branches and root itself
			var newRoot *fetcherTreeNode
			for i, nn := range fp.root.children {
				if core.GetCanonicalHash(f.pm.chainDb, nn.number) == nn.hash {
					// detach the canonical child before deleting the rest of the tree
					fp.root.children = append(fp.root.children[:i], fp.root.children[i+1:]...)
					nn.parent = nil
					newRoot = nn
					break
				}
			}
			fp.deleteNode(fp.root)
			if n == fp.root {
				n = newRoot
			}
			fp.root = newRoot
			if newRoot == nil || !f.checkKnownNode(p, newRoot) {
				fp.bestConfirmed = nil
				fp.confirmedTd = nil
			}

			if n == nil {
				break
			}
		}
		if n != nil {
			// add intermediate (unannounced) nodes up to the announced number,
			// then fill in the announced head's hash and td
			for n.number < head.Number {
				nn := &fetcherTreeNode{number: n.number + 1, parent: n}
				n.children = append(n.children, nn)
				n = nn
				fp.nodeCnt++
			}
			n.hash = head.Hash
			n.td = head.Td
			fp.nodeByHash[n.hash] = n
		}
	}
	if n == nil {
		// could not find reorg common ancestor or had to delete entire tree, a new root and a resync is needed
		if fp.root != nil {
			fp.deleteNode(fp.root)
		}
		n = &fetcherTreeNode{hash: head.Hash, number: head.Number, td: head.Td}
		fp.root = n
		fp.nodeCnt++
		fp.nodeByHash[n.hash] = n
		fp.bestConfirmed = nil
		fp.confirmedTd = nil
	}

	f.checkKnownNode(p, n)
	p.lock.Lock()
	p.headInfo = head
	fp.lastAnnounced = n
	p.lock.Unlock()
	f.checkUpdateStats(p, nil)
	// wake the request loop; true marks an externally initiated trigger
	f.requestChn <- true
}

// peerHasBlock returns true if we can assume the peer knows the given block
// based on its announcements
func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64) bool {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.syncing {
		// always return true when syncing
		// false positives are acceptable, a more sophisticated condition can be implemented later
		return true
	}

	fp := f.peers[p]
	if fp == nil || fp.root == nil {
		return false
	}

	if number >= fp.root.number {
		// it is recent enough that if it is known, it should be in the peer's block tree
		return fp.nodeByHash[hash] != nil
	}
	f.chain.LockChain()
	defer f.chain.UnlockChain()
	// if it's older than the peer's block tree root but it's in the same canonical chain
	// as the root, we can still be sure the peer knows it
	//
	// when syncing, just check if it is part of the known chain, there is nothing better we
	// can do since we do not know the most recent block hash yet
	return core.GetCanonicalHash(f.pm.chainDb, fp.root.number) == fp.root.hash && core.GetCanonicalHash(f.pm.chainDb, number) == hash
}

// requestAmount calculates the amount of headers to be downloaded starting
// from a certain head backwards
func (f *lightFetcher) requestAmount(p *peer, n *fetcherTreeNode) uint64 {
	amount := uint64(0)
	nn := n
	for nn != nil && !f.checkKnownNode(p, nn) {
		nn = nn.parent
		amount++
	}
	if nn == nil {
		// ran off the top of the tree without hitting a known node; request
		// everything back to genesis
		amount = n.number
	}
	return amount
}

// requestedID tells if a certain reqID has been requested by the fetcher
func (f *lightFetcher) requestedID(reqID uint64) bool {
	f.reqMu.RLock()
	_, ok := f.requested[reqID]
	f.reqMu.RUnlock()
	return ok
}

// nextRequest selects the peer and announced head to be requested next, amount
// to be downloaded starting from the head backwards is also returned
func (f *lightFetcher) nextRequest() (*distReq, uint64) {
	var (
		bestHash   common.Hash
		bestAmount uint64
	)
	bestTd := f.maxConfirmedTd
	bestSyncing := false

	// pick the announced-but-unknown head with the highest td (breaking ties by
	// the smallest download amount) among all peers
	for p, fp := range f.peers {
		for hash, n := range fp.nodeByHash {
			if !f.checkKnownNode(p, n) && !n.requested && (bestTd == nil || n.td.Cmp(bestTd) >= 0) {
				amount := f.requestAmount(p, n)
				if bestTd == nil || n.td.Cmp(bestTd) > 0 || amount < bestAmount {
					bestHash = hash
					bestAmount = amount
					bestTd = n.td
					// fall back to a full sync if the peer's tree has no confirmed
					// root to anchor a plain header fetch to
					bestSyncing = fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root)
				}
			}
		}
	}
	// pointer comparison: bestTd still being the exact f.maxConfirmedTd object
	// means no candidate improved on it, so there is nothing to request
	if bestTd == f.maxConfirmedTd {
		return nil, 0
	}

	f.syncing = bestSyncing

	var rq *distReq
	reqID := genReqID()
	if f.syncing {
		// sync request: run the downloader-based synchronisation against a peer
		// that has announced bestHash; completion is signalled on syncDone
		rq = &distReq{
			getCost: func(dp distPeer) uint64 {
				return 0
			},
			canSend: func(dp distPeer) bool {
				p := dp.(*peer)
				f.lock.Lock()
				defer f.lock.Unlock()

				fp := f.peers[p]
				return fp != nil && fp.nodeByHash[bestHash] != nil
			},
			request: func(dp distPeer) func() {
				go func() {
					p := dp.(*peer)
					p.Log().Debug("Synchronisation started")
					f.pm.synchronise(p)
					f.syncDone <- p
				}()
				return nil
			},
		}
	} else {
		// plain header fetch: request bestAmount headers backwards from bestHash
		rq = &distReq{
			getCost: func(dp distPeer) uint64 {
				p := dp.(*peer)
				return p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
			},
			canSend: func(dp distPeer) bool {
				p := dp.(*peer)
				f.lock.Lock()
				defer f.lock.Unlock()

				fp := f.peers[p]
				if fp == nil {
					return false
				}
				n := fp.nodeByHash[bestHash]
				return n != nil && !n.requested
			},
			request: func(dp distPeer) func() {
				p := dp.(*peer)
				f.lock.Lock()
				fp := f.peers[p]
				if fp != nil {
					n := fp.nodeByHash[bestHash]
					if n != nil {
						// mark as requested so the same node is not fetched twice
						n.requested = true
					}
				}
				f.lock.Unlock()

				cost := p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
				p.fcServer.QueueRequest(reqID, cost)
				f.reqMu.Lock()
				f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()}
				f.reqMu.Unlock()
				// arm the hard timeout for this request
				go func() {
					time.Sleep(hardRequestTimeout)
					f.timeoutChn <- reqID
				}()
				return func() { p.RequestHeadersByHash(reqID, cost, bestHash, int(bestAmount), 0, true) }
			},
		}
	}
	return rq, reqID
}

// deliverHeaders delivers header download request responses for processing
func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types.Header) {
	f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer}
}

// processResponse processes header download request responses, returns true if successful
func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {
	// NOTE(review): resp.headers[0] assumes req.amount > 0 (the length check
	// would not protect an empty response if a zero-amount request were ever
	// issued) — confirm requests always have amount >= 1
	if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {
		req.peer.Log().Debug("Response content mismatch", "requested", len(resp.headers), "reqfrom", resp.headers[0], "delivered", req.amount, "delfrom", req.hash)
		return false
	}
	// responses arrive highest-first; reverse into ascending order for insertion
	headers := make([]*types.Header, req.amount)
	for i, header := range resp.headers {
		headers[int(req.amount)-1-i] = header
	}
	if _, err := f.chain.InsertHeaderChain(headers, 1); err != nil {
		if err == consensus.ErrFutureBlock {
			// a slightly future block is not the peer's fault; treat as success
			return true
		}
		log.Debug("Failed to insert header chain", "err", err)
		return false
	}
	tds := make([]*big.Int, len(headers))
	for i, header := range headers {
		td := f.chain.GetTd(header.Hash(), header.Number.Uint64())
		if td == nil {
			log.Debug("Total difficulty not found for header", "index", i+1, "number", header.Number, "hash", header.Hash())
			return false
		}
		tds[i] = td
	}
	f.newHeaders(headers, tds)
	return true
}

// newHeaders updates the block trees of all active peers according to a newly
// downloaded and validated batch or headers
func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) {
	var maxTd *big.Int
	for p, fp := range f.peers {
		if !f.checkAnnouncedHeaders(fp, headers, tds) {
			p.Log().Debug("Inconsistent announcement")
			go f.pm.removePeer(p.id)
		}
		// NOTE(review): maxTd.Cmp(fp.confirmedTd) > 0 replaces maxTd whenever it
		// is *greater* than the peer's confirmedTd, i.e. this tracks the smallest
		// non-nil confirmedTd across peers despite the variable name — verify
		// this is the intended direction
		if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) {
			maxTd = fp.confirmedTd
		}
	}
	if maxTd != nil {
		f.updateMaxConfirmedTd(maxTd)
	}
}

// checkAnnouncedHeaders updates peer's block tree if necessary after validating
// a batch of headers. It searches for the latest header in the batch that has a
// matching tree node (if any), and if it has not been marked as known already,
// sets it and its parents to known (even those which are older than the currently
// validated ones). Return value shows if all hashes, numbers and Tds matched
// correctly to the announced values (otherwise the peer should be dropped).
func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*types.Header, tds []*big.Int) bool {
	var (
		n      *fetcherTreeNode
		header *types.Header
		td     *big.Int
	)

	// iterate from the newest delivered header backwards; once the batch is
	// exhausted (i < 0), continue walking parents from the local chain
	for i := len(headers) - 1; ; i-- {
		if i < 0 {
			if n == nil {
				// no more headers and nothing to match
				return true
			}
			// we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching
			hash, number := header.ParentHash, header.Number.Uint64()-1
			td = f.chain.GetTd(hash, number)
			header = f.chain.GetHeader(hash, number)
			if header == nil || td == nil {
				log.Error("Missing parent of validated header", "hash", hash, "number", number)
				return false
			}
		} else {
			header = headers[i]
			td = tds[i]
		}
		hash := header.Hash()
		number := header.Number.Uint64()
		if n == nil {
			n = fp.nodeByHash[hash]
		}
		if n != nil {
			if n.td == nil {
				// node was unannounced
				if nn := fp.nodeByHash[hash]; nn != nil {
					// if there was already a node with the same hash, continue there and drop this one
					nn.children = append(nn.children, n.children...)
					n.children = nil
					fp.deleteNode(n)
					n = nn
				} else {
					// fill in the intermediate node with the validated data
					n.hash = hash
					n.td = td
					fp.nodeByHash[hash] = n
				}
			}
			// check if it matches the header
			if n.hash != hash || n.number != number || n.td.Cmp(td) != 0 {
				// peer has previously made an invalid announcement
				return false
			}
			if n.known {
				// we reached a known node that matched our expectations, return with success
				return true
			}
			n.known = true
			if fp.confirmedTd == nil || td.Cmp(fp.confirmedTd) > 0 {
				fp.confirmedTd = td
				fp.bestConfirmed = n
			}
			n = n.parent
			if n == nil {
				return true
			}
		}
	}
}

// checkSyncedHeaders updates peer's block tree after synchronisation by marking
// downloaded headers as known. If none of the announced headers are found after
// syncing, the peer is dropped.
func (f *lightFetcher) checkSyncedHeaders(p *peer) {
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check sync headers")
		return
	}
	// walk back from the last announced head to the newest node whose td is
	// already present locally (i.e. that the sync actually downloaded)
	n := fp.lastAnnounced
	var td *big.Int
	for n != nil {
		if td = f.chain.GetTd(n.hash, n.number); td != nil {
			break
		}
		n = n.parent
	}
	// now n is the latest downloaded header after syncing
	if n == nil {
		p.Log().Debug("Synchronisation failed")
		go f.pm.removePeer(p.id)
	} else {
		header := f.chain.GetHeader(n.hash, n.number)
		f.newHeaders([]*types.Header{header}, []*big.Int{td})
	}
}

// checkKnownNode checks if a block tree node is known (downloaded and validated)
// If it was not known previously but found in the database, sets its known flag
func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
	if n.known {
		return true
	}
	td := f.chain.GetTd(n.hash, n.number)
	if td == nil {
		return false
	}
	header := f.chain.GetHeader(n.hash, n.number)
	// check the availability of both header and td because reads are not protected by chain db mutex
	// Note: returning false is always safe here
	if header == nil {
		return false
	}

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check known nodes")
		return false
	}
	if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
		p.Log().Debug("Inconsistent announcement")
		go f.pm.removePeer(p.id)
	}
	if fp.confirmedTd != nil {
		f.updateMaxConfirmedTd(fp.confirmedTd)
	}
	// n.known may have been set by checkAnnouncedHeaders above
	return n.known
}

// deleteNode deletes a node and its child subtrees from a peer's block tree
func (fp *fetcherPeerInfo) deleteNode(n *fetcherTreeNode) {
	// detach n from its parent's child list
	if n.parent != nil {
		for i, nn := range n.parent.children {
			if nn == n {
				n.parent.children = append(n.parent.children[:i], n.parent.children[i+1:]...)
				break
			}
		}
	}
	// iteratively descend the first child, recursing only for siblings; nodes
	// without a td (intermediate nodes) are not in nodeByHash
	for {
		if n.td != nil {
			delete(fp.nodeByHash, n.hash)
		}
		fp.nodeCnt--
		if len(n.children) == 0 {
			return
		}
		for i, nn := range n.children {
			if i == 0 {
				n = nn
			} else {
				fp.deleteNode(nn)
			}
		}
	}
}

// updateStatsEntry items form a linked list that is expanded with a new item every time a new head with a higher Td
// than the previous one has been downloaded and validated. The list contains a series of maximum confirmed Td values
// and the time these values have been confirmed, both increasing monotonically. A maximum confirmed Td is calculated
// both globally for all peers and also for each individual peer (meaning that the given peer has announced the head
// and it has also been downloaded from any peer, either before or after the given announcement).
// The linked list has a global tail where new confirmed Td entries are added and a separate head for each peer,
// pointing to the next Td entry that is higher than the peer's max confirmed Td (nil if it has already confirmed
// the current global head).
type updateStatsEntry struct {
	time mclock.AbsTime    // monotonic time the td value was confirmed
	td   *big.Int          // confirmed total difficulty
	next *updateStatsEntry // next (higher-td, later) entry in the list, nil at the tail
}

// updateMaxConfirmedTd updates the block delay statistics of active peers. Whenever a new highest Td is confirmed,
// adds it to the end of a linked list together with the time it has been confirmed. Then checks which peers have
// already confirmed a head with the same or higher Td (which counts as zero block delay) and updates their statistics.
// Those who have not confirmed such a head by now will be updated by a subsequent checkUpdateStats call with a
// positive block delay value.
func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) {
	if f.maxConfirmedTd == nil || td.Cmp(f.maxConfirmedTd) > 0 {
		f.maxConfirmedTd = td
		newEntry := &updateStatsEntry{
			time: mclock.Now(),
			td:   td,
		}
		// append to the global tail of the statistics list
		if f.lastUpdateStats != nil {
			f.lastUpdateStats.next = newEntry
		}
		f.lastUpdateStats = newEntry
		// give every peer a chance to confirm the new entry immediately
		for p := range f.peers {
			f.checkUpdateStats(p, newEntry)
		}
	}
}

// checkUpdateStats checks those peers who have not confirmed a certain highest Td (or a larger one) by the time it
// has been confirmed by another peer. If they have confirmed such a head by now, their stats are updated with the
// block delay which is (this peer's confirmation time)-(first confirmation time). After blockDelayTimeout has passed,
// the stats are updated with blockDelayTimeout value. In either case, the confirmed or timed out updateStatsEntry
// items are removed from the head of the linked list.
// If a new entry has been added to the global tail, it is passed as a parameter here even though this function
// assumes that it has already been added, so that if the peer's list is empty (all heads confirmed, head is nil),
// it can set the new head to newEntry.
749 func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) { 750 now := mclock.Now() 751 fp := f.peers[p] 752 if fp == nil { 753 p.Log().Debug("Unknown peer to check update stats") 754 return 755 } 756 if newEntry != nil && fp.firstUpdateStats == nil { 757 fp.firstUpdateStats = newEntry 758 } 759 for fp.firstUpdateStats != nil && fp.firstUpdateStats.time <= now-mclock.AbsTime(blockDelayTimeout) { 760 f.pm.serverPool.adjustBlockDelay(p.poolEntry, blockDelayTimeout) 761 fp.firstUpdateStats = fp.firstUpdateStats.next 762 } 763 if fp.confirmedTd != nil { 764 for fp.firstUpdateStats != nil && fp.firstUpdateStats.td.Cmp(fp.confirmedTd) <= 0 { 765 f.pm.serverPool.adjustBlockDelay(p.poolEntry, time.Duration(now-fp.firstUpdateStats.time)) 766 fp.firstUpdateStats = fp.firstUpdateStats.next 767 } 768 } 769 }