// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package les implements the Light Ethereum Subprotocol.
package les

import (
	"math/big"
	"sync"
	"time"

	"github.com/luckypickle/go-ethereum-vet/common"
	"github.com/luckypickle/go-ethereum-vet/common/mclock"
	"github.com/luckypickle/go-ethereum-vet/consensus"
	"github.com/luckypickle/go-ethereum-vet/core/rawdb"
	"github.com/luckypickle/go-ethereum-vet/core/types"
	"github.com/luckypickle/go-ethereum-vet/light"
	"github.com/luckypickle/go-ethereum-vet/log"
)

const (
	blockDelayTimeout = time.Second * 10 // timeout for a peer to announce a head that has already been confirmed by others
	maxNodeCount      = 20               // maximum number of fetcherTreeNode entries remembered for each peer
)

// lightFetcher implements retrieval of newly announced headers. It also provides a peerHasBlock function for the
// ODR system to ensure that we only request data related to a certain block from peers who have already processed
// and announced that block.
type lightFetcher struct {
	pm    *ProtocolManager   // owning protocol manager; provides peer set, request distributor and quit channel
	odr   *LesOdr            // ODR backend (set from pm.odr in newLightFetcher)
	chain *light.LightChain  // light header chain used for insertion and Td/header lookups

	lock            sync.Mutex // lock protects access to the fetcher's internal state variables except sent requests
	maxConfirmedTd  *big.Int   // highest total difficulty confirmed so far across all peers
	peers           map[*peer]*fetcherPeerInfo
	lastUpdateStats *updateStatsEntry // tail of the global block-delay statistics list
	syncing         bool              // true while a full synchronisation (rather than a header fetch) is in progress
	syncDone        chan *peer        // signals completion of a synchronisation started by nextRequest

	reqMu      sync.RWMutex // reqMu protects access to sent header fetch requests
	requested  map[uint64]fetchRequest
	deliverChn chan fetchResponse
	timeoutChn chan uint64
	requestChn chan bool // true if initiated from outside
}

// fetcherPeerInfo holds fetcher-specific information about each active peer
type fetcherPeerInfo struct {
	root, lastAnnounced *fetcherTreeNode // tree root and the node of the peer's most recent announcement
	nodeCnt             int              // number of nodes currently stored in the tree (bounded by maxNodeCount)
	confirmedTd         *big.Int         // highest Td both announced by this peer and downloaded/validated
	bestConfirmed       *fetcherTreeNode // node belonging to confirmedTd
	nodeByHash          map[common.Hash]*fetcherTreeNode
	firstUpdateStats    *updateStatsEntry // head of this peer's pending block-delay statistics entries
}

// fetcherTreeNode is a node of a tree that holds information about blocks recently
// announced and confirmed by a certain peer. Each new announce message from a peer
// adds nodes to the tree, based on the previous announced head and the reorg depth.
// There are three possible states for a tree node:
//   - announced: not downloaded (known) yet, but we know its head, number and td
//   - intermediate: not known, hash and td are empty, they are filled out when it becomes known
//   - known: both announced by this peer and downloaded (from any peer).
//
// This structure makes it possible to always know which peer has a certain block,
// which is necessary for selecting a suitable peer for ODR requests and also for
// canonizing new heads. It also helps to always download the minimum necessary
// amount of headers with a single request.
type fetcherTreeNode struct {
	hash             common.Hash
	number           uint64
	td               *big.Int // nil for intermediate (unannounced) nodes
	known, requested bool
	parent           *fetcherTreeNode
	children         []*fetcherTreeNode
}

// fetchRequest represents a header download request
type fetchRequest struct {
	hash    common.Hash    // hash of the highest requested header
	amount  uint64         // number of headers requested, walking backwards from hash
	peer    *peer          // peer the request was sent to
	sent    mclock.AbsTime // send timestamp, used for response-time statistics
	timeout bool           // set by the soft-timeout goroutine in syncLoop
}

// fetchResponse represents a header download response
type fetchResponse struct {
	reqID   uint64
	headers []*types.Header
	peer    *peer
}

// newLightFetcher creates a new light fetcher
func newLightFetcher(pm *ProtocolManager) *lightFetcher {
	f := &lightFetcher{
		pm:             pm,
		chain:          pm.blockchain.(*light.LightChain),
		odr:            pm.odr,
		peers:          make(map[*peer]*fetcherPeerInfo),
		deliverChn:     make(chan fetchResponse, 100),
		requested:      make(map[uint64]fetchRequest),
		timeoutChn:     make(chan uint64),
		requestChn:     make(chan bool, 100),
		syncDone:       make(chan *peer),
		maxConfirmedTd: big.NewInt(0),
	}
	// register for peer connect/disconnect notifications (registerPeer/unregisterPeer)
	pm.peers.notify(f)

	f.pm.wg.Add(1)
	go f.syncLoop()
	return f
}

// syncLoop is the main event loop of the light fetcher
func (f *lightFetcher) syncLoop() {
	// requesting tracks whether the previous requestChn iteration issued a request;
	// it is only touched from this goroutine.
	requesting := false
	defer f.pm.wg.Done()
	for {
		select {
		case <-f.pm.quitSync:
			return
		// when a new announce is received, request loop keeps running until
		// no further requests are necessary or possible
		case newAnnounce := <-f.requestChn:
			f.lock.Lock()
			s := requesting
			requesting = false
			var (
				rq    *distReq
				reqID uint64
			)
			// skip issuing a new request if this event is an external announcement
			// that arrived while a request was already in flight
			if !f.syncing && !(newAnnounce && s) {
				rq, reqID = f.nextRequest()
			}
			syncing := f.syncing
			f.lock.Unlock()

			if rq != nil {
				requesting = true
				// queue returns a channel that is closed without a value if the
				// request cannot be sent; retrigger the loop in that case
				_, ok := <-f.pm.reqDist.queue(rq)
				if !ok {
					f.requestChn <- false
				}

				if !syncing {
					// soft timeout: mark the request as timed out (for response
					// time statistics) but keep waiting for the response
					go func() {
						time.Sleep(softRequestTimeout)
						f.reqMu.Lock()
						req, ok := f.requested[reqID]
						if ok {
							req.timeout = true
							f.requested[reqID] = req
						}
						f.reqMu.Unlock()
						// keep starting new requests while possible
						f.requestChn <- false
					}()
				}
			}
		case reqID := <-f.timeoutChn:
			// hard timeout: drop the request and the peer that failed to serve it
			f.reqMu.Lock()
			req, ok := f.requested[reqID]
			if ok {
				delete(f.requested, reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)
				req.peer.Log().Debug("Fetching data timed out hard")
				go f.pm.removePeer(req.peer.id)
			}
		case resp := <-f.deliverChn:
			f.reqMu.Lock()
			req, ok := f.requested[resp.reqID]
			if ok && req.peer != resp.peer {
				// response delivered by a different peer than the one asked
				ok = false
			}
			if ok {
				delete(f.requested, resp.reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), req.timeout)
			}
			f.lock.Lock()
			// while syncing, responses are accepted without processing
			if !ok || !(f.syncing || f.processResponse(req, resp)) {
				resp.peer.Log().Debug("Failed processing response")
				go f.pm.removePeer(resp.peer.id)
			}
			f.lock.Unlock()
		case p := <-f.syncDone:
			f.lock.Lock()
			p.Log().Debug("Done synchronising with peer")
			f.checkSyncedHeaders(p)
			f.syncing = false
			f.lock.Unlock()
		}
	}
}

// registerPeer adds a new peer to the fetcher's peer set
func (f *lightFetcher) registerPeer(p *peer) {
	p.lock.Lock()
	p.hasBlock = func(hash common.Hash, number uint64) bool {
		return f.peerHasBlock(p, hash, number)
	}
	p.lock.Unlock()

	f.lock.Lock()
	defer f.lock.Unlock()

	f.peers[p] = &fetcherPeerInfo{nodeByHash: make(map[common.Hash]*fetcherTreeNode)}
}

// unregisterPeer removes a new peer from the fetcher's peer set
func (f *lightFetcher) unregisterPeer(p *peer) {
	p.lock.Lock()
	p.hasBlock = nil
	p.lock.Unlock()

	f.lock.Lock()
	defer f.lock.Unlock()

	// check for potential timed out block delay statistics
	f.checkUpdateStats(p, nil)
	delete(f.peers, p)
}

// announce processes a new announcement message received from a peer, adding new
// nodes to the peer's block tree and removing old nodes if necessary
func (f *lightFetcher) announce(p *peer, head *announceData) {
	f.lock.Lock()
	defer f.lock.Unlock()
	p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth)

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Announcement from unknown peer")
		return
	}

	if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {
		// announced tds should be strictly monotonic
		p.Log().Debug("Received non-monotonic td", "current", head.Td, "previous", fp.lastAnnounced.td)
		go f.pm.removePeer(p.id)
		return
	}

	// walk back ReorgDepth steps from the last announced node to find the
	// reorg common ancestor of the new head
	n := fp.lastAnnounced
	for i := uint64(0); i < head.ReorgDepth; i++ {
		if n == nil {
			break
		}
		n = n.parent
	}
	// n is now the reorg common ancestor, add a new branch of nodes
	if n != nil && (head.Number >= n.number+maxNodeCount || head.Number <= n.number) {
		// if announced head block height is lower or same as n or too far from it to add
		// intermediate nodes then discard previous announcement info and trigger a resync
		n = nil
		fp.nodeCnt = 0
		fp.nodeByHash = make(map[common.Hash]*fetcherTreeNode)
	}
	if n != nil {
		// check if the node count is too high to add new nodes, discard oldest ones if necessary
		locked := false
		for uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil {
			if !locked {
				// LockChain is held (and released via defer) so canonical hash
				// reads below stay consistent while pruning
				f.chain.LockChain()
				defer f.chain.UnlockChain()
				locked = true
			}
			// if one of root's children is canonical, keep it, delete other branches and root itself
			var newRoot *fetcherTreeNode
			for i, nn := range fp.root.children {
				if rawdb.ReadCanonicalHash(f.pm.chainDb, nn.number) == nn.hash {
					fp.root.children = append(fp.root.children[:i], fp.root.children[i+1:]...)
					nn.parent = nil
					newRoot = nn
					break
				}
			}
			fp.deleteNode(fp.root)
			if n == fp.root {
				n = newRoot
			}
			fp.root = newRoot
			if newRoot == nil || !f.checkKnownNode(p, newRoot) {
				fp.bestConfirmed = nil
				fp.confirmedTd = nil
			}

			if n == nil {
				break
			}
		}
		if n != nil {
			// add intermediate (unannounced) nodes up to the announced height,
			// then fill in the announced hash/td on the final node
			for n.number < head.Number {
				nn := &fetcherTreeNode{number: n.number + 1, parent: n}
				n.children = append(n.children, nn)
				n = nn
				fp.nodeCnt++
			}
			n.hash = head.Hash
			n.td = head.Td
			fp.nodeByHash[n.hash] = n
		}
	}
	if n == nil {
		// could not find reorg common ancestor or had to delete entire tree, a new root and a resync is needed
		if fp.root != nil {
			fp.deleteNode(fp.root)
		}
		n = &fetcherTreeNode{hash: head.Hash, number: head.Number, td: head.Td}
		fp.root = n
		fp.nodeCnt++
		fp.nodeByHash[n.hash] = n
		fp.bestConfirmed = nil
		fp.confirmedTd = nil
	}

	f.checkKnownNode(p, n)
	p.lock.Lock()
	p.headInfo = head
	fp.lastAnnounced = n
	p.lock.Unlock()
	f.checkUpdateStats(p, nil)
	f.requestChn <- true
}

// peerHasBlock returns true if we can assume the peer knows the given block
// based on its announcements
func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64) bool {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.syncing {
		// always return true when syncing
		// false positives are acceptable, a more sophisticated condition can be implemented later
		return true
	}

	fp := f.peers[p]
	if fp == nil || fp.root == nil {
		return false
	}

	if number >= fp.root.number {
		// it is recent enough that if it is known, is should be in the peer's block tree
		return fp.nodeByHash[hash] != nil
	}
	f.chain.LockChain()
	defer f.chain.UnlockChain()
	// if it's older than the peer's block tree root but it's in the same canonical chain
	// as the root, we can still be sure the peer knows it
	//
	// when syncing, just check if it is part of the known chain, there is nothing better we
	// can do since we do not know the most recent block hash yet
	return rawdb.ReadCanonicalHash(f.pm.chainDb, fp.root.number) == fp.root.hash && rawdb.ReadCanonicalHash(f.pm.chainDb, number) == hash
}

// requestAmount calculates the amount of headers to be downloaded starting
// from a certain head backwards
func (f *lightFetcher) requestAmount(p *peer, n *fetcherTreeNode) uint64 {
	amount := uint64(0)
	nn := n
	for nn != nil && !f.checkKnownNode(p, nn) {
		nn = nn.parent
		amount++
	}
	if nn == nil {
		// walked off the tree without hitting a known node: request the
		// whole chain below the head
		amount = n.number
	}
	return amount
}

// requestedID tells if a certain reqID has been requested by the fetcher
func (f *lightFetcher) requestedID(reqID uint64) bool {
	f.reqMu.RLock()
	_, ok := f.requested[reqID]
	f.reqMu.RUnlock()
	return ok
}

// nextRequest selects the peer and announced head to be requested next, amount
// to be downloaded starting from the head backwards is also returned
func (f *lightFetcher) nextRequest() (*distReq, uint64) {
	var (
		bestHash   common.Hash
		bestAmount uint64
	)
	bestTd := f.maxConfirmedTd
	bestSyncing := false

	// pick the announced-but-unknown head with the highest Td (ties broken by
	// the smallest download amount)
	for p, fp := range f.peers {
		for hash, n := range fp.nodeByHash {
			if !f.checkKnownNode(p, n) && !n.requested && (bestTd == nil || n.td.Cmp(bestTd) >= 0) {
				amount := f.requestAmount(p, n)
				if bestTd == nil || n.td.Cmp(bestTd) > 0 || amount < bestAmount {
					bestHash = hash
					bestAmount = amount
					bestTd = n.td
					// fall back to full sync if the peer's tree root itself is unconfirmed
					bestSyncing = fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root)
				}
			}
		}
	}
	// pointer comparison: bestTd unchanged means nothing better was found
	if bestTd == f.maxConfirmedTd {
		return nil, 0
	}

	f.syncing = bestSyncing

	var rq *distReq
	reqID := genReqID()
	if f.syncing {
		// synchronisation request: run the downloader against a peer that has
		// announced the selected head; cost is zero since it is not a LES request
		rq = &distReq{
			getCost: func(dp distPeer) uint64 {
				return 0
			},
			canSend: func(dp distPeer) bool {
				p := dp.(*peer)
				f.lock.Lock()
				defer f.lock.Unlock()

				fp := f.peers[p]
				return fp != nil && fp.nodeByHash[bestHash] != nil
			},
			request: func(dp distPeer) func() {
				go func() {
					p := dp.(*peer)
					p.Log().Debug("Synchronisation started")
					f.pm.synchronise(p)
					f.syncDone <- p
				}()
				return nil
			},
		}
	} else {
		// plain header fetch: GetBlockHeaders backwards from bestHash
		rq = &distReq{
			getCost: func(dp distPeer) uint64 {
				p := dp.(*peer)
				return p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
			},
			canSend: func(dp distPeer) bool {
				p := dp.(*peer)
				f.lock.Lock()
				defer f.lock.Unlock()

				fp := f.peers[p]
				if fp == nil {
					return false
				}
				n := fp.nodeByHash[bestHash]
				return n != nil && !n.requested
			},
			request: func(dp distPeer) func() {
				p := dp.(*peer)
				f.lock.Lock()
				fp := f.peers[p]
				if fp != nil {
					n := fp.nodeByHash[bestHash]
					if n != nil {
						// mark requested so it is not selected again
						n.requested = true
					}
				}
				f.lock.Unlock()

				cost := p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
				p.fcServer.QueueRequest(reqID, cost)
				f.reqMu.Lock()
				f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()}
				f.reqMu.Unlock()
				// arm the hard timeout handled by syncLoop
				go func() {
					time.Sleep(hardRequestTimeout)
					f.timeoutChn <- reqID
				}()
				return func() { p.RequestHeadersByHash(reqID, cost, bestHash, int(bestAmount), 0, true) }
			},
		}
	}
	return rq, reqID
}

// deliverHeaders delivers header download request responses for processing
func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types.Header) {
	f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer}
}

// processResponse processes header download request responses, returns true if successful
func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {
	if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {
		// NOTE(review): the "requested"/"delivered" log labels appear swapped
		// (len(resp.headers) is the delivered count, req.amount the requested one) — verify against upstream
		req.peer.Log().Debug("Response content mismatch", "requested", len(resp.headers), "reqfrom", resp.headers[0], "delivered", req.amount, "delfrom", req.hash)
		return false
	}
	// responses arrive highest-first; reverse into ascending order for insertion
	headers := make([]*types.Header, req.amount)
	for i, header := range resp.headers {
		headers[int(req.amount)-1-i] = header
	}
	if _, err := f.chain.InsertHeaderChain(headers, 1); err != nil {
		if err == consensus.ErrFutureBlock {
			// future blocks are not a peer fault; treat as success
			return true
		}
		log.Debug("Failed to insert header chain", "err", err)
		return false
	}
	tds := make([]*big.Int, len(headers))
	for i, header := range headers {
		td := f.chain.GetTd(header.Hash(), header.Number.Uint64())
		if td == nil {
			log.Debug("Total difficulty not found for header", "index", i+1, "number", header.Number, "hash", header.Hash())
			return false
		}
		tds[i] = td
	}
	f.newHeaders(headers, tds)
	return true
}

// newHeaders updates the block trees of all active peers according to a newly
// downloaded and validated batch or headers
func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) {
	var maxTd *big.Int
	for p, fp := range f.peers {
		if !f.checkAnnouncedHeaders(fp, headers, tds) {
			p.Log().Debug("Inconsistent announcement")
			go f.pm.removePeer(p.id)
		}
		// NOTE(review): `maxTd.Cmp(fp.confirmedTd) > 0` keeps the SMALLEST
		// confirmed Td across peers, which looks inverted for a variable named
		// maxTd — confirm against upstream before changing
		if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) {
			maxTd = fp.confirmedTd
		}
	}
	if maxTd != nil {
		f.updateMaxConfirmedTd(maxTd)
	}
}

// checkAnnouncedHeaders updates peer's block tree if necessary after validating
// a batch of headers. It searches for the latest header in the batch that has a
// matching tree node (if any), and if it has not been marked as known already,
// sets it and its parents to known (even those which are older than the currently
// validated ones). Return value shows if all hashes, numbers and Tds matched
// correctly to the announced values (otherwise the peer should be dropped).
func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*types.Header, tds []*big.Int) bool {
	var (
		n      *fetcherTreeNode
		header *types.Header
		td     *big.Int
	)

	// iterate from the newest delivered header backwards, then keep walking
	// parents out of the local chain database if necessary
	for i := len(headers) - 1; ; i-- {
		if i < 0 {
			if n == nil {
				// no more headers and nothing to match
				return true
			}
			// we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching
			hash, number := header.ParentHash, header.Number.Uint64()-1
			td = f.chain.GetTd(hash, number)
			header = f.chain.GetHeader(hash, number)
			if header == nil || td == nil {
				log.Error("Missing parent of validated header", "hash", hash, "number", number)
				return false
			}
		} else {
			header = headers[i]
			td = tds[i]
		}
		hash := header.Hash()
		number := header.Number.Uint64()
		if n == nil {
			n = fp.nodeByHash[hash]
		}
		if n != nil {
			if n.td == nil {
				// node was unannounced
				if nn := fp.nodeByHash[hash]; nn != nil {
					// if there was already a node with the same hash, continue there and drop this one
					nn.children = append(nn.children, n.children...)
					n.children = nil
					fp.deleteNode(n)
					n = nn
				} else {
					n.hash = hash
					n.td = td
					fp.nodeByHash[hash] = n
				}
			}
			// check if it matches the header
			if n.hash != hash || n.number != number || n.td.Cmp(td) != 0 {
				// peer has previously made an invalid announcement
				return false
			}
			if n.known {
				// we reached a known node that matched our expectations, return with success
				return true
			}
			n.known = true
			if fp.confirmedTd == nil || td.Cmp(fp.confirmedTd) > 0 {
				fp.confirmedTd = td
				fp.bestConfirmed = n
			}
			n = n.parent
			if n == nil {
				return true
			}
		}
	}
}

// checkSyncedHeaders updates peer's block tree after synchronisation by marking
// downloaded headers as known. If none of the announced headers are found after
// syncing, the peer is dropped.
func (f *lightFetcher) checkSyncedHeaders(p *peer) {
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check sync headers")
		return
	}
	// walk back from the last announced node until a header that made it into
	// the local chain is found
	n := fp.lastAnnounced
	var td *big.Int
	for n != nil {
		if td = f.chain.GetTd(n.hash, n.number); td != nil {
			break
		}
		n = n.parent
	}
	// now n is the latest downloaded header after syncing
	if n == nil {
		p.Log().Debug("Synchronisation failed")
		go f.pm.removePeer(p.id)
	} else {
		header := f.chain.GetHeader(n.hash, n.number)
		f.newHeaders([]*types.Header{header}, []*big.Int{td})
	}
}

// checkKnownNode checks if a block tree node is known (downloaded and validated)
// If it was not known previously but found in the database, sets its known flag
func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
	if n.known {
		return true
	}
	td := f.chain.GetTd(n.hash, n.number)
	if td == nil {
		return false
	}
	header := f.chain.GetHeader(n.hash, n.number)
	// check the availability of both header and td because reads are not protected by chain db mutex
	// Note: returning false is always safe here
	if header == nil {
		return false
	}

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check known nodes")
		return false
	}
	// checkAnnouncedHeaders is what actually flips n.known when the header matches
	if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
		p.Log().Debug("Inconsistent announcement")
		go f.pm.removePeer(p.id)
	}
	if fp.confirmedTd != nil {
		f.updateMaxConfirmedTd(fp.confirmedTd)
	}
	return n.known
}

// deleteNode deletes a node and its child subtrees from a peer's block tree
func (fp *fetcherPeerInfo) deleteNode(n *fetcherTreeNode) {
	// detach n from its parent's child list first
	if n.parent != nil {
		for i, nn := range n.parent.children {
			if nn == n {
				n.parent.children = append(n.parent.children[:i], n.parent.children[i+1:]...)
				break
			}
		}
	}
	// iteratively delete the first-child chain, recursing only for siblings
	for {
		if n.td != nil {
			// only announced nodes are present in nodeByHash
			delete(fp.nodeByHash, n.hash)
		}
		fp.nodeCnt--
		if len(n.children) == 0 {
			return
		}
		for i, nn := range n.children {
			if i == 0 {
				n = nn
			} else {
				fp.deleteNode(nn)
			}
		}
	}
}

// updateStatsEntry items form a linked list that is expanded with a new item every time a new head with a higher Td
// than the previous one has been downloaded and validated. The list contains a series of maximum confirmed Td values
// and the time these values have been confirmed, both increasing monotonically. A maximum confirmed Td is calculated
// both globally for all peers and also for each individual peer (meaning that the given peer has announced the head
// and it has also been downloaded from any peer, either before or after the given announcement).
// The linked list has a global tail where new confirmed Td entries are added and a separate head for each peer,
// pointing to the next Td entry that is higher than the peer's max confirmed Td (nil if it has already confirmed
// the current global head).
type updateStatsEntry struct {
	time mclock.AbsTime
	td   *big.Int
	next *updateStatsEntry
}

// updateMaxConfirmedTd updates the block delay statistics of active peers. Whenever a new highest Td is confirmed,
// adds it to the end of a linked list together with the time it has been confirmed. Then checks which peers have
// already confirmed a head with the same or higher Td (which counts as zero block delay) and updates their statistics.
// Those who have not confirmed such a head by now will be updated by a subsequent checkUpdateStats call with a
// positive block delay value.
func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) {
	if f.maxConfirmedTd == nil || td.Cmp(f.maxConfirmedTd) > 0 {
		f.maxConfirmedTd = td
		newEntry := &updateStatsEntry{
			time: mclock.Now(),
			td:   td,
		}
		if f.lastUpdateStats != nil {
			f.lastUpdateStats.next = newEntry
		}
		f.lastUpdateStats = newEntry
		for p := range f.peers {
			f.checkUpdateStats(p, newEntry)
		}
	}
}

// checkUpdateStats checks those peers who have not confirmed a certain highest Td (or a larger one) by the time it
// has been confirmed by another peer. If they have confirmed such a head by now, their stats are updated with the
// block delay which is (this peer's confirmation time)-(first confirmation time). After blockDelayTimeout has passed,
// the stats are updated with blockDelayTimeout value. In either case, the confirmed or timed out updateStatsEntry
// items are removed from the head of the linked list.
// If a new entry has been added to the global tail, it is passed as a parameter here even though this function
// assumes that it has already been added, so that if the peer's list is empty (all heads confirmed, head is nil),
// it can set the new head to newEntry.
func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) {
	now := mclock.Now()
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check update stats")
		return
	}
	if newEntry != nil && fp.firstUpdateStats == nil {
		fp.firstUpdateStats = newEntry
	}
	// expire entries older than blockDelayTimeout with the maximum penalty
	for fp.firstUpdateStats != nil && fp.firstUpdateStats.time <= now-mclock.AbsTime(blockDelayTimeout) {
		f.pm.serverPool.adjustBlockDelay(p.poolEntry, blockDelayTimeout)
		fp.firstUpdateStats = fp.firstUpdateStats.next
	}
	if fp.confirmedTd != nil {
		// credit entries this peer has confirmed with the measured delay
		for fp.firstUpdateStats != nil && fp.firstUpdateStats.td.Cmp(fp.confirmedTd) <= 0 {
			f.pm.serverPool.adjustBlockDelay(p.poolEntry, time.Duration(now-fp.firstUpdateStats.time))
			fp.firstUpdateStats = fp.firstUpdateStats.next
		}
	}
}