// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"math/big"
	"sync"
	"time"

	"github.com/jartcoin/go-jartdiuma/common"
	"github.com/jartcoin/go-jartdiuma/common/mclock"
	"github.com/jartcoin/go-jartdiuma/consensus"
	"github.com/jartcoin/go-jartdiuma/core/rawdb"
	"github.com/jartcoin/go-jartdiuma/core/types"
	"github.com/jartcoin/go-jartdiuma/light"
	"github.com/jartcoin/go-jartdiuma/log"
)

// Tuning constants for the light fetcher.
const (
	blockDelayTimeout    = time.Second * 10 // timeout for a peer to announce a head that has already been confirmed by others
	maxNodeCount         = 20               // maximum number of fetcherTreeNode entries remembered for each peer
	serverStateAvailable = 100              // number of recent blocks where state availability is assumed
)

// lightFetcher implements retrieval of newly announced headers. It also provides a peerHasBlock function for the
// ODR system to ensure that we only request data related to a certain block from peers who have already processed
// and announced that block.
type lightFetcher struct {
	pm    *ProtocolManager // protocol manager that owns this fetcher
	odr   *LesOdr          // ODR backend; its peerHasBlock hook is wired up in registerPeer
	chain lightChain       // light header chain being extended by fetched headers

	lock            sync.Mutex // lock protects access to the fetcher's internal state variables except sent requests
	maxConfirmedTd  *big.Int   // highest total difficulty confirmed (downloaded and validated) so far
	peers           map[*peer]*fetcherPeerInfo
	lastUpdateStats *updateStatsEntry // tail of the global block delay statistics linked list
	syncing         bool              // true while a downloader sync (rather than a header request) is in progress
	syncDone        chan *peer        // signals completion of a downloader sync with the given peer

	reqMu             sync.RWMutex // reqMu protects access to sent header fetch requests
	requested         map[uint64]fetchRequest // in-flight header requests keyed by reqID
	deliverChn        chan fetchResponse      // delivers header responses to syncLoop
	timeoutChn        chan uint64             // signals hard timeouts of requests by reqID
	requestTriggered  bool                    // true while a requestTrigger signal is pending/being processed
	requestTrigger    chan struct{}           // wakes syncLoop to attempt the next request (buffered, size 1)
	lastTrustedHeader *types.Header           // last header trusted under ultra light client (ULC) mode, recorded before sync
}

// lightChain extends the BlockChain interface by locking.
type lightChain interface {
	BlockChain
	LockChain()
	UnlockChain()
}

// fetcherPeerInfo holds fetcher-specific information about each active peer
type fetcherPeerInfo struct {
	root, lastAnnounced *fetcherTreeNode // root of the peer's block tree and the most recently announced node
	nodeCnt             int              // number of nodes in the tree, bounded by maxNodeCount
	confirmedTd         *big.Int         // highest Td announced by this peer that has also been downloaded/validated
	bestConfirmed       *fetcherTreeNode // node belonging to confirmedTd
	nodeByHash          map[common.Hash]*fetcherTreeNode
	firstUpdateStats    *updateStatsEntry // head of this peer's view into the block delay statistics list
}

// fetcherTreeNode is a node of a tree that holds information about blocks recently
// announced and confirmed by a certain peer. Each new announce message from a peer
// adds nodes to the tree, based on the previous announced head and the reorg depth.
// There are three possible states for a tree node:
// - announced: not downloaded (known) yet, but we know its head, number and td
// - intermediate: not known, hash and td are empty, they are filled out when it becomes known
// - known: both announced by this peer and downloaded (from any peer).
// This structure makes it possible to always know which peer has a certain block,
// which is necessary for selecting a suitable peer for ODR requests and also for
// canonizing new heads. It also helps to always download the minimum necessary
// amount of headers with a single request.
type fetcherTreeNode struct {
	hash             common.Hash
	number           uint64
	td               *big.Int // nil for intermediate nodes that have not been announced yet
	known, requested bool
	parent           *fetcherTreeNode
	children         []*fetcherTreeNode
}

// fetchRequest represents a header download request
type fetchRequest struct {
	hash    common.Hash    // hash of the topmost requested header
	amount  uint64         // number of headers requested, walking backwards from hash
	peer    *peer          // peer the request was sent to
	sent    mclock.AbsTime // time the request was sent, for response time statistics
	timeout bool           // set when the soft timeout has elapsed before delivery
}

// fetchResponse represents a header download response
type fetchResponse struct {
	reqID   uint64
	headers []*types.Header
	peer    *peer
}

// newLightFetcher creates a new light fetcher
func newLightFetcher(pm *ProtocolManager) *lightFetcher {
	f := &lightFetcher{
		pm:             pm,
		chain:          pm.blockchain.(*light.LightChain),
		odr:            pm.odr,
		peers:          make(map[*peer]*fetcherPeerInfo),
		deliverChn:     make(chan fetchResponse, 100),
		requested:      make(map[uint64]fetchRequest),
		timeoutChn:     make(chan uint64),
		requestTrigger: make(chan struct{}, 1),
		syncDone:       make(chan *peer),
		maxConfirmedTd: big.NewInt(0),
	}
	// subscribe for peer registration/unregistration notifications
	pm.peers.notify(f)

	f.pm.wg.Add(1)
	go f.syncLoop()
	return f
}

// syncLoop is the main event loop of the light fetcher. It serializes all
// request scheduling, timeout handling and response processing; it runs until
// pm.quitSync is closed.
func (f *lightFetcher) syncLoop() {
	defer f.pm.wg.Done()
	for {
		select {
		case <-f.pm.quitSync:
			return
		// request loop keeps running until no further requests are necessary or possible
		case <-f.requestTrigger:
			f.lock.Lock()
			var (
				rq      *distReq
				reqID   uint64
				syncing bool
			)
			// while syncing, no new header requests are started
			if !f.syncing {
				rq, reqID, syncing = f.nextRequest()
			}
			f.requestTriggered = rq != nil
			f.lock.Unlock()

			if rq != nil {
				if _, ok := <-f.pm.reqDist.queue(rq); ok {
					if syncing {
						f.lock.Lock()
						f.syncing = true
						f.lock.Unlock()
					} else {
						// arm the soft timeout; when it fires, the request is
						// flagged so its eventual delivery is counted as late
						go func() {
							time.Sleep(softRequestTimeout)
							f.reqMu.Lock()
							req, ok := f.requested[reqID]
							if ok {
								req.timeout = true
								f.requested[reqID] = req
							}
							f.reqMu.Unlock()
							// keep starting new requests while possible
							f.requestTrigger <- struct{}{}
						}()
					}
				} else {
					// request could not be queued at the distributor, retry
					f.requestTrigger <- struct{}{}
				}
			}
		case reqID := <-f.timeoutChn:
			// hard timeout: drop the request and the peer that failed to serve it
			f.reqMu.Lock()
			req, ok := f.requested[reqID]
			if ok {
				delete(f.requested, reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)
				req.peer.Log().Debug("Fetching data timed out hard")
				go f.pm.removePeer(req.peer.id)
			}
		case resp := <-f.deliverChn:
			f.reqMu.Lock()
			req, ok := f.requested[resp.reqID]
			// a response is only valid if it comes from the peer the request was sent to
			if ok && req.peer != resp.peer {
				ok = false
			}
			if ok {
				delete(f.requested, resp.reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), req.timeout)
			}
			f.lock.Lock()
			// unknown reqID or failed processing drops the responding peer;
			// while syncing, responses are accepted without processing
			if !ok || !(f.syncing || f.processResponse(req, resp)) {
				resp.peer.Log().Debug("Failed processing response")
				go f.pm.removePeer(resp.peer.id)
			}
			f.lock.Unlock()
		case p := <-f.syncDone:
			f.lock.Lock()
			p.Log().Debug("Done synchronising with peer")
			f.checkSyncedHeaders(p)
			f.syncing = false
			f.lock.Unlock()
			f.requestTrigger <- struct{}{} // f.requestTriggered is always true here
		}
	}
}

// registerPeer adds a new peer to the fetcher's peer set
func (f *lightFetcher) registerPeer(p *peer) {
	p.lock.Lock()
	// wire the ODR availability check to this fetcher's announcement tracking
	p.hasBlock = func(hash common.Hash, number uint64, hasState bool) bool {
		return f.peerHasBlock(p, hash, number, hasState)
	}
	p.lock.Unlock()

	f.lock.Lock()
	defer f.lock.Unlock()
	f.peers[p] = &fetcherPeerInfo{nodeByHash: make(map[common.Hash]*fetcherTreeNode)}
}

// unregisterPeer removes a new peer from the fetcher's peer set
func (f *lightFetcher) unregisterPeer(p *peer) {
	p.lock.Lock()
	p.hasBlock = nil
	p.lock.Unlock()

	f.lock.Lock()
	defer f.lock.Unlock()

	// check for potential timed out block delay statistics
	f.checkUpdateStats(p, nil)
	delete(f.peers, p)
}

// announce processes a new announcement message received from a peer, adding new
// nodes to the peer's block tree and removing old nodes if necessary
func (f *lightFetcher) announce(p *peer, head *announceData) {
	f.lock.Lock()
	defer f.lock.Unlock()
	p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth)

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Announcement from unknown peer")
		return
	}

	if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {
		// announced tds should be strictly monotonic
		p.Log().Debug("Received non-monotonic td", "current", head.Td, "previous", fp.lastAnnounced.td)
		go f.pm.removePeer(p.id)
		return
	}

	// walk back ReorgDepth steps from the last announced node to find the
	// reorg common ancestor of the new head
	n := fp.lastAnnounced
	for i := uint64(0); i < head.ReorgDepth; i++ {
		if n == nil {
			break
		}
		n = n.parent
	}
	// n is now the reorg common ancestor, add a new branch of nodes
	if n != nil && (head.Number >= n.number+maxNodeCount || head.Number <= n.number) {
		// if announced head block height is lower or same as n or too far from it to add
		// intermediate nodes then discard previous announcement info and trigger a resync
		n = nil
		fp.nodeCnt = 0
		fp.nodeByHash = make(map[common.Hash]*fetcherTreeNode)
	}
	// check if the node count is too high to add new nodes, discard oldest ones if necessary
	if n != nil {
		// n is now the reorg common ancestor, add a new branch of nodes
		// check if the node count is too high to add new nodes
		locked := false
		for uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil {
			if !locked {
				// take the chain lock at most once; the deferred unlock runs
				// when announce returns
				f.chain.LockChain()
				defer f.chain.UnlockChain()
				locked = true
			}
			// if one of root's children is canonical, keep it, delete other branches and root itself
			var newRoot *fetcherTreeNode
			for i, nn := range fp.root.children {
				if rawdb.ReadCanonicalHash(f.pm.chainDb, nn.number) == nn.hash {
					fp.root.children = append(fp.root.children[:i], fp.root.children[i+1:]...)
					nn.parent = nil
					newRoot = nn
					break
				}
			}
			fp.deleteNode(fp.root)
			if n == fp.root {
				n = newRoot
			}
			fp.root = newRoot
			if newRoot == nil || !f.checkKnownNode(p, newRoot) {
				fp.bestConfirmed = nil
				fp.confirmedTd = nil
			}

			if n == nil {
				break
			}
		}
		if n != nil {
			// create intermediate (unannounced) nodes up to the new head
			for n.number < head.Number {
				nn := &fetcherTreeNode{number: n.number + 1, parent: n}
				n.children = append(n.children, nn)
				n = nn
				fp.nodeCnt++
			}
			n.hash = head.Hash
			n.td = head.Td
			fp.nodeByHash[n.hash] = n
		}
	}

	if n == nil {
		// could not find reorg common ancestor or had to delete entire tree, a new root and a resync is needed
		if fp.root != nil {
			fp.deleteNode(fp.root)
		}
		n = &fetcherTreeNode{hash: head.Hash, number: head.Number, td: head.Td}
		fp.root = n
		fp.nodeCnt++
		fp.nodeByHash[n.hash] = n
		fp.bestConfirmed = nil
		fp.confirmedTd = nil
	}

	f.checkKnownNode(p, n)
	p.lock.Lock()
	p.headInfo = head
	fp.lastAnnounced = n
	p.lock.Unlock()
	f.checkUpdateStats(p, nil)
	// wake syncLoop unless a trigger is already pending
	if !f.requestTriggered {
		f.requestTriggered = true
		f.requestTrigger <- struct{}{}
	}
}

// peerHasBlock returns true if we can assume the peer knows the given block
// based on its announcements
func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64, hasState bool) bool {
	f.lock.Lock()
	defer f.lock.Unlock()

	fp := f.peers[p]
	if fp == nil || fp.root == nil {
		return false
	}

	if hasState {
		// state is only assumed to be available for the most recent
		// serverStateAvailable blocks behind the peer's announced head
		if fp.lastAnnounced == nil || fp.lastAnnounced.number > number+serverStateAvailable {
			return false
		}
	}

	if f.syncing {
		// always return true when syncing
		// false positives are acceptable, a more sophisticated condition can be implemented later
		return true
	}

	if number >= fp.root.number {
		// it is recent enough that if it is known, is should be in the peer's block tree
		return fp.nodeByHash[hash] != nil
	}
	f.chain.LockChain()
	defer f.chain.UnlockChain()
	// if it's older than the peer's block tree root but it's in the same canonical chain
	// as the root, we can still be sure the peer knows it
	//
	// when syncing, just check if it is part of the known chain, there is nothing better we
	// can do since we do not know the most recent block hash yet
	return rawdb.ReadCanonicalHash(f.pm.chainDb, fp.root.number) == fp.root.hash && rawdb.ReadCanonicalHash(f.pm.chainDb, number) == hash
}

// requestAmount calculates the amount of headers to be downloaded starting
// from a certain head backwards
func (f *lightFetcher) requestAmount(p *peer, n *fetcherTreeNode) uint64 {
	amount := uint64(0)
	nn := n
	for nn != nil && !f.checkKnownNode(p, nn) {
		nn = nn.parent
		amount++
	}
	if nn == nil {
		// no known ancestor in the tree: request all headers down to genesis
		amount = n.number
	}
	return amount
}

// requestedID tells if a certain reqID has been requested by the fetcher
func (f *lightFetcher) requestedID(reqID uint64) bool {
	f.reqMu.RLock()
	_, ok := f.requested[reqID]
	f.reqMu.RUnlock()
	return ok
}

// nextRequest selects the peer and announced head to be requested next, amount
// to be downloaded starting from the head backwards is also returned
func (f *lightFetcher) nextRequest() (*distReq, uint64, bool) {
	var (
		bestHash    common.Hash
		bestAmount  uint64
		bestTd      *big.Int
		bestSyncing bool
	)
	bestHash, bestAmount, bestTd, bestSyncing = f.findBestRequest()

	// pointer comparison: findBestRequest returns f.maxConfirmedTd itself when
	// no better candidate was found, meaning there is nothing to request
	if bestTd == f.maxConfirmedTd {
		return nil, 0, false
	}

	var rq *distReq
	reqID := genReqID()
	if bestSyncing {
		rq = f.newFetcherDistReqForSync(bestHash)
	} else {
		rq = f.newFetcherDistReq(bestHash, reqID, bestAmount)
	}
	return rq, reqID, bestSyncing
}

// findBestRequest finds the best head to request that has been announced by but not yet requested from a known peer.
// It also returns the announced Td (which should be verified after fetching the head),
// the necessary amount to request and whether a downloader sync is necessary instead of a normal header request.
func (f *lightFetcher) findBestRequest() (bestHash common.Hash, bestAmount uint64, bestTd *big.Int, bestSyncing bool) {
	bestTd = f.maxConfirmedTd
	bestSyncing = false

	for p, fp := range f.peers {
		for hash, n := range fp.nodeByHash {
			if f.checkKnownNode(p, n) || n.requested {
				continue
			}

			//if ulc mode is disabled, isTrustedHash returns true
			amount := f.requestAmount(p, n)
			if (bestTd == nil || n.td.Cmp(bestTd) > 0 || amount < bestAmount) && (f.isTrustedHash(hash) || f.maxConfirmedTd.Int64() == 0) {
				bestHash = hash
				bestTd = n.td
				bestAmount = amount
				// a full sync is needed when the peer's tree has no confirmed
				// root we can extend with a simple header request
				bestSyncing = fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root)
			}
		}
	}
	return
}

// isTrustedHash checks if the block can be trusted by the minimum trusted fraction.
func (f *lightFetcher) isTrustedHash(hash common.Hash) bool {
	// If ultra light client mode is disabled, trust all hashes
	if f.pm.ulc == nil {
		return true
	}
	// Ultra light enabled, only trust after enough confirmations
	var agreed int
	for peer, info := range f.peers {
		if peer.trusted && info.nodeByHash[hash] != nil {
			agreed++
		}
	}
	// NOTE(review): divides by len(f.pm.ulc.keys) — if ulc can be non-nil with
	// an empty key list this panics; confirm the config guarantees keys != 0
	return 100*agreed/len(f.pm.ulc.keys) >= f.pm.ulc.fraction
}

// newFetcherDistReqForSync creates a distributor request that triggers a full
// downloader sync with the chosen peer instead of a plain header request.
func (f *lightFetcher) newFetcherDistReqForSync(bestHash common.Hash) *distReq {
	return &distReq{
		getCost: func(dp distPeer) uint64 {
			// syncing has no flow-control cost
			return 0
		},
		canSend: func(dp distPeer) bool {
			p := dp.(*peer)
			f.lock.Lock()
			defer f.lock.Unlock()

			if p.onlyAnnounce {
				return false
			}
			// only sync with a peer that has announced the chosen head
			fp := f.peers[p]
			return fp != nil && fp.nodeByHash[bestHash] != nil
		},
		request: func(dp distPeer) func() {
			if f.pm.ulc != nil {
				// Keep last trusted header before sync
				f.setLastTrustedHeader(f.chain.CurrentHeader())
			}
			go func() {
				p := dp.(*peer)
				p.Log().Debug("Synchronisation started")
				f.pm.synchronise(p)
				// signal syncLoop when the downloader is done
				f.syncDone <- p
			}()
			return nil
		},
	}
}

// newFetcherDistReq creates a new request for the distributor.
519 func (f *lightFetcher) newFetcherDistReq(bestHash common.Hash, reqID uint64, bestAmount uint64) *distReq { 520 return &distReq{ 521 getCost: func(dp distPeer) uint64 { 522 p := dp.(*peer) 523 return p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount)) 524 }, 525 canSend: func(dp distPeer) bool { 526 p := dp.(*peer) 527 f.lock.Lock() 528 defer f.lock.Unlock() 529 530 if p.onlyAnnounce { 531 return false 532 } 533 fp := f.peers[p] 534 if fp == nil { 535 return false 536 } 537 n := fp.nodeByHash[bestHash] 538 return n != nil && !n.requested 539 }, 540 request: func(dp distPeer) func() { 541 p := dp.(*peer) 542 f.lock.Lock() 543 fp := f.peers[p] 544 if fp != nil { 545 n := fp.nodeByHash[bestHash] 546 if n != nil { 547 n.requested = true 548 } 549 } 550 f.lock.Unlock() 551 552 cost := p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount)) 553 p.fcServer.QueuedRequest(reqID, cost) 554 f.reqMu.Lock() 555 f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()} 556 f.reqMu.Unlock() 557 go func() { 558 time.Sleep(hardRequestTimeout) 559 f.timeoutChn <- reqID 560 }() 561 return func() { p.RequestHeadersByHash(reqID, cost, bestHash, int(bestAmount), 0, true) } 562 }, 563 } 564 } 565 566 // deliverHeaders delivers header download request responses for processing 567 func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types.Header) { 568 f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer} 569 } 570 571 // processResponse processes header download request responses, returns true if successful 572 func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool { 573 if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash { 574 req.peer.Log().Debug("Response content mismatch", "requested", len(resp.headers), "reqfrom", resp.headers[0], "delivered", req.amount, "delfrom", req.hash) 575 return false 576 } 577 headers := make([]*types.Header, 
req.amount) 578 for i, header := range resp.headers { 579 headers[int(req.amount)-1-i] = header 580 } 581 582 if _, err := f.chain.InsertHeaderChain(headers, 1); err != nil { 583 if err == consensus.ErrFutureBlock { 584 return true 585 } 586 log.Debug("Failed to insert header chain", "err", err) 587 return false 588 } 589 tds := make([]*big.Int, len(headers)) 590 for i, header := range headers { 591 td := f.chain.GetTd(header.Hash(), header.Number.Uint64()) 592 if td == nil { 593 log.Debug("Total difficulty not found for header", "index", i+1, "number", header.Number, "hash", header.Hash()) 594 return false 595 } 596 tds[i] = td 597 } 598 f.newHeaders(headers, tds) 599 return true 600 } 601 602 // newHeaders updates the block trees of all active peers according to a newly 603 // downloaded and validated batch or headers 604 func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) { 605 var maxTd *big.Int 606 607 for p, fp := range f.peers { 608 if !f.checkAnnouncedHeaders(fp, headers, tds) { 609 p.Log().Debug("Inconsistent announcement") 610 go f.pm.removePeer(p.id) 611 } 612 if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) { 613 maxTd = fp.confirmedTd 614 } 615 } 616 617 if maxTd != nil { 618 f.updateMaxConfirmedTd(maxTd) 619 } 620 } 621 622 // checkAnnouncedHeaders updates peer's block tree if necessary after validating 623 // a batch of headers. It searches for the latest header in the batch that has a 624 // matching tree node (if any), and if it has not been marked as known already, 625 // sets it and its parents to known (even those which are older than the currently 626 // validated ones). Return value shows if all hashes, numbers and Tds matched 627 // correctly to the announced values (otherwise the peer should be dropped). 
func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*types.Header, tds []*big.Int) bool {
	var (
		n      *fetcherTreeNode
		header *types.Header
		td     *big.Int
	)

	// iterate the batch newest-first; once the batch is exhausted (i < 0),
	// keep walking parents via the local chain database
	for i := len(headers) - 1; ; i-- {
		if i < 0 {
			if n == nil {
				// no more headers and nothing to match
				return true
			}
			// we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching
			hash, number := header.ParentHash, header.Number.Uint64()-1
			td = f.chain.GetTd(hash, number)
			header = f.chain.GetHeader(hash, number)
			if header == nil || td == nil {
				log.Error("Missing parent of validated header", "hash", hash, "number", number)
				return false
			}
		} else {
			header = headers[i]
			td = tds[i]
		}
		hash := header.Hash()
		number := header.Number.Uint64()
		if n == nil {
			n = fp.nodeByHash[hash]
		}
		if n != nil {
			if n.td == nil {
				// node was unannounced
				if nn := fp.nodeByHash[hash]; nn != nil {
					// if there was already a node with the same hash, continue there and drop this one
					nn.children = append(nn.children, n.children...)
					n.children = nil
					fp.deleteNode(n)
					n = nn
				} else {
					// fill out the intermediate node with the validated data
					n.hash = hash
					n.td = td
					fp.nodeByHash[hash] = n
				}
			}
			// check if it matches the header
			if n.hash != hash || n.number != number || n.td.Cmp(td) != 0 {
				// peer has previously made an invalid announcement
				return false
			}
			if n.known {
				// we reached a known node that matched our expectations, return with success
				return true
			}
			n.known = true
			if fp.confirmedTd == nil || td.Cmp(fp.confirmedTd) > 0 {
				fp.confirmedTd = td
				fp.bestConfirmed = n
			}
			n = n.parent
			if n == nil {
				return true
			}
		}
	}
}

// checkSyncedHeaders updates peer's block tree after synchronisation by marking
// downloaded headers as known. If none of the announced headers are found after
// syncing, the peer is dropped.
func (f *lightFetcher) checkSyncedHeaders(p *peer) {
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check sync headers")
		return
	}
	var (
		node = fp.lastAnnounced
		td   *big.Int
	)
	if f.pm.ulc != nil {
		// Roll back untrusted blocks
		h, unapproved := f.lastTrustedTreeNode(p)
		f.chain.Rollback(unapproved)
		node = fp.nodeByHash[h.Hash()]
	}
	// Find last valid block by walking back until a stored Td is found
	for node != nil {
		if td = f.chain.GetTd(node.hash, node.number); td != nil {
			break
		}
		node = node.parent
	}
	// Now node is the latest downloaded/approved header after syncing
	if node == nil {
		p.Log().Debug("Synchronisation failed")
		go f.pm.removePeer(p.id)
		return
	}
	header := f.chain.GetHeader(node.hash, node.number)
	f.newHeaders([]*types.Header{header}, []*big.Int{td})
}

// lastTrustedTreeNode return last approved treeNode and a list of unapproved hashes
func (f *lightFetcher) lastTrustedTreeNode(p *peer) (*types.Header, []common.Hash) {
	unapprovedHashes := make([]common.Hash, 0)
	current := f.chain.CurrentHeader()

	if f.lastTrustedHeader == nil {
		return current, unapprovedHashes
	}

	canonical := f.chain.CurrentHeader()
	if canonical.Number.Uint64() > f.lastTrustedHeader.Number.Uint64() {
		canonical = f.chain.GetHeaderByNumber(f.lastTrustedHeader.Number.Uint64())
	}
	commonAncestor := rawdb.FindCommonAncestor(f.pm.chainDb, canonical, f.lastTrustedHeader)
	if commonAncestor == nil {
		log.Error("Common ancestor of last trusted header and canonical header is nil", "canonical hash", canonical.Hash(), "trusted hash", f.lastTrustedHeader.Hash())
		return current, unapprovedHashes
	}

	// NOTE(review): this loop condition looks inverted — collecting unapproved
	// hashes from the head back to the common ancestor would normally continue
	// while current != commonAncestor; as written the body runs only when the
	// head already IS the ancestor. Confirm the intended semantics.
	for current.Hash() == commonAncestor.Hash() {
		if f.isTrustedHash(current.Hash()) {
			break
		}
		unapprovedHashes = append(unapprovedHashes, current.Hash())
		current = f.chain.GetHeader(current.ParentHash, current.Number.Uint64()-1)
	}
	return current, unapprovedHashes
}

// setLastTrustedHeader records the header trusted before a ULC sync starts.
func (f *lightFetcher) setLastTrustedHeader(h *types.Header) {
	f.lock.Lock()
	defer f.lock.Unlock()
	f.lastTrustedHeader = h
}

// checkKnownNode checks if a block tree node is known (downloaded and validated)
// If it was not known previously but found in the database, sets its known flag
func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
	if n.known {
		return true
	}
	td := f.chain.GetTd(n.hash, n.number)
	if td == nil {
		return false
	}
	header := f.chain.GetHeader(n.hash, n.number)
	// check the availability of both header and td because reads are not protected by chain db mutex
	// Note: returning false is always safe here
	if header == nil {
		return false
	}

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check known nodes")
		return false
	}
	if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
		p.Log().Debug("Inconsistent announcement")
		go f.pm.removePeer(p.id)
	}
	if fp.confirmedTd != nil {
		f.updateMaxConfirmedTd(fp.confirmedTd)
	}
	// n.known may have been set by checkAnnouncedHeaders above
	return n.known
}

// deleteNode deletes a node and its child subtrees from a peer's block tree
func (fp *fetcherPeerInfo) deleteNode(n *fetcherTreeNode) {
	// detach n from its parent's child list
	if n.parent != nil {
		for i, nn := range n.parent.children {
			if nn == n {
				n.parent.children = append(n.parent.children[:i], n.parent.children[i+1:]...)
				break
			}
		}
	}
	// delete the subtree: recurse into siblings, iterate down the first child
	for {
		if n.td != nil {
			// only announced nodes are present in nodeByHash
			delete(fp.nodeByHash, n.hash)
		}
		fp.nodeCnt--
		if len(n.children) == 0 {
			return
		}
		for i, nn := range n.children {
			if i == 0 {
				n = nn
			} else {
				fp.deleteNode(nn)
			}
		}
	}
}

// updateStatsEntry items form a linked list that is expanded with a new item every time a new head with a higher Td
// than the previous one has been downloaded and validated. The list contains a series of maximum confirmed Td values
// and the time these values have been confirmed, both increasing monotonically. A maximum confirmed Td is calculated
// both globally for all peers and also for each individual peer (meaning that the given peer has announced the head
// and it has also been downloaded from any peer, either before or after the given announcement).
// The linked list has a global tail where new confirmed Td entries are added and a separate head for each peer,
// pointing to the next Td entry that is higher than the peer's max confirmed Td (nil if it has already confirmed
// the current global head).
type updateStatsEntry struct {
	time mclock.AbsTime
	td   *big.Int
	next *updateStatsEntry
}

// updateMaxConfirmedTd updates the block delay statistics of active peers. Whenever a new highest Td is confirmed,
// adds it to the end of a linked list together with the time it has been confirmed. Then checks which peers have
// already confirmed a head with the same or higher Td (which counts as zero block delay) and updates their statistics.
// Those who have not confirmed such a head by now will be updated by a subsequent checkUpdateStats call with a
// positive block delay value.
func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) {
	if f.maxConfirmedTd == nil || td.Cmp(f.maxConfirmedTd) > 0 {
		f.maxConfirmedTd = td
		newEntry := &updateStatsEntry{
			time: mclock.Now(),
			td:   td,
		}
		// append to the global tail of the linked list
		if f.lastUpdateStats != nil {
			f.lastUpdateStats.next = newEntry
		}

		f.lastUpdateStats = newEntry
		for p := range f.peers {
			f.checkUpdateStats(p, newEntry)
		}
	}
}

// checkUpdateStats checks those peers who have not confirmed a certain highest Td (or a larger one) by the time it
// has been confirmed by another peer. If they have confirmed such a head by now, their stats are updated with the
// block delay which is (this peer's confirmation time)-(first confirmation time). After blockDelayTimeout has passed,
// the stats are updated with blockDelayTimeout value. In either case, the confirmed or timed out updateStatsEntry
// items are removed from the head of the linked list.
// If a new entry has been added to the global tail, it is passed as a parameter here even though this function
// assumes that it has already been added, so that if the peer's list is empty (all heads confirmed, head is nil),
// it can set the new head to newEntry.
871 func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) { 872 now := mclock.Now() 873 fp := f.peers[p] 874 if fp == nil { 875 p.Log().Debug("Unknown peer to check update stats") 876 return 877 } 878 879 if newEntry != nil && fp.firstUpdateStats == nil { 880 fp.firstUpdateStats = newEntry 881 } 882 for fp.firstUpdateStats != nil && fp.firstUpdateStats.time <= now-mclock.AbsTime(blockDelayTimeout) { 883 f.pm.serverPool.adjustBlockDelay(p.poolEntry, blockDelayTimeout) 884 fp.firstUpdateStats = fp.firstUpdateStats.next 885 } 886 if fp.confirmedTd != nil { 887 for fp.firstUpdateStats != nil && fp.firstUpdateStats.td.Cmp(fp.confirmedTd) <= 0 { 888 f.pm.serverPool.adjustBlockDelay(p.poolEntry, time.Duration(now-fp.firstUpdateStats.time)) 889 fp.firstUpdateStats = fp.firstUpdateStats.next 890 } 891 } 892 }