github.com/letterj/go-ethereum@v1.8.22-0.20190204142846-520024dfd689/les/fetcher.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package les implements the Light Ethereum Subprotocol.
package les

import (
    "math/big"
    "sync"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/mclock"
    "github.com/ethereum/go-ethereum/consensus"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/light"
    "github.com/ethereum/go-ethereum/log"
)

const (
    blockDelayTimeout    = time.Second * 10 // timeout for a peer to announce a head that has already been confirmed by others
    maxNodeCount         = 20               // maximum number of fetcherTreeNode entries remembered for each peer
    serverStateAvailable = 100              // number of recent blocks where state availability is assumed
)

// lightFetcher implements retrieval of newly announced headers. It also provides a peerHasBlock function for the
// ODR system to ensure that we only request data related to a certain block from peers who have already processed
// and announced that block.
type lightFetcher struct {
    pm    *ProtocolManager
    odr   *LesOdr
    chain lightChain

    lock            sync.Mutex // lock protects access to the fetcher's internal state variables except sent requests
    maxConfirmedTd  *big.Int
    peers           map[*peer]*fetcherPeerInfo
    lastUpdateStats *updateStatsEntry
    syncing         bool
    syncDone        chan *peer

    reqMu             sync.RWMutex // reqMu protects access to sent header fetch requests
    requested         map[uint64]fetchRequest
    deliverChn        chan fetchResponse
    timeoutChn        chan uint64
    requestChn        chan bool // true if initiated from outside
    lastTrustedHeader *types.Header
}

// lightChain extends the BlockChain interface by locking.
type lightChain interface {
    BlockChain
    LockChain()
    UnlockChain()
}

// fetcherPeerInfo holds fetcher-specific information about each active peer
type fetcherPeerInfo struct {
    root, lastAnnounced *fetcherTreeNode
    nodeCnt             int
    confirmedTd         *big.Int
    bestConfirmed       *fetcherTreeNode
    nodeByHash          map[common.Hash]*fetcherTreeNode
    firstUpdateStats    *updateStatsEntry
}

// fetcherTreeNode is a node of a tree that holds information about blocks recently
// announced and confirmed by a certain peer. Each new announce message from a peer
// adds nodes to the tree, based on the previously announced head and the reorg depth.
// There are three possible states for a tree node:
// - announced: not downloaded (known) yet, but we know its hash, number and td
// - intermediate: not known, hash and td are empty, they are filled out when it becomes known
// - known: both announced by this peer and downloaded (from any peer).
// This structure makes it possible to always know which peer has a certain block,
// which is necessary for selecting a suitable peer for ODR requests and also for
// canonizing new heads. It also helps to always download the minimum necessary
// amount of headers with a single request.
type fetcherTreeNode struct {
    hash             common.Hash
    number           uint64
    td               *big.Int
    known, requested bool
    parent           *fetcherTreeNode
    children         []*fetcherTreeNode
}
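
// Editor's note: an illustrative sketch (not part of the upstream file) of how
// the three node states appear in practice. Suppose a peer last announced block
// #100 and now announces #103 with ReorgDepth 0: announce() creates intermediate
// nodes for #101 and #102 (hash and td still empty) and an announced node for
// #103 carrying the advertised hash and td:
//
//     n100 (known) -> n101 (intermediate) -> n102 (intermediate) -> n103 (announced)
//
// Once the corresponding headers are downloaded and validated,
// checkAnnouncedHeaders fills in the empty hashes and tds and flips the nodes
// to known.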

// fetchRequest represents a header download request
type fetchRequest struct {
    hash    common.Hash
    amount  uint64
    peer    *peer
    sent    mclock.AbsTime
    timeout bool
}

// fetchResponse represents a header download response
type fetchResponse struct {
    reqID   uint64
    headers []*types.Header
    peer    *peer
}

// newLightFetcher creates a new light fetcher
func newLightFetcher(pm *ProtocolManager) *lightFetcher {
    f := &lightFetcher{
        pm:             pm,
        chain:          pm.blockchain.(*light.LightChain),
        odr:            pm.odr,
        peers:          make(map[*peer]*fetcherPeerInfo),
        deliverChn:     make(chan fetchResponse, 100),
        requested:      make(map[uint64]fetchRequest),
        timeoutChn:     make(chan uint64),
        requestChn:     make(chan bool, 100),
        syncDone:       make(chan *peer),
        maxConfirmedTd: big.NewInt(0),
    }
    pm.peers.notify(f)

    f.pm.wg.Add(1)
    go f.syncLoop()
    return f
}

// syncLoop is the main event loop of the light fetcher
func (f *lightFetcher) syncLoop() {
    requesting := false
    defer f.pm.wg.Done()
    for {
        select {
        case <-f.pm.quitSync:
            return
        // when a new announce is received, the request loop keeps running until
        // no further requests are necessary or possible
        case newAnnounce := <-f.requestChn:
            f.lock.Lock()
            s := requesting
            requesting = false
            var (
                rq      *distReq
                reqID   uint64
                syncing bool
            )

            if !f.syncing && !(newAnnounce && s) {
                rq, reqID, syncing = f.nextRequest()
            }
            f.lock.Unlock()

            if rq != nil {
                requesting = true
                if _, ok := <-f.pm.reqDist.queue(rq); ok {
                    if syncing {
                        f.lock.Lock()
                        f.syncing = true
                        f.lock.Unlock()
                    } else {
                        go func() {
                            time.Sleep(softRequestTimeout)
                            f.reqMu.Lock()
                            req, ok := f.requested[reqID]
                            if ok {
                                req.timeout = true
                                f.requested[reqID] = req
                            }
                            f.reqMu.Unlock()
                            // keep starting new requests while possible
                            f.requestChn <- false
                        }()
                    }
                } else {
                    f.requestChn <- false
                }
            }
        case reqID := <-f.timeoutChn:
            f.reqMu.Lock()
            req, ok := f.requested[reqID]
            if ok {
                delete(f.requested, reqID)
            }
            f.reqMu.Unlock()
            if ok {
                f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)
                req.peer.Log().Debug("Fetching data timed out hard")
                go f.pm.removePeer(req.peer.id)
            }
        case resp := <-f.deliverChn:
            f.reqMu.Lock()
            req, ok := f.requested[resp.reqID]
            if ok && req.peer != resp.peer {
                ok = false
            }
            if ok {
                delete(f.requested, resp.reqID)
            }
            f.reqMu.Unlock()
            if ok {
                f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), req.timeout)
            }
            f.lock.Lock()
            if !ok || !(f.syncing || f.processResponse(req, resp)) {
                resp.peer.Log().Debug("Failed processing response")
                go f.pm.removePeer(resp.peer.id)
            }
            f.lock.Unlock()
        case p := <-f.syncDone:
            f.lock.Lock()
            p.Log().Debug("Done synchronising with peer")
            f.checkSyncedHeaders(p)
            f.syncing = false
            f.lock.Unlock()
            f.requestChn <- false
        }
    }
}
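
// Editor's note: an illustrative sketch (not part of the upstream file) of the
// request lifecycle driven by syncLoop; all names refer to the code above:
//
//     announce() --> requestChn --> nextRequest() --> reqDist.queue(rq)
//                                        |
//         soft timeout (softRequestTimeout): mark req.timeout, keep requesting
//         hard timeout (hardRequestTimeout): timeoutChn -> drop the peer
//         response arrives:                  deliverChn -> processResponse()
//
// A response that arrives after the soft timeout but before the hard timeout is
// still processed; only the peer's response-time statistics are penalised (the
// req.timeout flag is passed on to adjustResponseTime).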

// registerPeer adds a new peer to the fetcher's peer set
func (f *lightFetcher) registerPeer(p *peer) {
    p.lock.Lock()
    p.hasBlock = func(hash common.Hash, number uint64, hasState bool) bool {
        return f.peerHasBlock(p, hash, number, hasState)
    }
    p.lock.Unlock()

    f.lock.Lock()
    defer f.lock.Unlock()
    f.peers[p] = &fetcherPeerInfo{nodeByHash: make(map[common.Hash]*fetcherTreeNode)}
}

// unregisterPeer removes a peer from the fetcher's peer set
func (f *lightFetcher) unregisterPeer(p *peer) {
    p.lock.Lock()
    p.hasBlock = nil
    p.lock.Unlock()

    f.lock.Lock()
    defer f.lock.Unlock()

    // check for potential timed out block delay statistics
    f.checkUpdateStats(p, nil)
    delete(f.peers, p)
}

// announce processes a new announcement message received from a peer, adding new
// nodes to the peer's block tree and removing old nodes if necessary
func (f *lightFetcher) announce(p *peer, head *announceData) {
    f.lock.Lock()
    defer f.lock.Unlock()
    p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth)

    fp := f.peers[p]
    if fp == nil {
        p.Log().Debug("Announcement from unknown peer")
        return
    }

    if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {
        // announced tds should be strictly monotonic
        p.Log().Debug("Received non-monotonic td", "current", head.Td, "previous", fp.lastAnnounced.td)
        go f.pm.removePeer(p.id)
        return
    }

    n := fp.lastAnnounced
    for i := uint64(0); i < head.ReorgDepth; i++ {
        if n == nil {
            break
        }
        n = n.parent
    }
    // n is now the reorg common ancestor, add a new branch of nodes
    if n != nil && (head.Number >= n.number+maxNodeCount || head.Number <= n.number) {
        // if the announced head block height is lower than or equal to n's, or too far from it
        // to add intermediate nodes, then discard previous announcement info and trigger a resync
        n = nil
        fp.nodeCnt = 0
        fp.nodeByHash = make(map[common.Hash]*fetcherTreeNode)
    }
    if n != nil {
        // check if the node count is too high to add new nodes, discard the oldest ones if necessary
        locked := false
        for uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil {
            if !locked {
                f.chain.LockChain()
                defer f.chain.UnlockChain()
                locked = true
            }
            // if one of root's children is canonical, keep it, delete other branches and root itself
            var newRoot *fetcherTreeNode
            for i, nn := range fp.root.children {
                if rawdb.ReadCanonicalHash(f.pm.chainDb, nn.number) == nn.hash {
                    fp.root.children = append(fp.root.children[:i], fp.root.children[i+1:]...)
                    nn.parent = nil
                    newRoot = nn
                    break
                }
            }
            fp.deleteNode(fp.root)
            if n == fp.root {
                n = newRoot
            }
            fp.root = newRoot
            if newRoot == nil || !f.checkKnownNode(p, newRoot) {
                fp.bestConfirmed = nil
                fp.confirmedTd = nil
            }

            if n == nil {
                break
            }
        }
        if n != nil {
            for n.number < head.Number {
                nn := &fetcherTreeNode{number: n.number + 1, parent: n}
                n.children = append(n.children, nn)
                n = nn
                fp.nodeCnt++
            }
            n.hash = head.Hash
            n.td = head.Td
            fp.nodeByHash[n.hash] = n
        }
    }

    if n == nil {
        // could not find reorg common ancestor or had to delete entire tree, a new root and a resync is needed
        if fp.root != nil {
            fp.deleteNode(fp.root)
        }
        n = &fetcherTreeNode{hash: head.Hash, number: head.Number, td: head.Td}
        fp.root = n
        fp.nodeCnt++
        fp.nodeByHash[n.hash] = n
        fp.bestConfirmed = nil
        fp.confirmedTd = nil
    }

    f.checkKnownNode(p, n)
    p.lock.Lock()
    p.headInfo = head
    fp.lastAnnounced = n
    p.lock.Unlock()
    f.checkUpdateStats(p, nil)
    f.requestChn <- true
}
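
// Editor's note: an illustrative sketch (not part of the upstream file) of the
// ReorgDepth walk at the top of announce(). Assume the peer's tree looks like
//
//     n98 -> n99 -> n100 (lastAnnounced)
//
// and the peer announces a competing block #100' with ReorgDepth 1. Starting
// from lastAnnounced, the loop steps one parent link back to n99, which becomes
// the reorg common ancestor; a fresh child n100' is then attached under n99, so
// both competing #100 heads remain addressable through nodeByHash.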

// peerHasBlock returns true if we can assume the peer knows the given block
// based on its announcements
func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64, hasState bool) bool {
    f.lock.Lock()
    defer f.lock.Unlock()

    fp := f.peers[p]
    if fp == nil || fp.root == nil {
        return false
    }

    if hasState {
        if fp.lastAnnounced == nil || fp.lastAnnounced.number > number+serverStateAvailable {
            return false
        }
    }

    if f.syncing {
        // always return true when syncing
        // false positives are acceptable, a more sophisticated condition can be implemented later
        return true
    }

    if number >= fp.root.number {
        // it is recent enough that if it is known, it should be in the peer's block tree
        return fp.nodeByHash[hash] != nil
    }
    f.chain.LockChain()
    defer f.chain.UnlockChain()
    // if it's older than the peer's block tree root but it's in the same canonical chain
    // as the root, we can still be sure the peer knows it
    //
    // when syncing, just check if it is part of the known chain, there is nothing better we
    // can do since we do not know the most recent block hash yet
    return rawdb.ReadCanonicalHash(f.pm.chainDb, fp.root.number) == fp.root.hash && rawdb.ReadCanonicalHash(f.pm.chainDb, number) == hash
}

// requestAmount calculates the number of headers to be downloaded starting
// from a certain head backwards
func (f *lightFetcher) requestAmount(p *peer, n *fetcherTreeNode) uint64 {
    amount := uint64(0)
    nn := n
    for nn != nil && !f.checkKnownNode(p, nn) {
        nn = nn.parent
        amount++
    }
    if nn == nil {
        amount = n.number
    }
    return amount
}
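
// Editor's note: an illustrative worked example (not part of the upstream file)
// of how requestAmount counts headers. With a peer tree
//
//     n97 (known) -> n98 -> n99 -> n100 (announced head)
//
// the loop walks n100 -> n99 -> n98 and stops at n97 because checkKnownNode
// reports it as already downloaded, so requestAmount returns 3: exactly the
// missing headers #98..#100. If no known ancestor is reached (nn == nil), the
// whole range below the head, n.number headers, is requested instead.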

// requestedID tells if a certain reqID has been requested by the fetcher
func (f *lightFetcher) requestedID(reqID uint64) bool {
    f.reqMu.RLock()
    _, ok := f.requested[reqID]
    f.reqMu.RUnlock()
    return ok
}

// nextRequest selects the peer and announced head to be requested next; the amount
// to be downloaded starting from the head backwards is also returned
func (f *lightFetcher) nextRequest() (*distReq, uint64, bool) {
    var (
        bestHash    common.Hash
        bestAmount  uint64
        bestTd      *big.Int
        bestSyncing bool
    )
    bestHash, bestAmount, bestTd, bestSyncing = f.findBestRequest()

    if bestTd == f.maxConfirmedTd {
        // findBestRequest returns the very same *big.Int it started from when no
        // candidate was found, so this pointer comparison means "nothing to request"
        return nil, 0, false
    }

    var rq *distReq
    reqID := genReqID()
    if bestSyncing {
        rq = f.newFetcherDistReqForSync(bestHash)
    } else {
        rq = f.newFetcherDistReq(bestHash, reqID, bestAmount)
    }
    return rq, reqID, bestSyncing
}

// findBestRequest finds the best head to request that has been announced by but not yet requested from a known peer.
// It also returns the announced Td (which should be verified after fetching the head),
// the necessary amount to request and whether a downloader sync is necessary instead of a normal header request.
func (f *lightFetcher) findBestRequest() (bestHash common.Hash, bestAmount uint64, bestTd *big.Int, bestSyncing bool) {
    bestTd = f.maxConfirmedTd
    bestSyncing = false

    for p, fp := range f.peers {
        for hash, n := range fp.nodeByHash {
            if f.checkKnownNode(p, n) || n.requested {
                continue
            }
            // if ulc mode is disabled, isTrustedHash returns true
            amount := f.requestAmount(p, n)
            if (bestTd == nil || n.td.Cmp(bestTd) > 0 || amount < bestAmount) && (f.isTrustedHash(hash) || f.maxConfirmedTd.Int64() == 0) {
                bestHash = hash
                bestTd = n.td
                bestAmount = amount
                bestSyncing = fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root)
            }
        }
    }
    return
}

// isTrustedHash checks if the block can be trusted by the minimum trusted fraction.
func (f *lightFetcher) isTrustedHash(hash common.Hash) bool {
    if !f.pm.isULCEnabled() {
        return true
    }

    var numAgreed int
    for p, fp := range f.peers {
        if !p.isTrusted {
            continue
        }
        if _, ok := fp.nodeByHash[hash]; !ok {
            continue
        }

        numAgreed++
    }

    return 100*numAgreed/len(f.pm.ulc.trustedKeys) >= f.pm.ulc.minTrustedFraction
}
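
// Editor's note: an illustrative worked example (not part of the upstream file)
// for the integer arithmetic in isTrustedHash. Assume ULC is configured with 5
// trusted keys and minTrustedFraction = 60. If 3 trusted peers have the hash in
// their block tree, 100*3/5 = 60 >= 60 and the hash is trusted; with only 2
// agreeing peers, 100*2/5 = 40 < 60 and it is rejected. Note that the division
// is integer division: with 3 keys and a fraction of 70, 2 agreeing peers give
// 100*2/3 = 66, which is still rejected.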

func (f *lightFetcher) newFetcherDistReqForSync(bestHash common.Hash) *distReq {
    return &distReq{
        getCost: func(dp distPeer) uint64 {
            return 0
        },
        canSend: func(dp distPeer) bool {
            p := dp.(*peer)
            f.lock.Lock()
            defer f.lock.Unlock()

            if p.isOnlyAnnounce {
                return false
            }

            fp := f.peers[p]
            return fp != nil && fp.nodeByHash[bestHash] != nil
        },
        request: func(dp distPeer) func() {
            if f.pm.isULCEnabled() {
                // keep last trusted header before sync
                f.setLastTrustedHeader(f.chain.CurrentHeader())
            }
            go func() {
                p := dp.(*peer)
                p.Log().Debug("Synchronisation started")
                f.pm.synchronise(p)
                f.syncDone <- p
            }()
            return nil
        },
    }
}

// newFetcherDistReq creates a new request for the distributor.
func (f *lightFetcher) newFetcherDistReq(bestHash common.Hash, reqID uint64, bestAmount uint64) *distReq {
    return &distReq{
        getCost: func(dp distPeer) uint64 {
            p := dp.(*peer)
            return p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
        },
        canSend: func(dp distPeer) bool {
            p := dp.(*peer)
            f.lock.Lock()
            defer f.lock.Unlock()

            if p.isOnlyAnnounce {
                return false
            }

            fp := f.peers[p]
            if fp == nil {
                return false
            }
            n := fp.nodeByHash[bestHash]
            return n != nil && !n.requested
        },
        request: func(dp distPeer) func() {
            p := dp.(*peer)
            f.lock.Lock()
            fp := f.peers[p]
            if fp != nil {
                n := fp.nodeByHash[bestHash]
                if n != nil {
                    n.requested = true
                }
            }
            f.lock.Unlock()

            cost := p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
            p.fcServer.QueueRequest(reqID, cost)
            f.reqMu.Lock()
            f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()}
            f.reqMu.Unlock()
            go func() {
                time.Sleep(hardRequestTimeout)
                f.timeoutChn <- reqID
            }()
            return func() { p.RequestHeadersByHash(reqID, cost, bestHash, int(bestAmount), 0, true) }
        },
    }
}
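
// Editor's note: an illustrative sketch (not part of the upstream file) of the
// contract the two constructors above fulfil for the request distributor. A
// distReq bundles three callbacks:
//
//     getCost(dp)  // flow-control cost estimate, used to pick a peer with enough buffer
//     canSend(dp)  // cheap predicate: can this peer serve the request at all?
//     request(dp)  // commits the bookkeeping, then returns the closure that actually sends
//
// The sync variant reports a cost of zero and hands the work to the downloader,
// while the header variant queues a GetBlockHeadersMsg and arms the hard timeout.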

// deliverHeaders delivers header download request responses for processing
func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types.Header) {
    f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer}
}

// processResponse processes header download request responses, returns true if successful
func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {
    if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {
        // note: the short-circuit above also guards the headers[0] access in the check itself
        req.peer.Log().Debug("Response content mismatch", "requested", req.amount, "reqfrom", req.hash, "delivered", len(resp.headers))
        return false
    }
    // headers are delivered newest first, flip them into ascending order for insertion
    headers := make([]*types.Header, req.amount)
    for i, header := range resp.headers {
        headers[int(req.amount)-1-i] = header
    }

    if _, err := f.chain.InsertHeaderChain(headers, 1); err != nil {
        if err == consensus.ErrFutureBlock {
            return true
        }
        log.Debug("Failed to insert header chain", "err", err)
        return false
    }
    tds := make([]*big.Int, len(headers))
    for i, header := range headers {
        td := f.chain.GetTd(header.Hash(), header.Number.Uint64())
        if td == nil {
            log.Debug("Total difficulty not found for header", "index", i+1, "number", header.Number, "hash", header.Hash())
            return false
        }
        tds[i] = td
    }
    f.newHeaders(headers, tds)
    return true
}

// newHeaders updates the block trees of all active peers according to a newly
// downloaded and validated batch of headers
func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) {
    var maxTd *big.Int

    for p, fp := range f.peers {
        if !f.checkAnnouncedHeaders(fp, headers, tds) {
            p.Log().Debug("Inconsistent announcement")
            go f.pm.removePeer(p.id)
        }
        if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) {
            maxTd = fp.confirmedTd
        }
    }

    if maxTd != nil {
        f.updateMaxConfirmedTd(maxTd)
    }
}

// checkAnnouncedHeaders updates the peer's block tree if necessary after validating
// a batch of headers. It searches for the latest header in the batch that has a
// matching tree node (if any), and if it has not been marked as known already,
// sets it and its parents to known (even those which are older than the currently
// validated ones). The return value shows whether all hashes, numbers and Tds
// matched the announced values correctly (otherwise the peer should be dropped).
func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*types.Header, tds []*big.Int) bool {
    var (
        n      *fetcherTreeNode
        header *types.Header
        td     *big.Int
    )

    for i := len(headers) - 1; ; i-- {
        if i < 0 {
            if n == nil {
                // no more headers and nothing to match
                return true
            }
            // we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching
            hash, number := header.ParentHash, header.Number.Uint64()-1
            td = f.chain.GetTd(hash, number)
            header = f.chain.GetHeader(hash, number)
            if header == nil || td == nil {
                log.Error("Missing parent of validated header", "hash", hash, "number", number)
                return false
            }
        } else {
            header = headers[i]
            td = tds[i]
        }
        hash := header.Hash()
        number := header.Number.Uint64()
        if n == nil {
            n = fp.nodeByHash[hash]
        }
        if n != nil {
            if n.td == nil {
                // node was unannounced
                if nn := fp.nodeByHash[hash]; nn != nil {
                    // if there was already a node with the same hash, continue there and drop this one
                    nn.children = append(nn.children, n.children...)
                    n.children = nil
                    fp.deleteNode(n)
                    n = nn
                } else {
                    n.hash = hash
                    n.td = td
                    fp.nodeByHash[hash] = n
                }
            }
            // check if it matches the header
            if n.hash != hash || n.number != number || n.td.Cmp(td) != 0 {
                // peer has previously made an invalid announcement
                return false
            }
            if n.known {
                // we reached a known node that matched our expectations, return with success
                return true
            }
            n.known = true
            if fp.confirmedTd == nil || td.Cmp(fp.confirmedTd) > 0 {
                fp.confirmedTd = td
                fp.bestConfirmed = n
            }
            n = n.parent
            if n == nil {
                return true
            }
        }
    }
}
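
// Editor's note: an illustrative sketch (not part of the upstream file) of the
// reversal in processResponse. A GetBlockHeaders request for hash H with
// amount 3 and reverse=true is answered head first:
//
//     resp.headers = [#102 (H), #101, #100]
//
// while InsertHeaderChain expects ascending order, so the loop flips it into
//
//     headers = [#100, #101, #102]
//
// checkAnnouncedHeaders then walks the batch backwards again (newest first),
// following parent links to connect the batch to each peer's block tree.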

// checkSyncedHeaders updates the peer's block tree after synchronisation by marking
// downloaded headers as known. If none of the announced headers are found after
// syncing, the peer is dropped.
func (f *lightFetcher) checkSyncedHeaders(p *peer) {
    fp := f.peers[p]
    if fp == nil {
        p.Log().Debug("Unknown peer to check sync headers")
        return
    }

    n := fp.lastAnnounced
    var td *big.Int

    var h *types.Header
    if f.pm.isULCEnabled() {
        var unapprovedHashes []common.Hash
        // overwrite the last announced head for ULC mode
        h, unapprovedHashes = f.lastTrustedTreeNode(p)
        // roll back untrusted blocks
        f.chain.Rollback(unapprovedHashes)
        // overwrite with the last trusted node
        n = fp.nodeByHash[h.Hash()]
    }

    // find the last valid block
    for n != nil {
        if td = f.chain.GetTd(n.hash, n.number); td != nil {
            break
        }
        n = n.parent
    }

    // now n is the latest downloaded/approved header after syncing
    if n == nil {
        p.Log().Debug("Synchronisation failed")
        go f.pm.removePeer(p.id)
        return
    }
    header := f.chain.GetHeader(n.hash, n.number)
    f.newHeaders([]*types.Header{header}, []*big.Int{td})
}

// lastTrustedTreeNode returns the last approved tree node and a list of unapproved hashes
func (f *lightFetcher) lastTrustedTreeNode(p *peer) (*types.Header, []common.Hash) {
    unapprovedHashes := make([]common.Hash, 0)
    current := f.chain.CurrentHeader()

    if f.lastTrustedHeader == nil {
        return current, unapprovedHashes
    }

    canonical := f.chain.CurrentHeader()
    if canonical.Number.Uint64() > f.lastTrustedHeader.Number.Uint64() {
        canonical = f.chain.GetHeaderByNumber(f.lastTrustedHeader.Number.Uint64())
    }
    commonAncestor := rawdb.FindCommonAncestor(f.pm.chainDb, canonical, f.lastTrustedHeader)
    if commonAncestor == nil {
        log.Error("Common ancestor of last trusted header and canonical header is nil", "canonical hash", canonical.Hash(), "trusted hash", f.lastTrustedHeader.Hash())
        return current, unapprovedHashes
    }

    // walk back from the current head towards the common ancestor, collecting
    // hashes that have not reached the trusted fraction yet
    // (the original listing compared with == here, which would terminate the
    // walk immediately; != is required for the rollback to collect anything)
    for current.Hash() != commonAncestor.Hash() {
        if f.isTrustedHash(current.Hash()) {
            break
        }
        unapprovedHashes = append(unapprovedHashes, current.Hash())
        current = f.chain.GetHeader(current.ParentHash, current.Number.Uint64()-1)
    }
    return current, unapprovedHashes
}

func (f *lightFetcher) setLastTrustedHeader(h *types.Header) {
    f.lock.Lock()
    defer f.lock.Unlock()
    f.lastTrustedHeader = h
}
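
// Editor's note: an illustrative sketch (not part of the upstream file) of the
// ULC rollback performed above. Suppose the last trusted header is #95 and the
// downloader has since synced the canonical chain to #100, but only blocks up
// to #97 meet the trusted fraction:
//
//     #95 (trusted) ... #97 (trusted) -> #98 -> #99 -> #100 (head)
//
// lastTrustedTreeNode walks back from #100 and returns #97 together with the
// unapproved hashes [#100, #99, #98]; checkSyncedHeaders then rolls those back
// and continues from the tree node matching #97.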

// checkKnownNode checks if a block tree node is known (downloaded and validated).
// If it was not known previously but found in the database, sets its known flag
func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
    if n.known {
        return true
    }
    td := f.chain.GetTd(n.hash, n.number)
    if td == nil {
        return false
    }
    header := f.chain.GetHeader(n.hash, n.number)
    // check the availability of both header and td because reads are not protected by chain db mutex
    // Note: returning false is always safe here
    if header == nil {
        return false
    }

    fp := f.peers[p]
    if fp == nil {
        p.Log().Debug("Unknown peer to check known nodes")
        return false
    }
    if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
        p.Log().Debug("Inconsistent announcement")
        go f.pm.removePeer(p.id)
    }
    if fp.confirmedTd != nil {
        f.updateMaxConfirmedTd(fp.confirmedTd)
    }
    return n.known
}

// deleteNode deletes a node and its child subtrees from a peer's block tree
func (fp *fetcherPeerInfo) deleteNode(n *fetcherTreeNode) {
    if n.parent != nil {
        for i, nn := range n.parent.children {
            if nn == n {
                n.parent.children = append(n.parent.children[:i], n.parent.children[i+1:]...)
                break
            }
        }
    }
    // follow the first child iteratively, delete the remaining branches recursively
    for {
        if n.td != nil {
            delete(fp.nodeByHash, n.hash)
        }
        fp.nodeCnt--
        if len(n.children) == 0 {
            return
        }
        for i, nn := range n.children {
            if i == 0 {
                n = nn
            } else {
                fp.deleteNode(nn)
            }
        }
    }
}

// updateStatsEntry items form a linked list that is expanded with a new item every time a new head with a higher Td
// than the previous one has been downloaded and validated. The list contains a series of maximum confirmed Td values
// and the time these values have been confirmed, both increasing monotonically. A maximum confirmed Td is calculated
// both globally for all peers and also for each individual peer (meaning that the given peer has announced the head
// and it has also been downloaded from any peer, either before or after the given announcement).
// The linked list has a global tail where new confirmed Td entries are added and a separate head for each peer,
// pointing to the next Td entry that is higher than the peer's max confirmed Td (nil if it has already confirmed
// the current global head).
type updateStatsEntry struct {
    time mclock.AbsTime
    td   *big.Int
    next *updateStatsEntry
}
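
// Editor's note: an illustrative sketch (not part of the upstream file) of the
// shared linked list. Suppose heads with Td values 10, 11 and 12 were confirmed
// at times t1 < t2 < t3:
//
//     (td=10, t1) -> (td=11, t2) -> (td=12, t3)   <- f.lastUpdateStats (global tail)
//
// A peer that has confirmed td=10 but nothing higher has its firstUpdateStats
// pointing at the (td=11, t2) entry; once it confirms a head with td >= 11, the
// peer's block delay for that entry is recorded as (confirmation time - t2) and
// its pointer advances along the list.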

// updateMaxConfirmedTd updates the block delay statistics of active peers. Whenever a new highest Td is confirmed,
// adds it to the end of a linked list together with the time it has been confirmed. Then checks which peers have
// already confirmed a head with the same or higher Td (which counts as zero block delay) and updates their statistics.
// Those who have not confirmed such a head by now will be updated by a subsequent checkUpdateStats call with a
// positive block delay value.
func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) {
    if f.maxConfirmedTd == nil || td.Cmp(f.maxConfirmedTd) > 0 {
        f.maxConfirmedTd = td
        newEntry := &updateStatsEntry{
            time: mclock.Now(),
            td:   td,
        }
        if f.lastUpdateStats != nil {
            f.lastUpdateStats.next = newEntry
        }

        f.lastUpdateStats = newEntry
        for p := range f.peers {
            f.checkUpdateStats(p, newEntry)
        }
    }
}

// checkUpdateStats checks those peers who have not confirmed a certain highest Td (or a larger one) by the time it
// has been confirmed by another peer. If they have confirmed such a head by now, their stats are updated with the
// block delay which is (this peer's confirmation time)-(first confirmation time). After blockDelayTimeout has passed,
// the stats are updated with the blockDelayTimeout value. In either case, the confirmed or timed out updateStatsEntry
// items are removed from the head of the linked list.
// If a new entry has been added to the global tail, it is passed as a parameter here even though this function
// assumes that it has already been added, so that if the peer's list is empty (all heads confirmed, head is nil),
// it can set the new head to newEntry.
func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) {
    now := mclock.Now()
    fp := f.peers[p]
    if fp == nil {
        p.Log().Debug("Unknown peer to check update stats")
        return
    }

    if newEntry != nil && fp.firstUpdateStats == nil {
        fp.firstUpdateStats = newEntry
    }
    for fp.firstUpdateStats != nil && fp.firstUpdateStats.time <= now-mclock.AbsTime(blockDelayTimeout) {
        f.pm.serverPool.adjustBlockDelay(p.poolEntry, blockDelayTimeout)
        fp.firstUpdateStats = fp.firstUpdateStats.next
    }
    if fp.confirmedTd != nil {
        for fp.firstUpdateStats != nil && fp.firstUpdateStats.td.Cmp(fp.confirmedTd) <= 0 {
            f.pm.serverPool.adjustBlockDelay(p.poolEntry, time.Duration(now-fp.firstUpdateStats.time))
            fp.firstUpdateStats = fp.firstUpdateStats.next
        }
    }
}
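
// Editor's note: an illustrative worked example (not part of the upstream file)
// for checkUpdateStats. Say a head with td=12 was first confirmed at t=0 and
// blockDelayTimeout is 10s:
//
//   - a peer whose confirmedTd reaches 12 at t=4s is charged a block delay of 4s
//     (now - firstUpdateStats.time) via adjustBlockDelay;
//   - a peer that still has not confirmed it at t=10s is charged the full
//     blockDelayTimeout and the entry is dropped from its list, whether or not
//     the head is confirmed later.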