github.com/ebakus/go-ebakus@v1.0.5-0.20200520105415-dbccef9ec421/les/fetcher.go

// Copyright 2019 The ebakus/go-ebakus Authors
// This file is part of the ebakus/go-ebakus library.
//
// The ebakus/go-ebakus library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The ebakus/go-ebakus library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the ebakus/go-ebakus library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
    "math/big"
    "sync"
    "time"

    "github.com/ebakus/go-ebakus/common"
    "github.com/ebakus/go-ebakus/common/mclock"
    "github.com/ebakus/go-ebakus/consensus"
    "github.com/ebakus/go-ebakus/core/rawdb"
    "github.com/ebakus/go-ebakus/core/types"
    "github.com/ebakus/go-ebakus/light"
    "github.com/ebakus/go-ebakus/log"
)

const (
    blockDelayTimeout    = time.Second * 10 // timeout for a peer to announce a head that has already been confirmed by others
    maxNodeCount         = 20               // maximum number of fetcherTreeNode entries remembered for each peer
    serverStateAvailable = 100              // number of recent blocks where state availability is assumed
)

// lightFetcher implements retrieval of newly announced headers. It also provides a peerHasBlock function for the
// ODR system to ensure that we only request data related to a certain block from peers who have already processed
// and announced that block.
type lightFetcher struct {
    handler *clientHandler
    chain   *light.LightChain

    lock               sync.Mutex // lock protects access to the fetcher's internal state variables except sent requests
    maxConfirmedNumber *big.Int
    peers              map[*peer]*fetcherPeerInfo
    lastUpdateStats    *updateStatsEntry
    syncing            bool
    syncDone           chan *peer

    reqMu             sync.RWMutex // reqMu protects access to sent header fetch requests
    requested         map[uint64]fetchRequest
    deliverChn        chan fetchResponse
    timeoutChn        chan uint64
    requestTriggered  bool
    requestTrigger    chan struct{}
    lastTrustedHeader *types.Header

    closeCh chan struct{}
    wg      sync.WaitGroup
}

// fetcherPeerInfo holds fetcher-specific information about each active peer
type fetcherPeerInfo struct {
    root, lastAnnounced *fetcherTreeNode
    nodeCnt             int
    confirmedTd         *big.Int
    bestConfirmed       *fetcherTreeNode
    nodeByHash          map[common.Hash]*fetcherTreeNode
    firstUpdateStats    *updateStatsEntry
}

// fetcherTreeNode is a node of a tree that holds information about blocks recently
// announced and confirmed by a certain peer. Each new announce message from a peer
// adds nodes to the tree, based on the previously announced head and the reorg depth.
// There are three possible states for a tree node:
//   - announced: not downloaded (known) yet, but we know its hash, number and td
//   - intermediate: not known, hash and td are empty, they are filled out when it becomes known
//   - known: both announced by this peer and downloaded (from any peer).
//
// This structure makes it possible to always know which peer has a certain block,
// which is necessary for selecting a suitable peer for ODR requests and also for
// canonizing new heads. It also helps to always download the minimum necessary
// amount of headers with a single request.
type fetcherTreeNode struct {
    hash             common.Hash
    number           uint64
    known, requested bool
    parent           *fetcherTreeNode
    children         []*fetcherTreeNode
}
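
// For example (hypothetical numbers), assume a peer previously announced block
// 100 as its head and now announces block 101 with ReorgDepth 1. announce walks
// one parent back from the old head and grows a new branch from 99; 100' stays
// an intermediate node with an empty hash until its header is downloaded:
//
//    99 ── 100          (previous head, still in the tree)
//     └── 100' ── 101   (new branch; 101 carries the announced hash and number)
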
// fetchRequest represents a header download request
type fetchRequest struct {
    hash    common.Hash
    amount  uint64
    peer    *peer
    sent    mclock.AbsTime
    timeout bool
}

// fetchResponse represents a header download response
type fetchResponse struct {
    reqID   uint64
    headers []*types.Header
    peer    *peer
}

// newLightFetcher creates a new light fetcher
func newLightFetcher(h *clientHandler) *lightFetcher {
    f := &lightFetcher{
        handler:            h,
        chain:              h.backend.blockchain,
        peers:              make(map[*peer]*fetcherPeerInfo),
        deliverChn:         make(chan fetchResponse, 100),
        requested:          make(map[uint64]fetchRequest),
        timeoutChn:         make(chan uint64),
        requestTrigger:     make(chan struct{}, 1),
        syncDone:           make(chan *peer),
        closeCh:            make(chan struct{}),
        maxConfirmedNumber: big.NewInt(0),
    }
    h.backend.peers.notify(f)

    f.wg.Add(1)
    go f.syncLoop()
    return f
}

func (f *lightFetcher) close() {
    close(f.closeCh)
    f.wg.Wait()
}

// syncLoop is the main event loop of the light fetcher
func (f *lightFetcher) syncLoop() {
    defer f.wg.Done()
    for {
        select {
        case <-f.closeCh:
            return
        // request loop keeps running until no further requests are necessary or possible
        case <-f.requestTrigger:
            f.lock.Lock()
            var (
                rq      *distReq
                reqID   uint64
                syncing bool
            )
            if !f.syncing {
                rq, reqID, syncing = f.nextRequest()
            }
            f.requestTriggered = rq != nil
            f.lock.Unlock()

            if rq != nil {
                if _, ok := <-f.handler.backend.reqDist.queue(rq); ok {
                    if syncing {
                        f.lock.Lock()
                        f.syncing = true
                        f.lock.Unlock()
                    } else {
                        go func() {
                            time.Sleep(softRequestTimeout)
                            f.reqMu.Lock()
                            req, ok := f.requested[reqID]
                            if ok {
                                req.timeout = true
                                f.requested[reqID] = req
                            }
                            f.reqMu.Unlock()
                            // keep starting new requests while possible
                            f.requestTrigger <- struct{}{}
                        }()
                    }
                } else {
                    f.requestTrigger <- struct{}{}
                }
            }
        case reqID := <-f.timeoutChn:
            f.reqMu.Lock()
            req, ok := f.requested[reqID]
            if ok {
                delete(f.requested, reqID)
            }
            f.reqMu.Unlock()
            if ok {
                f.handler.backend.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)
                req.peer.Log().Debug("Fetching data timed out hard")
                go f.handler.removePeer(req.peer.id)
            }
        case resp := <-f.deliverChn:
            f.reqMu.Lock()
            req, ok := f.requested[resp.reqID]
            if ok && req.peer != resp.peer {
                ok = false
            }
            if ok {
                delete(f.requested, resp.reqID)
            }
            f.reqMu.Unlock()
            if ok {
                f.handler.backend.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), req.timeout)
            }
            f.lock.Lock()
            if !ok || !(f.syncing || f.processResponse(req, resp)) {
                resp.peer.Log().Debug("Failed processing response")
                go f.handler.removePeer(resp.peer.id)
            }
            f.lock.Unlock()
        case p := <-f.syncDone:
            f.lock.Lock()
            p.Log().Debug("Done synchronising with peer")
            f.checkSyncedHeaders(p)
            f.syncing = false
            f.lock.Unlock()
            f.requestTrigger <- struct{}{} // f.requestTriggered is always true here
        }
    }
}
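
// Rough lifecycle of a single header request in the loop above (illustrative;
// the actual timeout values are defined elsewhere in this package):
//
//    t = 0                   request queued, recorded in f.requested[reqID]
//    t = softRequestTimeout  req.timeout is set; a late answer is still accepted
//                            but counts against the server's response statistics
//    t = hardRequestTimeout  reqID arrives on timeoutChn; the request is dropped
//                            and the peer is disconnected
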
// registerPeer adds a new peer to the fetcher's peer set
func (f *lightFetcher) registerPeer(p *peer) {
    p.lock.Lock()
    p.hasBlock = func(hash common.Hash, number uint64, hasState bool) bool {
        return f.peerHasBlock(p, hash, number, hasState)
    }
    p.lock.Unlock()

    f.lock.Lock()
    defer f.lock.Unlock()
    f.peers[p] = &fetcherPeerInfo{nodeByHash: make(map[common.Hash]*fetcherTreeNode)}
}

// unregisterPeer removes a peer from the fetcher's peer set
func (f *lightFetcher) unregisterPeer(p *peer) {
    p.lock.Lock()
    p.hasBlock = nil
    p.lock.Unlock()

    f.lock.Lock()
    defer f.lock.Unlock()

    // check for potential timed out block delay statistics
    f.checkUpdateStats(p, nil)
    delete(f.peers, p)
}
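
// A minimal sketch of how the ODR layer is expected to consult the callback
// installed by registerPeer (hypothetical call site, not part of this file):
//
//    p.lock.RLock()
//    ok := p.hasBlock != nil && p.hasBlock(hash, number, needState)
//    p.lock.RUnlock()
//    // send the ODR request to p only if ok is true
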
// announce processes a new announcement message received from a peer, adding new
// nodes to the peer's block tree and removing old nodes if necessary
func (f *lightFetcher) announce(p *peer, head *announceData) {
    f.lock.Lock()
    defer f.lock.Unlock()
    p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth)

    fp := f.peers[p]
    if fp == nil {
        p.Log().Debug("Announcement from unknown peer")
        return
    }

    if fp.lastAnnounced != nil && head.Number <= fp.lastAnnounced.number {
        // announced numbers should be strictly monotonic
        p.Log().Debug("Received non-monotonic block number", "current", head.Number, "previous", fp.lastAnnounced.number)
        go f.handler.removePeer(p.id)
        return
    }

    n := fp.lastAnnounced
    for i := uint64(0); i < head.ReorgDepth; i++ {
        if n == nil {
            break
        }
        n = n.parent
    }
    // n is now the reorg common ancestor, add a new branch of nodes
    if n != nil && (head.Number >= n.number+maxNodeCount || head.Number <= n.number) {
        // if announced head block height is lower or same as n or too far from it to add
        // intermediate nodes then discard previous announcement info and trigger a resync
        n = nil
        fp.nodeCnt = 0
        fp.nodeByHash = make(map[common.Hash]*fetcherTreeNode)
    }
    // check if the node count is too high to add new nodes, discard oldest ones if necessary
    if n != nil {
        locked := false
        for uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil {
            if !locked {
                f.chain.LockChain()
                defer f.chain.UnlockChain()
                locked = true
            }
            // if one of root's children is canonical, keep it, delete other branches and root itself
            var newRoot *fetcherTreeNode
            for i, nn := range fp.root.children {
                if rawdb.ReadCanonicalHash(f.handler.backend.chainDb, nn.number) == nn.hash {
                    fp.root.children = append(fp.root.children[:i], fp.root.children[i+1:]...)
                    nn.parent = nil
                    newRoot = nn
                    break
                }
            }
            fp.deleteNode(fp.root)
            if n == fp.root {
                n = newRoot
            }
            fp.root = newRoot
            if newRoot == nil || !f.checkKnownNode(p, newRoot) {
                fp.bestConfirmed = nil
                fp.confirmedTd = nil
            }

            if n == nil {
                break
            }
        }
        if n != nil {
            for n.number < head.Number {
                nn := &fetcherTreeNode{number: n.number + 1, parent: n}
                n.children = append(n.children, nn)
                n = nn
                fp.nodeCnt++
            }
            n.hash = head.Hash
            n.number = head.Number
            fp.nodeByHash[n.hash] = n
        }
    }

    if n == nil {
        // could not find reorg common ancestor or had to delete entire tree, a new root and a resync is needed
        if fp.root != nil {
            fp.deleteNode(fp.root)
        }
        n = &fetcherTreeNode{hash: head.Hash, number: head.Number}
        fp.root = n
        fp.nodeCnt++
        fp.nodeByHash[n.hash] = n
        fp.bestConfirmed = nil
        fp.confirmedTd = nil
    }

    f.checkKnownNode(p, n)
    p.lock.Lock()
    p.headInfo = head
    fp.lastAnnounced = n
    p.lock.Unlock()
    f.checkUpdateStats(p, nil)
    if !f.requestTriggered {
        f.requestTriggered = true
        f.requestTrigger <- struct{}{}
    }
}

// peerHasBlock returns true if we can assume the peer knows the given block
// based on its announcements
func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64, hasState bool) bool {
    f.lock.Lock()
    defer f.lock.Unlock()

    fp := f.peers[p]
    if fp == nil || fp.root == nil {
        return false
    }

    if hasState {
        if fp.lastAnnounced == nil || fp.lastAnnounced.number > number+serverStateAvailable {
            return false
        }
    }

    if f.syncing {
        // always return true when syncing
        // false positives are acceptable, a more sophisticated condition can be implemented later
        return true
    }

    if number >= fp.root.number {
        // it is recent enough that if it is known, it should be in the peer's block tree
        return fp.nodeByHash[hash] != nil
    }
    f.chain.LockChain()
    defer f.chain.UnlockChain()
    // if it's older than the peer's block tree root but it's in the same canonical chain
    // as the root, we can still be sure the peer knows it
    return rawdb.ReadCanonicalHash(f.handler.backend.chainDb, fp.root.number) == fp.root.hash && rawdb.ReadCanonicalHash(f.handler.backend.chainDb, number) == hash
}

// requestAmount calculates the amount of headers to be downloaded starting
// from a certain head backwards
func (f *lightFetcher) requestAmount(p *peer, n *fetcherTreeNode) uint64 {
    amount := uint64(0)
    nn := n
    for nn != nil && !f.checkKnownNode(p, nn) {
        nn = nn.parent
        amount++
    }
    if nn == nil {
        amount = n.number
    }
    return amount
}

// requestedID tells if a certain reqID has been requested by the fetcher
func (f *lightFetcher) requestedID(reqID uint64) bool {
    f.reqMu.RLock()
    _, ok := f.requested[reqID]
    f.reqMu.RUnlock()
    return ok
}
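
// For example (hypothetical tree), if a peer has announced heads up to 105 and
// the newest node already known locally is 102, requestAmount(p, n105) walks
// 105 → 104 → 103 before hitting the known node 102 and returns 3, so a single
// GetBlockHeadersMsg request for 3 headers ending at hash(105) closes the gap.
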
// nextRequest selects the peer and announced head to be requested next, amount
// to be downloaded starting from the head backwards is also returned
func (f *lightFetcher) nextRequest() (*distReq, uint64, bool) {
    bestHash, bestAmount, bestNumber, bestSyncing := f.findBestRequest()

    // findBestRequest returns f.maxConfirmedNumber itself (pointer identity)
    // when it found nothing worth requesting
    if bestNumber == f.maxConfirmedNumber {
        return nil, 0, false
    }

    var rq *distReq
    reqID := genReqID()
    if bestSyncing {
        rq = f.newFetcherDistReqForSync(bestHash)
    } else {
        rq = f.newFetcherDistReq(bestHash, reqID, bestAmount)
    }
    return rq, reqID, bestSyncing
}

// findBestRequest finds the best head to request that has been announced by but not yet requested from a known peer.
// It also returns the announced head number (which should be verified after fetching the head),
// the necessary amount to request and whether a downloader sync is necessary instead of a normal header request.
func (f *lightFetcher) findBestRequest() (bestHash common.Hash, bestAmount uint64, bestNumber *big.Int, bestSyncing bool) {
    bestNumber = f.maxConfirmedNumber
    bestSyncing = false

    for p, fp := range f.peers {
        for hash, n := range fp.nodeByHash {
            if f.checkKnownNode(p, n) || n.requested {
                continue
            }
            // if ulc mode is disabled, isTrustedHash returns true
            amount := f.requestAmount(p, n)
            if (bestNumber == nil || n.number > bestNumber.Uint64() || amount < bestAmount) && (f.isTrustedHash(hash) || f.maxConfirmedNumber.Uint64() == 0) {
                bestHash = hash
                bestNumber = big.NewInt(int64(n.number))
                bestAmount = amount
                bestSyncing = fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root)
            }
        }
    }
    return
}

// isTrustedHash checks if the block can be trusted by the minimum trusted fraction.
func (f *lightFetcher) isTrustedHash(hash common.Hash) bool {
    // If ultra light client mode is disabled, trust all hashes
    if f.handler.ulc == nil {
        return true
    }
    // Ultra light enabled, only trust after enough confirmations
    var agreed int
    for peer, info := range f.peers {
        if peer.trusted && info.nodeByHash[hash] != nil {
            agreed++
        }
    }
    return 100*agreed/len(f.handler.ulc.keys) >= f.handler.ulc.fraction
}

func (f *lightFetcher) newFetcherDistReqForSync(bestHash common.Hash) *distReq {
    return &distReq{
        getCost: func(dp distPeer) uint64 {
            return 0
        },
        canSend: func(dp distPeer) bool {
            p := dp.(*peer)
            f.lock.Lock()
            defer f.lock.Unlock()

            if p.onlyAnnounce {
                return false
            }
            fp := f.peers[p]
            return fp != nil && fp.nodeByHash[bestHash] != nil
        },
        request: func(dp distPeer) func() {
            if f.handler.ulc != nil {
                // Keep last trusted header before sync
                f.setLastTrustedHeader(f.chain.CurrentHeader())
            }
            go func() {
                p := dp.(*peer)
                p.Log().Debug("Synchronisation started")
                f.handler.synchronise(p)
                f.syncDone <- p
            }()
            return nil
        },
    }
}
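
// How the three distReq callbacks above are expected to interact with the
// request distributor (a simplified sketch of the assumed contract):
//
//    cost := rq.getCost(p)       // price the request for p's flow control window
//    if rq.canSend(p) {          // cheap predicate, may be probed for many peers
//        send := rq.request(p)   // commit bookkeeping under f.lock
//        if send != nil {
//            go send()           // returned closure performs the network I/O
//        }
//    }
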
// newFetcherDistReq creates a new request for the distributor.
func (f *lightFetcher) newFetcherDistReq(bestHash common.Hash, reqID uint64, bestAmount uint64) *distReq {
    return &distReq{
        getCost: func(dp distPeer) uint64 {
            p := dp.(*peer)
            return p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
        },
        canSend: func(dp distPeer) bool {
            p := dp.(*peer)
            f.lock.Lock()
            defer f.lock.Unlock()

            if p.onlyAnnounce {
                return false
            }
            fp := f.peers[p]
            if fp == nil {
                return false
            }
            n := fp.nodeByHash[bestHash]
            return n != nil && !n.requested
        },
        request: func(dp distPeer) func() {
            p := dp.(*peer)
            f.lock.Lock()
            fp := f.peers[p]
            if fp != nil {
                n := fp.nodeByHash[bestHash]
                if n != nil {
                    n.requested = true
                }
            }
            f.lock.Unlock()

            cost := p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
            p.fcServer.QueuedRequest(reqID, cost)
            f.reqMu.Lock()
            f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()}
            f.reqMu.Unlock()
            go func() {
                time.Sleep(hardRequestTimeout)
                f.timeoutChn <- reqID
            }()
            return func() { p.RequestHeadersByHash(reqID, cost, bestHash, int(bestAmount), 0, true) }
        },
    }
}

// deliverHeaders delivers header download request responses for processing
func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types.Header) {
    f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer}
}

// processResponse processes header download request responses, returns true if successful
func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {
    if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {
        req.peer.Log().Debug("Response content mismatch", "requested", req.amount, "reqfrom", req.hash, "delivered", len(resp.headers))
        return false
    }
    // headers are delivered newest first, flip them for chain insertion
    headers := make([]*types.Header, req.amount)
    for i, header := range resp.headers {
        headers[int(req.amount)-1-i] = header
    }

    if _, err := f.chain.InsertHeaderChain(headers, 1); err != nil {
        if err == consensus.ErrFutureBlock {
            return true
        }
        log.Debug("Failed to insert header chain", "err", err)
        return false
    }
    // ebakus has no total difficulty; the block number is used in its place
    tds := make([]*big.Int, len(headers))
    for i, header := range headers {
        td := header.Number
        if td == nil {
            log.Debug("Block number not found for header", "index", i+1, "number", header.Number, "hash", header.Hash())
            return false
        }
        tds[i] = td
    }
    f.newHeaders(headers, tds)
    return true
}

// newHeaders updates the block trees of all active peers according to a newly
// downloaded and validated batch of headers
func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) {
    var maxTd *big.Int

    for p, fp := range f.peers {
        if !f.checkAnnouncedHeaders(fp, headers, tds) {
            p.Log().Debug("Inconsistent announcement")
            go f.handler.removePeer(p.id)
        }
        if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) < 0) {
            maxTd = fp.confirmedTd
        }
    }

    if maxTd != nil {
        f.updateMaxConfirmedTd(maxTd)
    }
}
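
// Since ebakus headers carry no total difficulty, the block number stands in
// for td throughout this file. For a delivered batch of headers 100..102 the
// matching slice passed to newHeaders is simply (illustrative values):
//
//    tds := []*big.Int{big.NewInt(100), big.NewInt(101), big.NewInt(102)}
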
// checkAnnouncedHeaders updates peer's block tree if necessary after validating
// a batch of headers. It searches for the latest header in the batch that has a
// matching tree node (if any), and if it has not been marked as known already,
// sets it and its parents to known (even those which are older than the currently
// validated ones). Return value shows if all hashes, numbers and Tds matched
// correctly to the announced values (otherwise the peer should be dropped).
func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*types.Header, tds []*big.Int) bool {
    var (
        n      *fetcherTreeNode
        header *types.Header
        td     *big.Int
    )

    for i := len(headers) - 1; ; i-- {
        if i < 0 {
            if n == nil {
                // no more headers and nothing to match
                return true
            }
            // we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching
            hash, number := header.ParentHash, header.Number.Uint64()-1
            td = new(big.Int).SetUint64(number)
            header = f.chain.GetHeader(hash, number)
            if header == nil {
                log.Error("Missing parent of validated header", "hash", hash, "number", number)
                return false
            }
        } else {
            header = headers[i]
            td = tds[i]
        }
        hash := header.Hash()
        number := header.Number.Uint64()
        if n == nil {
            n = fp.nodeByHash[hash]
        }
        if n != nil {
            if n.hash == (common.Hash{}) {
                // node was an intermediate, not directly announced one; fill out its hash now
                if nn := fp.nodeByHash[hash]; nn != nil {
                    // if there was already a node with the same hash, continue there and drop this one
                    nn.children = append(nn.children, n.children...)
                    n.children = nil
                    fp.deleteNode(n)
                    n = nn
                } else {
                    n.hash = hash
                    n.number = number
                    fp.nodeByHash[hash] = n
                }
            }
            // check if it matches the header
            if n.hash != hash || n.number != number || n.number != td.Uint64() {
                // peer has previously made an invalid announcement
                return false
            }
            if n.known {
                // we reached a known node that matched our expectations, return with success
                return true
            }
            n.known = true
            if fp.confirmedTd == nil || td.Cmp(fp.confirmedTd) > 0 {
                fp.confirmedTd = td
                fp.bestConfirmed = n
            }
            n = n.parent
            if n == nil {
                return true
            }
        }
    }
}
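
// For example (hypothetical state), delivering validated headers [103 104 105]
// to a peer whose tree contains an announced node for 105 marks 105 as known,
// then walks the parent links through 104 and 103, continuing into locally
// stored headers if needed, and stops as soon as it reaches a node that was
// already known (whose matching ancestors are then guaranteed known as well).
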
func (f *lightFetcher) checkSyncedHeaders(p *peer) {
    fp := f.peers[p]
    if fp == nil {
        p.Log().Debug("Unknown peer to check sync headers")
        return
    }
    var (
        node = fp.lastAnnounced
        td   *big.Int
    )
    if f.handler.ulc != nil {
        // Roll back untrusted blocks
        h, unapproved := f.lastTrustedTreeNode(p)
        f.chain.Rollback(unapproved)
        node = fp.nodeByHash[h.Hash()]
    }
    // Find the last downloaded block by walking back the announced branch
    for node != nil {
        if f.chain.GetHeader(node.hash, node.number) != nil {
            td = big.NewInt(int64(node.number))
            break
        }
        node = node.parent
    }
    // Now node is the latest downloaded/approved header after syncing
    if node == nil {
        p.Log().Debug("Synchronisation failed")
        go f.handler.removePeer(p.id)
        return
    }
    header := f.chain.GetHeader(node.hash, node.number)
    f.newHeaders([]*types.Header{header}, []*big.Int{td})
}

// lastTrustedTreeNode returns the last approved treeNode and a list of unapproved hashes
func (f *lightFetcher) lastTrustedTreeNode(p *peer) (*types.Header, []common.Hash) {
    unapprovedHashes := make([]common.Hash, 0)
    current := f.chain.CurrentHeader()

    if f.lastTrustedHeader == nil {
        return current, unapprovedHashes
    }

    canonical := f.chain.CurrentHeader()
    if canonical.Number.Uint64() > f.lastTrustedHeader.Number.Uint64() {
        canonical = f.chain.GetHeaderByNumber(f.lastTrustedHeader.Number.Uint64())
    }
    commonAncestor := rawdb.FindCommonAncestor(f.handler.backend.chainDb, canonical, f.lastTrustedHeader)
    if commonAncestor == nil {
        log.Error("Common ancestor of last trusted header and canonical header is nil", "canonical hash", canonical.Hash(), "trusted hash", f.lastTrustedHeader.Hash())
        return current, unapprovedHashes
    }

    // walk back from the current head towards the common ancestor, collecting
    // every hash that has not gathered enough trusted confirmations yet
    for current.Hash() != commonAncestor.Hash() {
        if f.isTrustedHash(current.Hash()) {
            break
        }
        unapprovedHashes = append(unapprovedHashes, current.Hash())
        current = f.chain.GetHeader(current.ParentHash, current.Number.Uint64()-1)
    }
    return current, unapprovedHashes
}

func (f *lightFetcher) setLastTrustedHeader(h *types.Header) {
    f.lock.Lock()
    defer f.lock.Unlock()
    f.lastTrustedHeader = h
}

// checkKnownNode checks if a block tree node is known (downloaded and validated)
// If it was not known previously but found in the database, sets its known flag
func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
    if n.known {
        return true
    }
    header := f.chain.GetHeader(n.hash, n.number)
    // check the availability of the header because reads are not protected by the chain db mutex
    // Note: returning false is always safe here
    if header == nil {
        return false
    }
    td := big.NewInt(int64(n.number))

    fp := f.peers[p]
    if fp == nil {
        p.Log().Debug("Unknown peer to check known nodes")
        return false
    }
    if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
        p.Log().Debug("Inconsistent announcement")
        go f.handler.removePeer(p.id)
    }
    if fp.confirmedTd != nil {
        f.updateMaxConfirmedTd(fp.confirmedTd)
    }
    return n.known
}
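
// A minimal illustration of the known-flag promotion (hypothetical state):
//
//    n := fp.nodeByHash[h]          // n.known == false so far
//    if f.checkKnownNode(p, n) {
//        // the header for h was found locally, so checkAnnouncedHeaders has
//        // marked n and its matching ancestors known as a side effect
//    }
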
// deleteNode deletes a node and its child subtrees from a peer's block tree
func (fp *fetcherPeerInfo) deleteNode(n *fetcherTreeNode) {
    if n.parent != nil {
        for i, nn := range n.parent.children {
            if nn == n {
                n.parent.children = append(n.parent.children[:i], n.parent.children[i+1:]...)
                break
            }
        }
    }
    for {
        delete(fp.nodeByHash, n.hash)
        fp.nodeCnt--
        if len(n.children) == 0 {
            return
        }
        for i, nn := range n.children {
            if i == 0 {
                n = nn
            } else {
                fp.deleteNode(nn)
            }
        }
    }
}

// updateStatsEntry items form a linked list that is expanded with a new item every time a new head with a higher Td
// than the previous one has been downloaded and validated. The list contains a series of maximum confirmed Td values
// and the time these values have been confirmed, both increasing monotonically. A maximum confirmed Td is calculated
// both globally for all peers and also for each individual peer (meaning that the given peer has announced the head
// and it has also been downloaded from any peer, either before or after the given announcement).
// The linked list has a global tail where new confirmed Td entries are added and a separate head for each peer,
// pointing to the next Td entry that is higher than the peer's max confirmed Td (nil if it has already confirmed
// the current global head).
type updateStatsEntry struct {
    time mclock.AbsTime
    td   *big.Int
    next *updateStatsEntry
}

// updateMaxConfirmedTd updates the block delay statistics of active peers. Whenever a new highest Td is confirmed,
// adds it to the end of a linked list together with the time it has been confirmed. Then checks which peers have
// already confirmed a head with the same or higher Td (which counts as zero block delay) and updates their statistics.
// Those who have not confirmed such a head by now will be updated by a subsequent checkUpdateStats call with a
// positive block delay value.
func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) {
    if f.maxConfirmedNumber == nil || td.Cmp(f.maxConfirmedNumber) > 0 {
        f.maxConfirmedNumber = td
        newEntry := &updateStatsEntry{
            time: mclock.Now(),
            td:   td,
        }
        if f.lastUpdateStats != nil {
            f.lastUpdateStats.next = newEntry
        }

        f.lastUpdateStats = newEntry
        for p := range f.peers {
            f.checkUpdateStats(p, newEntry)
        }
    }
}
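
// Illustrative state of the statistics list after three confirmed heads, with
// block numbers standing in for Td (hypothetical values):
//
//    entries:  {td:100} → {td:103} → {td:107}   (f.lastUpdateStats → {td:107})
//    peer A:   firstUpdateStats == nil          (already confirmed 107)
//    peer B:   firstUpdateStats → {td:103}      (has only confirmed 100 so far)
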
func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) {
    now := mclock.Now()
    fp := f.peers[p]
    if fp == nil {
        p.Log().Debug("Unknown peer to check update stats")
        return
    }

    if newEntry != nil && fp.firstUpdateStats == nil {
        fp.firstUpdateStats = newEntry
    }
    for fp.firstUpdateStats != nil && fp.firstUpdateStats.time <= now-mclock.AbsTime(blockDelayTimeout) {
        f.handler.backend.serverPool.adjustBlockDelay(p.poolEntry, blockDelayTimeout)
        fp.firstUpdateStats = fp.firstUpdateStats.next
    }
    if fp.confirmedTd != nil {
        for fp.firstUpdateStats != nil && fp.firstUpdateStats.td.Cmp(fp.confirmedTd) <= 0 {
            f.handler.backend.serverPool.adjustBlockDelay(p.poolEntry, time.Duration(now-fp.firstUpdateStats.time))
            fp.firstUpdateStats = fp.firstUpdateStats.next
        }
    }
}
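
// Illustrative delay accounting for one peer (hypothetical times, t0 being the
// moment the head was first confirmed by any peer):
//
//    peer confirms the same head at t1 < t0+blockDelayTimeout
//        → adjustBlockDelay(p.poolEntry, t1-t0)
//    peer still silent at t0+blockDelayTimeout
//        → adjustBlockDelay(p.poolEntry, blockDelayTimeout), entry dropped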