github.com/Blockdaemon/celo-blockchain@v0.0.0-20200129231733-e667f6b08419/les/fetcher.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package les implements the Light Ethereum Subprotocol.
package les

import (
	"fmt"
	"math/big"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/log"
)

const (
	blockDelayTimeout    = time.Second * 10 // timeout for a peer to announce a head that has already been confirmed by others
	maxNodeCount         = 20               // maximum number of fetcherTreeNode entries remembered for each peer
	serverStateAvailable = 100              // number of recent blocks where state availability is assumed
)

// lightFetcher implements retrieval of newly announced headers. It also provides a peerHasBlock function for the
// ODR system to ensure that we only request data related to a certain block from peers who have already processed
// and announced that block.
type lightFetcher struct {
	pm    *ProtocolManager
	odr   *LesOdr
	chain *light.LightChain

	lock            sync.Mutex // lock protects access to the fetcher's internal state variables except sent requests
	maxConfirmedTd  *big.Int
	peers           map[*peer]*fetcherPeerInfo
	lastUpdateStats *updateStatsEntry
	syncing         bool
	syncDone        chan *peer

	reqMu      sync.RWMutex // reqMu protects access to sent header fetch requests
	requested  map[uint64]fetchRequest
	deliverChn chan fetchResponse
	timeoutChn chan uint64
	requestChn chan bool // true if initiated from outside
}

// fetcherPeerInfo holds fetcher-specific information about each active peer
type fetcherPeerInfo struct {
	root, lastAnnounced *fetcherTreeNode
	nodeCnt             int
	confirmedTd         *big.Int
	bestConfirmed       *fetcherTreeNode
	nodeByHash          map[common.Hash]*fetcherTreeNode
	firstUpdateStats    *updateStatsEntry
}

// fetcherTreeNode is a node of a tree that holds information about blocks recently
// announced and confirmed by a certain peer. Each new announce message from a peer
// adds nodes to the tree, based on the previous announced head and the reorg depth.
// There are three possible states for a tree node:
// - announced: not downloaded (known) yet, but we know its hash, number and td
// - intermediate: not known; hash and td are empty and are filled in when it becomes known
// - known: both announced by this peer and downloaded (from any peer).
// This structure makes it possible to always know which peer has a certain block,
// which is necessary for selecting a suitable peer for ODR requests and also for
// canonizing new heads. It also helps to always download the minimum necessary
// amount of headers with a single request.
type fetcherTreeNode struct {
	hash             common.Hash
	number           uint64
	td               *big.Int
	known, requested bool
	parent           *fetcherTreeNode
	children         []*fetcherTreeNode
}
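
// Illustrative sketch (editorial note, not part of the original code): a peer
// that last announced block 100 and now announces block 102 with ReorgDepth 1
// makes announce (below) walk one parent back to the common ancestor at 99 and
// grow a fresh branch, roughly:
//
//	n := fp.lastAnnounced.parent // block 99, the reorg common ancestor
//	for n.number < 102 {         // append intermediate nodes for 100' and 101, then 102
//		nn := &fetcherTreeNode{number: n.number + 1, parent: n}
//		n.children = append(n.children, nn)
//		n = nn
//	}
//	n.hash, n.td = head.Hash, head.Td // only the head node is filled in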

// fetchRequest represents a header download request
type fetchRequest struct {
	hash    common.Hash
	amount  uint64
	peer    *peer
	sent    mclock.AbsTime
	timeout bool
}

// fetchResponse represents a header download response
type fetchResponse struct {
	reqID   uint64
	headers []*types.Header
	peer    *peer
}

// newLightFetcher creates a new light fetcher
func newLightFetcher(pm *ProtocolManager) *lightFetcher {
	f := &lightFetcher{
		pm:             pm,
		chain:          pm.blockchain.(*light.LightChain),
		odr:            pm.odr,
		peers:          make(map[*peer]*fetcherPeerInfo),
		deliverChn:     make(chan fetchResponse, 100),
		requested:      make(map[uint64]fetchRequest),
		timeoutChn:     make(chan uint64),
		requestChn:     make(chan bool, 100),
		syncDone:       make(chan *peer),
		maxConfirmedTd: big.NewInt(0),
	}
	pm.peers.notify(f)

	f.pm.wg.Add(1)
	go f.syncLoop()
	return f
}
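
// Editorial note: pm.peers.notify(f) registers the fetcher with the peer set,
// so registerPeer / unregisterPeer below are invoked automatically as peers
// connect and disconnect, while the pm.wg.Add(1) / wg.Done pair ties
// syncLoop's lifetime to the protocol manager's shutdown.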

// syncLoop is the main event loop of the light fetcher
func (f *lightFetcher) syncLoop() {
	requesting := false
	defer f.pm.wg.Done()
	for {
		select {
		case <-f.pm.quitSync:
			return
		// when a new announce is received, request loop keeps running until
		// no further requests are necessary or possible
		case newAnnounce := <-f.requestChn:
			f.lock.Lock()
			s := requesting
			requesting = false
			var (
				rq      *distReq
				reqID   uint64
				syncing bool
			)
			if !f.syncing && !(newAnnounce && s) {
				rq, reqID, syncing = f.nextRequest()
			}
			f.lock.Unlock()

			if rq != nil {
				requesting = true
				if _, ok := <-f.pm.reqDist.queue(rq); ok {
					if syncing {
						f.lock.Lock()
						f.syncing = true
						f.lock.Unlock()
					} else {
						go func() {
							time.Sleep(softRequestTimeout)
							f.reqMu.Lock()
							req, ok := f.requested[reqID]
							if ok {
								req.timeout = true
								f.requested[reqID] = req
							}
							f.reqMu.Unlock()
							// keep starting new requests while possible
							f.requestChn <- false
						}()
					}
				} else {
					f.requestChn <- false
				}
			}
		case reqID := <-f.timeoutChn:
			f.reqMu.Lock()
			req, ok := f.requested[reqID]
			if ok {
				delete(f.requested, reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)
				req.peer.Log().Debug("Fetching data timed out hard")
				go f.pm.removePeer(req.peer.id)
			}
		case resp := <-f.deliverChn:
			f.reqMu.Lock()
			req, ok := f.requested[resp.reqID]
			if ok && req.peer != resp.peer {
				ok = false
			}
			if ok {
				delete(f.requested, resp.reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), req.timeout)
			}
			f.lock.Lock()
			if !ok || !(f.syncing || f.processResponse(req, resp)) {
				resp.peer.Log().Debug("Failed processing response")
				go f.pm.removePeer(resp.peer.id)
			}
			f.lock.Unlock()
		case p := <-f.syncDone:
			f.lock.Lock()
			p.Log().Debug("Done synchronising with peer")
			f.checkSyncedHeaders(p)
			f.syncing = false
			f.lock.Unlock()
			f.requestChn <- false
		}
	}
}
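
// Illustrative timeline of a single header request handled by syncLoop above
// (editorial sketch; softRequestTimeout and hardRequestTimeout are package
// constants defined elsewhere):
//
//	t = 0                   request queued via reqDist and sent to the peer
//	t = softRequestTimeout  the pending request is flagged timeout=true, so a
//	                        later delivery still counts but penalises the
//	                        peer's response-time statistics
//	t = hardRequestTimeout  reqID fires on timeoutChn; if the request is still
//	                        outstanding it is dropped and the peer removed
//	delivery before that    a fetchResponse on deliverChn removes the request
//	                        and hands the headers to processResponse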

// registerPeer adds a new peer to the fetcher's peer set
func (f *lightFetcher) registerPeer(p *peer) {
	p.lock.Lock()
	p.hasBlock = func(hash common.Hash, number uint64, hasState bool) bool {
		return f.peerHasBlock(p, hash, number, hasState)
	}
	p.lock.Unlock()

	f.lock.Lock()
	defer f.lock.Unlock()

	f.peers[p] = &fetcherPeerInfo{nodeByHash: make(map[common.Hash]*fetcherTreeNode)}
}

// unregisterPeer removes a peer from the fetcher's peer set
func (f *lightFetcher) unregisterPeer(p *peer) {
	p.lock.Lock()
	p.hasBlock = nil
	p.lock.Unlock()

	f.lock.Lock()
	defer f.lock.Unlock()

	// check for potential timed out block delay statistics
	f.checkUpdateStats(p, nil)
	delete(f.peers, p)
}

// announce processes a new announcement message received from a peer, adding new
// nodes to the peer's block tree and removing old nodes if necessary
func (f *lightFetcher) announce(p *peer, head *announceData) {
	f.lock.Lock()
	defer f.lock.Unlock()
	p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth)

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Announcement from unknown peer")
		return
	}

	if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {
		// announced tds should be strictly monotonic
		p.Log().Debug("Received non-monotonic td", "current", head.Td, "previous", fp.lastAnnounced.td)
		go f.pm.removePeer(p.id)
		return
	}

	n := fp.lastAnnounced
	for i := uint64(0); i < head.ReorgDepth; i++ {
		if n == nil {
			break
		}
		n = n.parent
	}
	// n is now the reorg common ancestor, add a new branch of nodes
	if n != nil && (head.Number >= n.number+maxNodeCount || head.Number <= n.number) {
		// if the announced head's block height is lower than or equal to n's, or too far
		// above it to add intermediate nodes, discard the previous announcement info and trigger a resync
		n = nil
		fp.nodeCnt = 0
		fp.nodeByHash = make(map[common.Hash]*fetcherTreeNode)
	}
	if n != nil {
		// check if the node count is too high to add new nodes, discard oldest ones if necessary
		locked := false
		for uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil {
			if !locked {
				f.chain.LockChain()
				defer f.chain.UnlockChain()
				locked = true
			}
			// if one of root's children is canonical, keep it, delete other branches and root itself
			var newRoot *fetcherTreeNode
			for i, nn := range fp.root.children {
				if rawdb.ReadCanonicalHash(f.pm.chainDb, nn.number) == nn.hash {
					fp.root.children = append(fp.root.children[:i], fp.root.children[i+1:]...)
					nn.parent = nil
					newRoot = nn
					break
				}
			}
			fp.deleteNode(fp.root)
			if n == fp.root {
				n = newRoot
			}
			fp.root = newRoot
			if newRoot == nil || !f.checkKnownNode(p, newRoot) {
				fp.bestConfirmed = nil
				fp.confirmedTd = nil
			}

			if n == nil {
				break
			}
		}
		if n != nil {
			for n.number < head.Number {
				nn := &fetcherTreeNode{number: n.number + 1, parent: n}
				n.children = append(n.children, nn)
				n = nn
				fp.nodeCnt++
			}
			n.hash = head.Hash
			n.td = head.Td
			fp.nodeByHash[n.hash] = n
		}
	}
	if n == nil {
		// could not find reorg common ancestor or had to delete entire tree, a new root and a resync are needed
		if fp.root != nil {
			fp.deleteNode(fp.root)
		}
		n = &fetcherTreeNode{hash: head.Hash, number: head.Number, td: head.Td}
		fp.root = n
		fp.nodeCnt++
		fp.nodeByHash[n.hash] = n
		fp.bestConfirmed = nil
		fp.confirmedTd = nil
	}

	f.checkKnownNode(p, n)
	p.lock.Lock()
	p.headInfo = head
	fp.lastAnnounced = n
	p.lock.Unlock()
	f.checkUpdateStats(p, nil)
	f.requestChn <- true
}

// peerHasBlock returns true if we can assume the peer knows the given block
// based on its announcements
func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64, hasState bool) bool {
	f.lock.Lock()
	defer f.lock.Unlock()

	fp := f.peers[p]
	if fp == nil || fp.root == nil {
		return false
	}

	if hasState {
		if fp.lastAnnounced == nil || fp.lastAnnounced.number > number+serverStateAvailable {
			return false
		}
	}

	if f.syncing {
		// always return true when syncing
		// false positives are acceptable, a more sophisticated condition can be implemented later
		return true
	}

	if number >= fp.root.number {
		// it is recent enough that if it is known, it should be in the peer's block tree
		return fp.nodeByHash[hash] != nil
	}
	f.chain.LockChain()
	defer f.chain.UnlockChain()
	// if it's older than the peer's block tree root but it's in the same canonical chain
	// as the root, we can still be sure the peer knows it
	//
	// when syncing, just check if it is part of the known chain, there is nothing better we
	// can do since we do not know the most recent block hash yet
	return rawdb.ReadCanonicalHash(f.pm.chainDb, fp.root.number) == fp.root.hash && rawdb.ReadCanonicalHash(f.pm.chainDb, number) == hash
}
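
// Editorial example: with a tree root at block 900 and a last announced head
// at block 1000, a query for block 950 is answered from nodeByHash, while a
// query for block 800 falls through to the canonical-chain check above. If
// hasState is true, anything older than block 900 (head minus
// serverStateAvailable) is rejected before either check runs.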

// requestAmount calculates the amount of headers to be downloaded starting
// from a certain head backwards
func (f *lightFetcher) requestAmount(p *peer, n *fetcherTreeNode) uint64 {
	amount := uint64(0)
	nn := n
	for nn != nil && !f.checkKnownNode(p, nn) {
		nn = nn.parent
		amount++
	}
	if nn == nil {
		amount = n.number
	}
	return amount
}
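
// Worked example (editorial): if n and its parent are unknown to the peer but
// its grandparent is known, the loop stops after two steps and two headers are
// requested; if the walk falls off the tree root (nn == nil), everything back
// to the genesis block is fetched, i.e. n.number headers.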

// requestedID tells if a certain reqID has been requested by the fetcher
func (f *lightFetcher) requestedID(reqID uint64) bool {
	f.reqMu.RLock()
	_, ok := f.requested[reqID]
	f.reqMu.RUnlock()
	return ok
}

// nextRequest selects the peer and announced head to be requested next; the
// amount to be downloaded starting from the head backwards is also returned
func (f *lightFetcher) nextRequest() (*distReq, uint64, bool) {
	var (
		bestHash   common.Hash
		bestAmount uint64
	)
	bestTd := f.maxConfirmedTd
	bestSyncing := false

	for p, fp := range f.peers {
		for hash, n := range fp.nodeByHash {
			if !f.checkKnownNode(p, n) && !n.requested && (bestTd == nil || n.td.Cmp(bestTd) >= 0) {
				amount := f.requestAmount(p, n)
				if bestTd == nil || n.td.Cmp(bestTd) > 0 || (amount < bestAmount && f.pm.downloader.Mode.SyncFullHeaderChain()) {
					bestHash = hash
					bestAmount = amount
					bestTd = n.td
					bestSyncing = fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root)
				}
			}
		}
	}
	if bestTd == f.maxConfirmedTd {
		return nil, 0, false
	}
	log.Trace("nextRequest", "bestTd", bestTd, "f.maxConfirmedTd", f.maxConfirmedTd)

	var rq *distReq
	reqID := genReqID()
	if bestSyncing {
		rq = &distReq{
			getCost: func(dp distPeer) uint64 {
				return 0
			},
			canSend: func(dp distPeer) bool {
				p := dp.(*peer)
				f.lock.Lock()
				defer f.lock.Unlock()

				fp := f.peers[p]
				return fp != nil && fp.nodeByHash[bestHash] != nil
			},
			request: func(dp distPeer) func() {
				go func() {
					p := dp.(*peer)
					p.Log().Debug("Synchronisation started")
					f.pm.synchronise(p, f.pm.downloader.Mode)
					f.syncDone <- p
				}()
				return nil
			},
		}
	} else {
		rq = &distReq{
			getCost: func(dp distPeer) uint64 {
				p := dp.(*peer)
				return p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
			},
			canSend: func(dp distPeer) bool {
				p := dp.(*peer)
				f.lock.Lock()
				defer f.lock.Unlock()

				fp := f.peers[p]
				if fp == nil {
					return false
				}
				n := fp.nodeByHash[bestHash]
				return n != nil && !n.requested
			},
			request: func(dp distPeer) func() {
				p := dp.(*peer)
				f.lock.Lock()
				fp := f.peers[p]
				if fp != nil {
					n := fp.nodeByHash[bestHash]
					if n != nil {
						n.requested = true
					}
				}
				f.lock.Unlock()

				cost := p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
				p.fcServer.QueueRequest(reqID, cost)
				f.reqMu.Lock()
				f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()}
				f.reqMu.Unlock()
				go func() {
					time.Sleep(hardRequestTimeout)
					f.timeoutChn <- reqID
				}()
				return func() { p.RequestHeadersByHash(reqID, cost, bestHash, int(bestAmount), 0, true) }
			},
		}
	}
	return rq, reqID, bestSyncing
}
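
// Editorial summary of the two request flavours built above: a sync request is
// free under flow control (getCost returns 0) and hands the work to the
// downloader via f.pm.synchronise, while a header request reserves capacity
// with fcServer.QueueRequest, records itself in f.requested, arms the hard
// timeout goroutine, and only then returns the closure that performs the
// actual RequestHeadersByHash call on the network.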

// deliverHeaders delivers header download request responses for processing
func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types.Header) {
	f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer}
}

// processResponse processes header download request responses, returns true if successful
func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {
	if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {
		// note: the log keys pair the requested amount/hash with what was actually
		// delivered, and avoid indexing resp.headers, which may be empty here
		req.peer.Log().Debug("Response content mismatch", "requested", req.amount, "reqfrom", req.hash, "delivered", len(resp.headers))
		return false
	}
	headers := make([]*types.Header, req.amount)
	for i, header := range resp.headers {
		headers[int(req.amount)-1-i] = header
	}
	if _, err := f.chain.InsertHeaderChain(headers, 1, f.pm.downloader.Mode.SyncFullHeaderChain()); err != nil {
		if err == consensus.ErrFutureBlock {
			return true
		}
		log.Debug("Failed to insert header chain", "err", err)
		return false
	}
	tds := make([]*big.Int, len(headers))
	for i, header := range headers {
		td := f.chain.GetTd(header.Hash(), header.Number.Uint64())
		if td == nil {
			log.Debug("Total difficulty not found for header", "index", i+1, "number", header.Number, "hash", header.Hash())
			return false
		}
		tds[i] = td
	}
	f.newHeaders(headers, tds)
	return true
}
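
// Note on ordering (editorial): headers are requested backwards from the
// announced hash with reverse=true, so a response of amount 3 for head N
// arrives as [N, N-1, N-2]; the copy loop above flips it to [N-2, N-1, N]
// because InsertHeaderChain expects ascending order.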

// newHeaders updates the block trees of all active peers according to a newly
// downloaded and validated batch of headers
func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) {
	var maxTd *big.Int
	for p, fp := range f.peers {
		if !f.checkAnnouncedHeaders(fp, headers, tds) {
			p.Log().Debug("Inconsistent announcement")
			go f.pm.removePeer(p.id)
		}
		log.Trace("newHeaders", "fp.confirmedTd", fp.confirmedTd, "maxTd", maxTd)
		if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) {
			maxTd = fp.confirmedTd
		}
	}
	if maxTd != nil {
		f.updateMaxConfirmedTd(maxTd)
	} else {
		log.Trace("newHeaders maxTd is nil")
	}
}

// checkAnnouncedHeaders updates peer's block tree if necessary after validating
// a batch of headers. It searches for the latest header in the batch that has a
// matching tree node (if any), and if it has not been marked as known already,
// sets it and its parents to known (even those which are older than the currently
// validated ones). The return value shows whether all hashes, numbers and TDs
// matched the announced values (otherwise the peer should be dropped).
func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*types.Header, tds []*big.Int) bool {
	var (
		n      *fetcherTreeNode
		header *types.Header
		td     *big.Int
	)

	for i := len(headers) - 1; ; i-- {
		if i < 0 {
			if n == nil {
				// no more headers and nothing to match
				return true
			}
			// we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching
			hash, number := header.ParentHash, header.Number.Uint64()-1
			td = f.chain.GetTd(hash, number)
			header = f.chain.GetHeader(hash, number)
			if header == nil || td == nil {
				log.Error("Missing parent of validated header", "hash", hash, "number", number)
				return false
			}
		} else {
			header = headers[i]
			td = tds[i]
		}
		log.Trace(fmt.Sprintf("checkAnnouncedHeaders/header %d/%d is %v", i, len(headers), header.Number))
		hash := header.Hash()
		number := header.Number.Uint64()
		if n == nil {
			n = fp.nodeByHash[hash]
		}
		if n != nil {
			if n.td == nil {
				// node was unannounced
				if nn := fp.nodeByHash[hash]; nn != nil {
					// if there was already a node with the same hash, continue there and drop this one
					nn.children = append(nn.children, n.children...)
					n.children = nil
					fp.deleteNode(n)
					n = nn
				} else {
					n.hash = hash
					n.td = td
					log.Debug("checkAnnouncedHeaders setting n.td", "n.td", n.td)
					fp.nodeByHash[hash] = n
				}
			}
			// check if it matches the header
			if n.hash != hash || n.number != number || (n.td.Cmp(td) != 0) {
				// peer has previously made an invalid announcement
				log.Trace("checkAnnouncedHeaders", "hash", hash, "number", number, "td", td, "n.hash", n.hash, "n.number", n.number, "n.td", n.td)
				return false
			}
			if n.known {
				// we reached a known node that matched our expectations, return with success
				return true
			}
			n.known = true
			if fp.confirmedTd == nil || td.Cmp(fp.confirmedTd) > 0 {
				fp.confirmedTd = td
				fp.bestConfirmed = n
			}
			n = n.parent
			if n == nil {
				return true
			}
		}
	}
}

// checkSyncedHeaders updates peer's block tree after synchronisation by marking
// downloaded headers as known. If none of the announced headers are found after
// syncing, the peer is dropped.
func (f *lightFetcher) checkSyncedHeaders(p *peer) {
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check sync headers")
		return
	}
	n := fp.lastAnnounced
	var td *big.Int

	if n != nil {
		log.Debug(fmt.Sprintf("Last announced block is %v", n.number))
	}
	// Check for the latest announced header in our chain. If it is present, then move on, otherwise,
	// move through the parents until we find a header that we have downloaded.
	for n != nil {
		if td = f.chain.GetTd(n.hash, n.number); td != nil {
			break
		}
		n = n.parent
	}
	// Now n is the latest announced block by this peer that exists in our chain.
	if n == nil {
		p.Log().Debug("Synchronisation failed")
		go f.pm.removePeer(p.id)
	} else {
		header := f.chain.GetHeader(n.hash, n.number)
		f.newHeaders([]*types.Header{header}, []*big.Int{td})
	}
}

// checkKnownNode checks if a block tree node is known (downloaded and validated).
// If it was not known previously but is found in the database, its known flag is set.
func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
	if n.known {
		return true
	}
	td := f.chain.GetTd(n.hash, n.number)
	if td == nil {
		return false
	}
	header := f.chain.GetHeader(n.hash, n.number)
	// check the availability of both header and td because reads are not protected by chain db mutex
	// Note: returning false is always safe here
	if header == nil {
		return false
	}

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check known nodes")
		return false
	}
	if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
		p.Log().Debug("Inconsistent announcement")
		go f.pm.removePeer(p.id)
	}
	if fp.confirmedTd != nil {
		f.updateMaxConfirmedTd(fp.confirmedTd)
	}
	return n.known
}
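
// Editorial note: checkKnownNode never sets n.known directly; the flag is
// flipped as a side effect of checkAnnouncedHeaders walking the tree, which is
// why it returns n.known rather than true after a successful database lookup.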

// deleteNode deletes a node and its child subtrees from a peer's block tree
func (fp *fetcherPeerInfo) deleteNode(n *fetcherTreeNode) {
	if n.parent != nil {
		for i, nn := range n.parent.children {
			if nn == n {
				n.parent.children = append(n.parent.children[:i], n.parent.children[i+1:]...)
				break
			}
		}
	}
	for {
		if n.td != nil {
			delete(fp.nodeByHash, n.hash)
		}
		fp.nodeCnt--
		if len(n.children) == 0 {
			return
		}
		for i, nn := range n.children {
			if i == 0 {
				n = nn
			} else {
				fp.deleteNode(nn)
			}
		}
	}
}
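
// Editorial note: the loop above descends iteratively along the first child
// and recurses only into the remaining siblings, so deleting a long unbranched
// chain of nodes needs constant stack depth.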

// updateStatsEntry items form a linked list that is expanded with a new item every time a new head with a higher Td
// than the previous one has been downloaded and validated. The list contains a series of maximum confirmed Td values
// and the time these values have been confirmed, both increasing monotonically. A maximum confirmed Td is calculated
// both globally for all peers and also for each individual peer (meaning that the given peer has announced the head
// and it has also been downloaded from any peer, either before or after the given announcement).
// The linked list has a global tail where new confirmed Td entries are added and a separate head for each peer,
// pointing to the next Td entry that is higher than the peer's max confirmed Td (nil if it has already confirmed
// the current global head).
type updateStatsEntry struct {
	time mclock.AbsTime
	td   *big.Int
	next *updateStatsEntry
}
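
// Illustrative example (editorial): if td=100 is first confirmed at t=1s and
// td=110 at t=3s, the list is (1s, 100) -> (3s, 110). A peer whose
// firstUpdateStats pointer still references the (1s, 100) entry has confirmed
// neither head yet; it is credited with the measured delay once it catches up,
// or charged blockDelayTimeout if it never does.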

// updateMaxConfirmedTd updates the block delay statistics of active peers. Whenever a new highest Td is confirmed,
// adds it to the end of a linked list together with the time it has been confirmed. Then checks which peers have
// already confirmed a head with the same or higher Td (which counts as zero block delay) and updates their statistics.
// Those who have not confirmed such a head by now will be updated by a subsequent checkUpdateStats call with a
// positive block delay value.
func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) {
	if f.maxConfirmedTd == nil || td.Cmp(f.maxConfirmedTd) > 0 {
		f.maxConfirmedTd = td
		newEntry := &updateStatsEntry{
			time: mclock.Now(),
			td:   td,
		}
		if f.lastUpdateStats != nil {
			f.lastUpdateStats.next = newEntry
		}
		f.lastUpdateStats = newEntry
		for p := range f.peers {
			f.checkUpdateStats(p, newEntry)
		}
	}
}

// checkUpdateStats checks those peers who have not confirmed a certain highest Td (or a larger one) by the time it
// has been confirmed by another peer. If they have confirmed such a head by now, their stats are updated with the
// block delay which is (this peer's confirmation time)-(first confirmation time). After blockDelayTimeout has passed,
// the stats are updated with the blockDelayTimeout value. In either case, the confirmed or timed out updateStatsEntry
// items are removed from the head of the linked list.
// If a new entry has been added to the global tail, it is passed as a parameter here even though this function
// assumes that it has already been added, so that if the peer's list is empty (all heads confirmed, head is nil),
// it can set the new head to newEntry.
func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) {
	now := mclock.Now()
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check update stats")
		return
	}
	if newEntry != nil && fp.firstUpdateStats == nil {
		fp.firstUpdateStats = newEntry
	}
	for fp.firstUpdateStats != nil && fp.firstUpdateStats.time <= now-mclock.AbsTime(blockDelayTimeout) {
		f.pm.serverPool.adjustBlockDelay(p.poolEntry, blockDelayTimeout)
		fp.firstUpdateStats = fp.firstUpdateStats.next
	}
	if fp.confirmedTd != nil {
		for fp.firstUpdateStats != nil && fp.firstUpdateStats.td.Cmp(fp.confirmedTd) <= 0 {
			f.pm.serverPool.adjustBlockDelay(p.poolEntry, time.Duration(now-fp.firstUpdateStats.time))
			fp.firstUpdateStats = fp.firstUpdateStats.next
		}
	}
}
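
// Worked example (editorial): if the global head td was first confirmed at
// time t0 and this peer confirms it at t0+2s, adjustBlockDelay is called with
// a 2s delay and the entry is popped from the peer's list; if 10s
// (blockDelayTimeout) elapse first, the peer is charged the full timeout
// instead.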