github.com/arieschain/arieschain@v0.0.0-20191023063405-37c074544356/les/fetcher.go

     1  // Package les implements the Light Ethereum Subprotocol.
     2  package les
     3  
     4  import (
     5  	"math/big"
     6  	"sync"
     7  	"time"
     8  
     9  	"github.com/quickchainproject/quickchain/common"
    10  	"github.com/quickchainproject/quickchain/common/mclock"
    11  	"github.com/quickchainproject/quickchain/consensus"
    12  	"github.com/quickchainproject/quickchain/core"
    13  	"github.com/quickchainproject/quickchain/core/types"
    14  	"github.com/quickchainproject/quickchain/light"
    15  	"github.com/quickchainproject/quickchain/log"
    16  )
    17  
    18  const (
    19  	blockDelayTimeout = time.Second * 10 // timeout for a peer to announce a head that has already been confirmed by others
    20  	maxNodeCount      = 20               // maximum number of fetcherTreeNode entries remembered for each peer
    21  )
    22  
    23  // lightFetcher implements retrieval of newly announced headers. It also provides a peerHasBlock function for the
    24  // ODR system to ensure that we only request data related to a certain block from peers who have already processed
    25  // and announced that block.
    26  type lightFetcher struct {
    27  	pm    *ProtocolManager
    28  	odr   *LesOdr
    29  	chain *light.LightChain
    30  
    31  	lock            sync.Mutex // lock protects access to the fetcher's internal state variables except sent requests
    32  	maxConfirmedTd  *big.Int
    33  	peers           map[*peer]*fetcherPeerInfo
    34  	lastUpdateStats *updateStatsEntry
    35  	syncing         bool
    36  	syncDone        chan *peer
    37  
    38  	reqMu      sync.RWMutex // reqMu protects access to sent header fetch requests
    39  	requested  map[uint64]fetchRequest
    40  	deliverChn chan fetchResponse
    41  	timeoutChn chan uint64
    42  	requestChn chan bool // true if initiated from outside
    43  }
    44  
    45  // fetcherPeerInfo holds fetcher-specific information about each active peer
    46  type fetcherPeerInfo struct {
    47  	root, lastAnnounced *fetcherTreeNode
    48  	nodeCnt             int
    49  	confirmedTd         *big.Int
    50  	bestConfirmed       *fetcherTreeNode
    51  	nodeByHash          map[common.Hash]*fetcherTreeNode
    52  	firstUpdateStats    *updateStatsEntry
    53  }
    54  
    55  // fetcherTreeNode is a node of a tree that holds information about blocks recently
    56  // announced and confirmed by a certain peer. Each new announce message from a peer
    57  // adds nodes to the tree, based on the previous announced head and the reorg depth.
    58  // There are three possible states for a tree node:
    59  // - announced: not downloaded (known) yet, but we know its hash, number and td
    60  // - intermediate: not known, hash and td are empty, they are filled out when it becomes known
    61  // - known: both announced by this peer and downloaded (from any peer).
    62  // This structure makes it possible to always know which peer has a certain block,
    63  // which is necessary for selecting a suitable peer for ODR requests and also for
    64  // canonizing new heads. It also helps to always download the minimum necessary
    65  // amount of headers with a single request.
    66  type fetcherTreeNode struct {
    67  	hash             common.Hash
    68  	number           uint64
    69  	td               *big.Int
    70  	known, requested bool
    71  	parent           *fetcherTreeNode
    72  	children         []*fetcherTreeNode
    73  }
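
// A minimal, illustrative sketch of how an announced chain segment maps onto the
// three fetcherTreeNode states described above; the helper name, hashes, numbers
// and tds are made up and not part of the original code. The known ancestor has
// already been downloaded, the intermediate node stays empty until its header is
// known, and the announced tip carries the values from the announcement.
func exampleFetcherTree() *fetcherTreeNode {
	// known ancestor: downloaded and validated earlier
	root := &fetcherTreeNode{hash: common.HexToHash("0x01"), number: 100, td: big.NewInt(1000), known: true}
	// intermediate node: hash and td remain empty until the header becomes known
	mid := &fetcherTreeNode{number: 101, parent: root}
	root.children = append(root.children, mid)
	// announced tip: hash, number and td come straight from the announcement
	head := &fetcherTreeNode{hash: common.HexToHash("0x03"), number: 102, td: big.NewInt(1200), parent: mid}
	mid.children = append(mid.children, head)
	return root
}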
    74  
    75  // fetchRequest represents a header download request
    76  type fetchRequest struct {
    77  	hash    common.Hash
    78  	amount  uint64
    79  	peer    *peer
    80  	sent    mclock.AbsTime
    81  	timeout bool
    82  }
    83  
    84  // fetchResponse represents a header download response
    85  type fetchResponse struct {
    86  	reqID   uint64
    87  	headers []*types.Header
    88  	peer    *peer
    89  }
    90  
    91  // newLightFetcher creates a new light fetcher
    92  func newLightFetcher(pm *ProtocolManager) *lightFetcher {
    93  	f := &lightFetcher{
    94  		pm:             pm,
    95  		chain:          pm.blockchain.(*light.LightChain),
    96  		odr:            pm.odr,
    97  		peers:          make(map[*peer]*fetcherPeerInfo),
    98  		deliverChn:     make(chan fetchResponse, 100),
    99  		requested:      make(map[uint64]fetchRequest),
   100  		timeoutChn:     make(chan uint64),
   101  		requestChn:     make(chan bool, 100),
   102  		syncDone:       make(chan *peer),
   103  		maxConfirmedTd: big.NewInt(0),
   104  	}
   105  	pm.peers.notify(f)
   106  
   107  	f.pm.wg.Add(1)
   108  	go f.syncLoop()
   109  	return f
   110  }
   111  
   112  // syncLoop is the main event loop of the light fetcher
   113  func (f *lightFetcher) syncLoop() {
   114  	requesting := false
   115  	defer f.pm.wg.Done()
   116  	for {
   117  		select {
   118  		case <-f.pm.quitSync:
   119  			return
   120  		// when a new announcement is received, the request loop keeps running until
   121  		// no further requests are necessary or possible
   122  		case newAnnounce := <-f.requestChn:
   123  			f.lock.Lock()
   124  			s := requesting
   125  			requesting = false
   126  			var (
   127  				rq    *distReq
   128  				reqID uint64
   129  			)
   130  			if !f.syncing && !(newAnnounce && s) {
   131  				rq, reqID = f.nextRequest()
   132  			}
   133  			syncing := f.syncing
   134  			f.lock.Unlock()
   135  
   136  			if rq != nil {
   137  				requesting = true
   138  				_, ok := <-f.pm.reqDist.queue(rq)
   139  				if !ok {
   140  					f.requestChn <- false
   141  				}
   142  
   143  				if !syncing {
   144  					go func() {
   145  						time.Sleep(softRequestTimeout)
   146  						f.reqMu.Lock()
   147  						req, ok := f.requested[reqID]
   148  						if ok {
   149  							req.timeout = true
   150  							f.requested[reqID] = req
   151  						}
   152  						f.reqMu.Unlock()
   153  						// keep starting new requests while possible
   154  						f.requestChn <- false
   155  					}()
   156  				}
   157  			}
   158  		case reqID := <-f.timeoutChn:
   159  			f.reqMu.Lock()
   160  			req, ok := f.requested[reqID]
   161  			if ok {
   162  				delete(f.requested, reqID)
   163  			}
   164  			f.reqMu.Unlock()
   165  			if ok {
   166  				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)
   167  				req.peer.Log().Debug("Fetching data timed out hard")
   168  				go f.pm.removePeer(req.peer.id)
   169  			}
   170  		case resp := <-f.deliverChn:
   171  			f.reqMu.Lock()
   172  			req, ok := f.requested[resp.reqID]
   173  			if ok && req.peer != resp.peer {
   174  				ok = false
   175  			}
   176  			if ok {
   177  				delete(f.requested, resp.reqID)
   178  			}
   179  			f.reqMu.Unlock()
   180  			if ok {
   181  				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), req.timeout)
   182  			}
   183  			f.lock.Lock()
   184  			if !ok || !(f.syncing || f.processResponse(req, resp)) {
   185  				resp.peer.Log().Debug("Failed processing response")
   186  				go f.pm.removePeer(resp.peer.id)
   187  			}
   188  			f.lock.Unlock()
   189  		case p := <-f.syncDone:
   190  			f.lock.Lock()
   191  			p.Log().Debug("Done synchronising with peer")
   192  			f.checkSyncedHeaders(p)
   193  			f.syncing = false
   194  			f.lock.Unlock()
   195  		}
   196  	}
   197  }
   198  
   199  // registerPeer adds a new peer to the fetcher's peer set
   200  func (f *lightFetcher) registerPeer(p *peer) {
   201  	p.lock.Lock()
   202  	p.hasBlock = func(hash common.Hash, number uint64) bool {
   203  		return f.peerHasBlock(p, hash, number)
   204  	}
   205  	p.lock.Unlock()
   206  
   207  	f.lock.Lock()
   208  	defer f.lock.Unlock()
   209  
   210  	f.peers[p] = &fetcherPeerInfo{nodeByHash: make(map[common.Hash]*fetcherTreeNode)}
   211  }
   212  
   213  // unregisterPeer removes a peer from the fetcher's peer set
   214  func (f *lightFetcher) unregisterPeer(p *peer) {
   215  	p.lock.Lock()
   216  	p.hasBlock = nil
   217  	p.lock.Unlock()
   218  
   219  	f.lock.Lock()
   220  	defer f.lock.Unlock()
   221  
   222  	// check for potential timed out block delay statistics
   223  	f.checkUpdateStats(p, nil)
   224  	delete(f.peers, p)
   225  }
   226  
   227  // announce processes a new announcement message received from a peer, adding new
   228  // nodes to the peer's block tree and removing old nodes if necessary
   229  func (f *lightFetcher) announce(p *peer, head *announceData) {
   230  	f.lock.Lock()
   231  	defer f.lock.Unlock()
   232  	p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth)
   233  
   234  	fp := f.peers[p]
   235  	if fp == nil {
   236  		p.Log().Debug("Announcement from unknown peer")
   237  		return
   238  	}
   239  
   240  	if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {
   241  		// announced tds should be strictly monotonic
   242  		p.Log().Debug("Received non-monotonic td", "current", head.Td, "previous", fp.lastAnnounced.td)
   243  		go f.pm.removePeer(p.id)
   244  		return
   245  	}
   246  
   247  	n := fp.lastAnnounced
   248  	for i := uint64(0); i < head.ReorgDepth; i++ {
   249  		if n == nil {
   250  			break
   251  		}
   252  		n = n.parent
   253  	}
   254  	if n != nil {
   255  		// n is now the reorg common ancestor, add a new branch of nodes
   256  		// check if the node count is too high to add new nodes
   257  		locked := false
   258  		for uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil {
   259  			if !locked {
   260  				f.chain.LockChain()
   261  				defer f.chain.UnlockChain()
   262  				locked = true
   263  			}
   264  			// if one of root's children is canonical, keep it, delete other branches and root itself
   265  			var newRoot *fetcherTreeNode
   266  			for i, nn := range fp.root.children {
   267  				if core.GetCanonicalHash(f.pm.chainDb, nn.number) == nn.hash {
   268  					fp.root.children = append(fp.root.children[:i], fp.root.children[i+1:]...)
   269  					nn.parent = nil
   270  					newRoot = nn
   271  					break
   272  				}
   273  			}
   274  			fp.deleteNode(fp.root)
   275  			if n == fp.root {
   276  				n = newRoot
   277  			}
   278  			fp.root = newRoot
   279  			if newRoot == nil || !f.checkKnownNode(p, newRoot) {
   280  				fp.bestConfirmed = nil
   281  				fp.confirmedTd = nil
   282  			}
   283  
   284  			if n == nil {
   285  				break
   286  			}
   287  		}
   288  		if n != nil {
   289  			for n.number < head.Number {
   290  				nn := &fetcherTreeNode{number: n.number + 1, parent: n}
   291  				n.children = append(n.children, nn)
   292  				n = nn
   293  				fp.nodeCnt++
   294  			}
   295  			n.hash = head.Hash
   296  			n.td = head.Td
   297  			fp.nodeByHash[n.hash] = n
   298  		}
   299  	}
   300  	if n == nil {
   301  		// could not find the reorg common ancestor or had to delete the entire tree; a new root and a resync are needed
   302  		if fp.root != nil {
   303  			fp.deleteNode(fp.root)
   304  		}
   305  		n = &fetcherTreeNode{hash: head.Hash, number: head.Number, td: head.Td}
   306  		fp.root = n
   307  		fp.nodeCnt++
   308  		fp.nodeByHash[n.hash] = n
   309  		fp.bestConfirmed = nil
   310  		fp.confirmedTd = nil
   311  	}
   312  
   313  	f.checkKnownNode(p, n)
   314  	p.lock.Lock()
   315  	p.headInfo = head
   316  	fp.lastAnnounced = n
   317  	p.lock.Unlock()
   318  	f.checkUpdateStats(p, nil)
   319  	f.requestChn <- true
   320  }
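
// A minimal sketch of the ancestor walk performed at the top of announce, using
// hypothetical numbers in the comment only: with the last announced head at block
// 105 and ReorgDepth 2, the walk stops at block 103, which becomes the attach point
// for the new branch leading up to the announced head. A nil result means no common
// ancestor is left in the tree and a resync is needed. The helper is illustrative.
func exampleReorgAncestor(lastAnnounced *fetcherTreeNode, reorgDepth uint64) *fetcherTreeNode {
	n := lastAnnounced
	for i := uint64(0); i < reorgDepth && n != nil; i++ {
		n = n.parent
	}
	return n
}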
   321  
   322  // peerHasBlock returns true if we can assume the peer knows the given block
   323  // based on its announcements
   324  func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64) bool {
   325  	f.lock.Lock()
   326  	defer f.lock.Unlock()
   327  
   328  	if f.syncing {
   329  		// always return true when syncing
   330  		// false positives are acceptable, a more sophisticated condition can be implemented later
   331  		return true
   332  	}
   333  
   334  	fp := f.peers[p]
   335  	if fp == nil || fp.root == nil {
   336  		return false
   337  	}
   338  
   339  	if number >= fp.root.number {
   340  		// it is recent enough that if it is known, it should be in the peer's block tree
   341  		return fp.nodeByHash[hash] != nil
   342  	}
   343  	f.chain.LockChain()
   344  	defer f.chain.UnlockChain()
   345  	// if it's older than the peer's block tree root but it's in the same canonical chain
   346  	// as the root, we can still be sure the peer knows it
   347  	//
   348  	// when syncing, just check if it is part of the known chain, there is nothing better we
   349  	// can do since we do not know the most recent block hash yet
   350  	return core.GetCanonicalHash(f.pm.chainDb, fp.root.number) == fp.root.hash && core.GetCanonicalHash(f.pm.chainDb, number) == hash
   351  }
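
// A minimal sketch of how a caller on the ODR side can use the hasBlock callback
// installed by registerPeer (which wraps peerHasBlock above) to decide whether a
// peer is worth asking for block-related data; odrPeerCanServe is a hypothetical
// helper name, not part of the original API.
func odrPeerCanServe(p *peer, hash common.Hash, number uint64) bool {
	p.lock.Lock()
	hasBlock := p.hasBlock
	p.lock.Unlock()
	return hasBlock != nil && hasBlock(hash, number)
}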
   352  
   353  // requestAmount calculates the number of headers to be downloaded starting
   354  // from a certain head backwards
   355  func (f *lightFetcher) requestAmount(p *peer, n *fetcherTreeNode) uint64 {
   356  	amount := uint64(0)
   357  	nn := n
   358  	for nn != nil && !f.checkKnownNode(p, nn) {
   359  		nn = nn.parent
   360  		amount++
   361  	}
   362  	if nn == nil {
   363  		amount = n.number
   364  	}
   365  	return amount
   366  }
   367  
   368  // requestedID tells if a certain reqID has been requested by the fetcher
   369  func (f *lightFetcher) requestedID(reqID uint64) bool {
   370  	f.reqMu.RLock()
   371  	_, ok := f.requested[reqID]
   372  	f.reqMu.RUnlock()
   373  	return ok
   374  }
   375  
   376  // nextRequest selects the peer and announced head to be requested next; the amount
   377  // to be downloaded starting from the head backwards is also returned
   378  func (f *lightFetcher) nextRequest() (*distReq, uint64) {
   379  	var (
   380  		bestHash   common.Hash
   381  		bestAmount uint64
   382  	)
   383  	bestTd := f.maxConfirmedTd
   384  	bestSyncing := false
   385  
   386  	for p, fp := range f.peers {
   387  		for hash, n := range fp.nodeByHash {
   388  			if !f.checkKnownNode(p, n) && !n.requested && (bestTd == nil || n.td.Cmp(bestTd) >= 0) {
   389  				amount := f.requestAmount(p, n)
   390  				if bestTd == nil || n.td.Cmp(bestTd) > 0 || amount < bestAmount {
   391  					bestHash = hash
   392  					bestAmount = amount
   393  					bestTd = n.td
   394  					bestSyncing = fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root)
   395  				}
   396  			}
   397  		}
   398  	}
   399  	if bestTd == f.maxConfirmedTd {
   400  		return nil, 0
   401  	}
   402  
   403  	f.syncing = bestSyncing
   404  
   405  	var rq *distReq
   406  	reqID := genReqID()
   407  	if f.syncing {
   408  		rq = &distReq{
   409  			getCost: func(dp distPeer) uint64 {
   410  				return 0
   411  			},
   412  			canSend: func(dp distPeer) bool {
   413  				p := dp.(*peer)
   414  				f.lock.Lock()
   415  				defer f.lock.Unlock()
   416  
   417  				fp := f.peers[p]
   418  				return fp != nil && fp.nodeByHash[bestHash] != nil
   419  			},
   420  			request: func(dp distPeer) func() {
   421  				go func() {
   422  					p := dp.(*peer)
   423  					p.Log().Debug("Synchronisation started")
   424  					f.pm.synchronise(p)
   425  					f.syncDone <- p
   426  				}()
   427  				return nil
   428  			},
   429  		}
   430  	} else {
   431  		rq = &distReq{
   432  			getCost: func(dp distPeer) uint64 {
   433  				p := dp.(*peer)
   434  				return p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
   435  			},
   436  			canSend: func(dp distPeer) bool {
   437  				p := dp.(*peer)
   438  				f.lock.Lock()
   439  				defer f.lock.Unlock()
   440  
   441  				fp := f.peers[p]
   442  				if fp == nil {
   443  					return false
   444  				}
   445  				n := fp.nodeByHash[bestHash]
   446  				return n != nil && !n.requested
   447  			},
   448  			request: func(dp distPeer) func() {
   449  				p := dp.(*peer)
   450  				f.lock.Lock()
   451  				fp := f.peers[p]
   452  				if fp != nil {
   453  					n := fp.nodeByHash[bestHash]
   454  					if n != nil {
   455  						n.requested = true
   456  					}
   457  				}
   458  				f.lock.Unlock()
   459  
   460  				cost := p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
   461  				p.fcServer.QueueRequest(reqID, cost)
   462  				f.reqMu.Lock()
   463  				f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()}
   464  				f.reqMu.Unlock()
   465  				go func() {
   466  					time.Sleep(hardRequestTimeout)
   467  					f.timeoutChn <- reqID
   468  				}()
   469  				return func() { p.RequestHeadersByHash(reqID, cost, bestHash, int(bestAmount), 0, true) }
   470  			},
   471  		}
   472  	}
   473  	return rq, reqID
   474  }
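
// A minimal sketch of the distReq contract used above (the distReq type itself lives
// in the request distributor): getCost prices the request for flow control, canSend
// decides whether a given peer is suitable, and request performs the bookkeeping and
// returns the function that actually sends the network message (or nil if nothing
// needs to be sent). The no-op values and helper name here are purely illustrative.
func exampleNoopDistReq() *distReq {
	return &distReq{
		getCost: func(dp distPeer) uint64 { return 0 },
		canSend: func(dp distPeer) bool { return true },
		request: func(dp distPeer) func() { return func() {} },
	}
}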
   475  
   476  // deliverHeaders delivers header download request responses for processing
   477  func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types.Header) {
   478  	f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer}
   479  }
   480  
   481  // processResponse processes header download request responses, returns true if successful
   482  func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {
   483  	if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {
   484  		req.peer.Log().Debug("Response content mismatch", "requested", req.amount, "reqfrom", req.hash, "delivered", len(resp.headers))
   485  		return false
   486  	}
   487  	headers := make([]*types.Header, req.amount)
   488  	for i, header := range resp.headers {
   489  		headers[int(req.amount)-1-i] = header
   490  	}
   491  	if _, err := f.chain.InsertHeaderChain(headers, 1); err != nil {
   492  		if err == consensus.ErrFutureBlock {
   493  			return true
   494  		}
   495  		log.Debug("Failed to insert header chain", "err", err)
   496  		return false
   497  	}
   498  	tds := make([]*big.Int, len(headers))
   499  	for i, header := range headers {
   500  		td := f.chain.GetTd(header.Hash(), header.Number.Uint64())
   501  		if td == nil {
   502  			log.Debug("Total difficulty not found for header", "index", i+1, "number", header.Number, "hash", header.Hash())
   503  			return false
   504  		}
   505  		tds[i] = td
   506  	}
   507  	f.newHeaders(headers, tds)
   508  	return true
   509  }
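
// A minimal sketch of the ordering assumption handled in processResponse: headers are
// requested with reverse=true, so they arrive starting at the requested hash and
// walking backwards (descending block numbers), and have to be flipped into ascending
// order before being handed to InsertHeaderChain. The helper is illustrative only.
func exampleAscendingHeaders(delivered []*types.Header) []*types.Header {
	ordered := make([]*types.Header, len(delivered))
	for i, header := range delivered {
		ordered[len(delivered)-1-i] = header
	}
	return ordered
}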
   510  
   511  // newHeaders updates the block trees of all active peers according to a newly
   512  // downloaded and validated batch of headers
   513  func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) {
   514  	var maxTd *big.Int
   515  	for p, fp := range f.peers {
   516  		if !f.checkAnnouncedHeaders(fp, headers, tds) {
   517  			p.Log().Debug("Inconsistent announcement")
   518  			go f.pm.removePeer(p.id)
   519  		}
   520  		if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) {
   521  			maxTd = fp.confirmedTd
   522  		}
   523  	}
   524  	if maxTd != nil {
   525  		f.updateMaxConfirmedTd(maxTd)
   526  	}
   527  }
   528  
   529  // checkAnnouncedHeaders updates peer's block tree if necessary after validating
   530  // a batch of headers. It searches for the latest header in the batch that has a
   531  // matching tree node (if any), and if it has not been marked as known already,
   532  // sets it and its parents to known (even those which are older than the currently
   533  // validated ones). The return value shows whether all hashes, numbers and Tds
   534  // matched the announced values correctly (otherwise the peer should be dropped).
   535  func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*types.Header, tds []*big.Int) bool {
   536  	var (
   537  		n      *fetcherTreeNode
   538  		header *types.Header
   539  		td     *big.Int
   540  	)
   541  
   542  	for i := len(headers) - 1; ; i-- {
   543  		if i < 0 {
   544  			if n == nil {
   545  				// no more headers and nothing to match
   546  				return true
   547  			}
   548  			// we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching
   549  			hash, number := header.ParentHash, header.Number.Uint64()-1
   550  			td = f.chain.GetTd(hash, number)
   551  			header = f.chain.GetHeader(hash, number)
   552  			if header == nil || td == nil {
   553  				log.Error("Missing parent of validated header", "hash", hash, "number", number)
   554  				return false
   555  			}
   556  		} else {
   557  			header = headers[i]
   558  			td = tds[i]
   559  		}
   560  		hash := header.Hash()
   561  		number := header.Number.Uint64()
   562  		if n == nil {
   563  			n = fp.nodeByHash[hash]
   564  		}
   565  		if n != nil {
   566  			if n.td == nil {
   567  				// node was unannounced
   568  				if nn := fp.nodeByHash[hash]; nn != nil {
   569  					// if there was already a node with the same hash, continue there and drop this one
   570  					nn.children = append(nn.children, n.children...)
   571  					n.children = nil
   572  					fp.deleteNode(n)
   573  					n = nn
   574  				} else {
   575  					n.hash = hash
   576  					n.td = td
   577  					fp.nodeByHash[hash] = n
   578  				}
   579  			}
   580  			// check if it matches the header
   581  			if n.hash != hash || n.number != number || n.td.Cmp(td) != 0 {
   582  				// peer has previously made an invalid announcement
   583  				return false
   584  			}
   585  			if n.known {
   586  				// we reached a known node that matched our expectations, return with success
   587  				return true
   588  			}
   589  			n.known = true
   590  			if fp.confirmedTd == nil || td.Cmp(fp.confirmedTd) > 0 {
   591  				fp.confirmedTd = td
   592  				fp.bestConfirmed = n
   593  			}
   594  			n = n.parent
   595  			if n == nil {
   596  				return true
   597  			}
   598  		}
   599  	}
   600  }
   601  
   602  // checkSyncedHeaders updates peer's block tree after synchronisation by marking
   603  // downloaded headers as known. If none of the announced headers are found after
   604  // syncing, the peer is dropped.
   605  func (f *lightFetcher) checkSyncedHeaders(p *peer) {
   606  	fp := f.peers[p]
   607  	if fp == nil {
   608  		p.Log().Debug("Unknown peer to check sync headers")
   609  		return
   610  	}
   611  	n := fp.lastAnnounced
   612  	var td *big.Int
   613  	for n != nil {
   614  		if td = f.chain.GetTd(n.hash, n.number); td != nil {
   615  			break
   616  		}
   617  		n = n.parent
   618  	}
   619  	// now n is the latest downloaded header after syncing
   620  	if n == nil {
   621  		p.Log().Debug("Synchronisation failed")
   622  		go f.pm.removePeer(p.id)
   623  	} else {
   624  		header := f.chain.GetHeader(n.hash, n.number)
   625  		f.newHeaders([]*types.Header{header}, []*big.Int{td})
   626  	}
   627  }
   628  
   629  // checkKnownNode checks if a block tree node is known (downloaded and validated).
   630  // If it was not known previously but is found in the database, its known flag is set.
   631  func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
   632  	if n.known {
   633  		return true
   634  	}
   635  	td := f.chain.GetTd(n.hash, n.number)
   636  	if td == nil {
   637  		return false
   638  	}
   639  	header := f.chain.GetHeader(n.hash, n.number)
   640  	// check the availability of both header and td because reads are not protected by chain db mutex
   641  	// Note: returning false is always safe here
   642  	if header == nil {
   643  		return false
   644  	}
   645  
   646  	fp := f.peers[p]
   647  	if fp == nil {
   648  		p.Log().Debug("Unknown peer to check known nodes")
   649  		return false
   650  	}
   651  	if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
   652  		p.Log().Debug("Inconsistent announcement")
   653  		go f.pm.removePeer(p.id)
   654  	}
   655  	if fp.confirmedTd != nil {
   656  		f.updateMaxConfirmedTd(fp.confirmedTd)
   657  	}
   658  	return n.known
   659  }
   660  
   661  // deleteNode deletes a node and its child subtrees from a peer's block tree
   662  func (fp *fetcherPeerInfo) deleteNode(n *fetcherTreeNode) {
   663  	if n.parent != nil {
   664  		for i, nn := range n.parent.children {
   665  			if nn == n {
   666  				n.parent.children = append(n.parent.children[:i], n.parent.children[i+1:]...)
   667  				break
   668  			}
   669  		}
   670  	}
   671  	for {
   672  		if n.td != nil {
   673  			delete(fp.nodeByHash, n.hash)
   674  		}
   675  		fp.nodeCnt--
   676  		if len(n.children) == 0 {
   677  			return
   678  		}
   679  		for i, nn := range n.children {
   680  			if i == 0 {
   681  				n = nn
   682  			} else {
   683  				fp.deleteNode(nn)
   684  			}
   685  		}
   686  	}
   687  }
   688  
   689  // updateStatsEntry items form a linked list that is expanded with a new item every time a new head with a higher Td
   690  // than the previous one has been downloaded and validated. The list contains a series of maximum confirmed Td values
   691  // and the time these values have been confirmed, both increasing monotonically. A maximum confirmed Td is calculated
   692  // both globally for all peers and also for each individual peer (meaning that the given peer has announced the head
   693  // and it has also been downloaded from any peer, either before or after the given announcement).
   694  // The linked list has a global tail where new confirmed Td entries are added and a separate head for each peer,
   695  // pointing to the next Td entry that is higher than the peer's max confirmed Td (nil if it has already confirmed
   696  // the current global head).
   697  type updateStatsEntry struct {
   698  	time mclock.AbsTime
   699  	td   *big.Int
   700  	next *updateStatsEntry
   701  }
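
// A minimal sketch (with made-up td values and an illustrative helper name) of the
// linked list described above: each newly confirmed maximum td is appended at the
// global tail, and a peer keeps a pointer to the first entry it has not confirmed
// yet, which is what checkUpdateStats later turns into a block delay measurement.
func exampleUpdateStatsList() (listHead, peerHead *updateStatsEntry) {
	first := &updateStatsEntry{time: mclock.Now(), td: big.NewInt(1000)}
	second := &updateStatsEntry{time: mclock.Now(), td: big.NewInt(1100)}
	first.next = second // the global tail (lastUpdateStats) now points at second
	// a peer that has so far confirmed only td=1000 keeps second as its
	// firstUpdateStats pointer until it confirms a head with td >= 1100
	return first, second
}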
   702  
   703  // updateMaxConfirmedTd updates the block delay statistics of active peers. Whenever a new highest Td is confirmed,
   704  // adds it to the end of a linked list together with the time it has been confirmed. Then checks which peers have
   705  // already confirmed a head with the same or higher Td (which counts as zero block delay) and updates their statistics.
   706  // Those who have not confirmed such a head by now will be updated by a subsequent checkUpdateStats call with a
   707  // positive block delay value.
   708  func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) {
   709  	if f.maxConfirmedTd == nil || td.Cmp(f.maxConfirmedTd) > 0 {
   710  		f.maxConfirmedTd = td
   711  		newEntry := &updateStatsEntry{
   712  			time: mclock.Now(),
   713  			td:   td,
   714  		}
   715  		if f.lastUpdateStats != nil {
   716  			f.lastUpdateStats.next = newEntry
   717  		}
   718  		f.lastUpdateStats = newEntry
   719  		for p := range f.peers {
   720  			f.checkUpdateStats(p, newEntry)
   721  		}
   722  	}
   723  }
   724  
   725  // checkUpdateStats checks those peers who have not confirmed a certain highest Td (or a larger one) by the time it
   726  // has been confirmed by another peer. If they have confirmed such a head by now, their stats are updated with the
   727  // block delay which is (this peer's confirmation time)-(first confirmation time). After blockDelayTimeout has passed,
   728  // the stats are updated with blockDelayTimeout value. In either case, the confirmed or timed out updateStatsEntry
   729  // items are removed from the head of the linked list.
   730  // If a new entry has been added to the global tail, it is passed as a parameter here even though this function
   731  // assumes that it has already been added, so that if the peer's list is empty (all heads confirmed, head is nil),
   732  // it can set the new head to newEntry.
   733  func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) {
   734  	now := mclock.Now()
   735  	fp := f.peers[p]
   736  	if fp == nil {
   737  		p.Log().Debug("Unknown peer to check update stats")
   738  		return
   739  	}
   740  	if newEntry != nil && fp.firstUpdateStats == nil {
   741  		fp.firstUpdateStats = newEntry
   742  	}
   743  	for fp.firstUpdateStats != nil && fp.firstUpdateStats.time <= now-mclock.AbsTime(blockDelayTimeout) {
   744  		f.pm.serverPool.adjustBlockDelay(p.poolEntry, blockDelayTimeout)
   745  		fp.firstUpdateStats = fp.firstUpdateStats.next
   746  	}
   747  	if fp.confirmedTd != nil {
   748  		for fp.firstUpdateStats != nil && fp.firstUpdateStats.td.Cmp(fp.confirmedTd) <= 0 {
   749  			f.pm.serverPool.adjustBlockDelay(p.poolEntry, time.Duration(now-fp.firstUpdateStats.time))
   750  			fp.firstUpdateStats = fp.firstUpdateStats.next
   751  		}
   752  	}
   753  }
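
// A minimal sketch that roughly mirrors the two branches of checkUpdateStats above:
// an entry confirmed within blockDelayTimeout is charged the time elapsed since its
// first confirmation, while an entry older than blockDelayTimeout is charged the
// timeout value itself. The helper name and the standalone form are illustrative only.
func exampleBlockDelay(entry *updateStatsEntry, confirmedAt mclock.AbsTime) time.Duration {
	delay := time.Duration(confirmedAt - entry.time)
	if delay > blockDelayTimeout {
		delay = blockDelayTimeout
	}
	return delay
}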