github.com/sberex/go-sberex@v1.8.2-0.20181113200658-ed96ac38f7d7/les/fetcher.go (about)

     1  // This file is part of the go-sberex library. The go-sberex library is 
     2  // free software: you can redistribute it and/or modify it under the terms 
     3  // of the GNU Lesser General Public License as published by the Free 
     4  // Software Foundation, either version 3 of the License, or (at your option)
     5  // any later version.
     6  //
     7  // The go-sberex library is distributed in the hope that it will be useful, 
     8  // but WITHOUT ANY WARRANTY; without even the implied warranty of
     9  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser 
    10  // General Public License <http://www.gnu.org/licenses/> for more details.
    11  
    12  // Package les implements the Light Sberex Subprotocol.
    13  package les
    14  
    15  import (
    16  	"math/big"
    17  	"sync"
    18  	"time"
    19  
    20  	"github.com/Sberex/go-sberex/common"
    21  	"github.com/Sberex/go-sberex/common/mclock"
    22  	"github.com/Sberex/go-sberex/consensus"
    23  	"github.com/Sberex/go-sberex/core"
    24  	"github.com/Sberex/go-sberex/core/types"
    25  	"github.com/Sberex/go-sberex/light"
    26  	"github.com/Sberex/go-sberex/log"
    27  )
    28  
// Tuning parameters of the fetcher.
const (
	blockDelayTimeout = time.Second * 10 // timeout for a peer to announce a head that has already been confirmed by others
	maxNodeCount      = 20               // maximum number of fetcherTreeNode entries remembered for each peer
)
    33  
// lightFetcher implements retrieval of newly announced headers. It also provides a peerHasBlock function for the
// ODR system to ensure that we only request data related to a certain block from peers who have already processed
// and announced that block.
type lightFetcher struct {
	pm    *ProtocolManager
	odr   *LesOdr
	chain *light.LightChain

	lock sync.Mutex // lock protects access to the fetcher's internal state variables except sent requests
	// maxConfirmedTd is the highest total difficulty that has been both announced and downloaded
	maxConfirmedTd *big.Int
	// peers maps each active peer to its fetcher-specific state (block tree, stats)
	peers map[*peer]*fetcherPeerInfo
	// lastUpdateStats is the tail of the global confirmed-Td linked list (see updateStatsEntry)
	lastUpdateStats *updateStatsEntry
	syncing         bool       // true while a full synchronisation (rather than a header fetch) is running
	syncDone        chan *peer // signalled by the sync goroutine when synchronisation with a peer finishes

	reqMu      sync.RWMutex // reqMu protects access to sent header fetch requests
	requested  map[uint64]fetchRequest
	deliverChn chan fetchResponse
	timeoutChn chan uint64 // carries reqIDs whose hard timeout has expired
	requestChn chan bool   // true if initiated from outside
}
    55  
// fetcherPeerInfo holds fetcher-specific information about each active peer
type fetcherPeerInfo struct {
	// root is the oldest remembered tree node; lastAnnounced is the head of the latest announcement
	root, lastAnnounced *fetcherTreeNode
	// nodeCnt counts the nodes currently held in the tree (bounded by maxNodeCount)
	nodeCnt int
	// confirmedTd/bestConfirmed track the highest-Td node announced by this peer
	// that has also been downloaded and validated
	confirmedTd   *big.Int
	bestConfirmed *fetcherTreeNode
	// nodeByHash indexes announced (non-intermediate) tree nodes by block hash
	nodeByHash map[common.Hash]*fetcherTreeNode
	// firstUpdateStats is this peer's head into the global updateStatsEntry list
	firstUpdateStats *updateStatsEntry
}
    65  
// fetcherTreeNode is a node of a tree that holds information about blocks recently
// announced and confirmed by a certain peer. Each new announce message from a peer
// adds nodes to the tree, based on the previous announced head and the reorg depth.
// There are three possible states for a tree node:
// - announced: not downloaded (known) yet, but we know its head, number and td
// - intermediate: not known, hash and td are empty, they are filled out when it becomes known
// - known: both announced by this peer and downloaded (from any peer).
// This structure makes it possible to always know which peer has a certain block,
// which is necessary for selecting a suitable peer for ODR requests and also for
// canonizing new heads. It also helps to always download the minimum necessary
// amount of headers with a single request.
type fetcherTreeNode struct {
	hash   common.Hash // zero for intermediate nodes until they become known
	number uint64
	td     *big.Int // nil for intermediate nodes until they become known
	// known: downloaded and validated; requested: a header fetch is in flight
	known, requested bool
	parent           *fetcherTreeNode
	children         []*fetcherTreeNode
}
    85  
// fetchRequest represents a header download request
type fetchRequest struct {
	hash    common.Hash    // head of the requested header chain
	amount  uint64         // number of headers requested, walking backwards from hash
	peer    *peer          // peer the request was sent to
	sent    mclock.AbsTime // send time, used for response-time statistics
	timeout bool           // set once the soft timeout has elapsed without a response
}
    94  
// fetchResponse represents a header download response
type fetchResponse struct {
	reqID   uint64          // id of the fetchRequest this response answers
	headers []*types.Header // delivered headers, head first (reverse order)
	peer    *peer           // peer that delivered the response
}
   101  
   102  // newLightFetcher creates a new light fetcher
   103  func newLightFetcher(pm *ProtocolManager) *lightFetcher {
   104  	f := &lightFetcher{
   105  		pm:             pm,
   106  		chain:          pm.blockchain.(*light.LightChain),
   107  		odr:            pm.odr,
   108  		peers:          make(map[*peer]*fetcherPeerInfo),
   109  		deliverChn:     make(chan fetchResponse, 100),
   110  		requested:      make(map[uint64]fetchRequest),
   111  		timeoutChn:     make(chan uint64),
   112  		requestChn:     make(chan bool, 100),
   113  		syncDone:       make(chan *peer),
   114  		maxConfirmedTd: big.NewInt(0),
   115  	}
   116  	pm.peers.notify(f)
   117  
   118  	f.pm.wg.Add(1)
   119  	go f.syncLoop()
   120  	return f
   121  }
   122  
// syncLoop is the main event loop of the light fetcher. It multiplexes four
// event sources: new announcements (requestChn), hard request timeouts
// (timeoutChn), delivered header responses (deliverChn) and sync completion
// (syncDone). It exits when pm.quitSync is closed.
func (f *lightFetcher) syncLoop() {
	requesting := false
	defer f.pm.wg.Done()
	for {
		select {
		case <-f.pm.quitSync:
			return
		// when a new announce is received, request loop keeps running until
		// no further requests are necessary or possible
		case newAnnounce := <-f.requestChn:
			f.lock.Lock()
			s := requesting
			requesting = false
			var (
				rq    *distReq
				reqID uint64
			)
			// skip scheduling if a sync is running, or if this is an external
			// announcement while a request is already outstanding
			if !f.syncing && !(newAnnounce && s) {
				rq, reqID = f.nextRequest()
			}
			syncing := f.syncing
			f.lock.Unlock()

			if rq != nil {
				requesting = true
				// queue the request with the distributor; a closed channel
				// means it could not be sent, so retry via requestChn
				_, ok := <-f.pm.reqDist.queue(rq)
				if !ok {
					f.requestChn <- false
				}

				if !syncing {
					// arm the soft timeout: mark the request timed out (for
					// response-time stats) and trigger another request round
					go func() {
						time.Sleep(softRequestTimeout)
						f.reqMu.Lock()
						req, ok := f.requested[reqID]
						if ok {
							req.timeout = true
							f.requested[reqID] = req
						}
						f.reqMu.Unlock()
						// keep starting new requests while possible
						f.requestChn <- false
					}()
				}
			}
		case reqID := <-f.timeoutChn:
			// hard timeout: penalize the peer's response time and drop it
			f.reqMu.Lock()
			req, ok := f.requested[reqID]
			if ok {
				delete(f.requested, reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)
				req.peer.Log().Debug("Fetching data timed out hard")
				go f.pm.removePeer(req.peer.id)
			}
		case resp := <-f.deliverChn:
			// header delivery: match it against the pending request table;
			// a response from the wrong peer is treated as unrequested
			f.reqMu.Lock()
			req, ok := f.requested[resp.reqID]
			if ok && req.peer != resp.peer {
				ok = false
			}
			if ok {
				delete(f.requested, resp.reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), req.timeout)
			}
			f.lock.Lock()
			// an unmatched or unprocessable response gets the peer dropped;
			// while syncing, deliveries are accepted without processing
			if !ok || !(f.syncing || f.processResponse(req, resp)) {
				resp.peer.Log().Debug("Failed processing response")
				go f.pm.removePeer(resp.peer.id)
			}
			f.lock.Unlock()
		case p := <-f.syncDone:
			// synchronisation finished: validate what was synced and resume
			// normal header fetching
			f.lock.Lock()
			p.Log().Debug("Done synchronising with peer")
			f.checkSyncedHeaders(p)
			f.syncing = false
			f.lock.Unlock()
		}
	}
}
   209  
   210  // registerPeer adds a new peer to the fetcher's peer set
   211  func (f *lightFetcher) registerPeer(p *peer) {
   212  	p.lock.Lock()
   213  	p.hasBlock = func(hash common.Hash, number uint64) bool {
   214  		return f.peerHasBlock(p, hash, number)
   215  	}
   216  	p.lock.Unlock()
   217  
   218  	f.lock.Lock()
   219  	defer f.lock.Unlock()
   220  
   221  	f.peers[p] = &fetcherPeerInfo{nodeByHash: make(map[common.Hash]*fetcherTreeNode)}
   222  }
   223  
   224  // unregisterPeer removes a new peer from the fetcher's peer set
   225  func (f *lightFetcher) unregisterPeer(p *peer) {
   226  	p.lock.Lock()
   227  	p.hasBlock = nil
   228  	p.lock.Unlock()
   229  
   230  	f.lock.Lock()
   231  	defer f.lock.Unlock()
   232  
   233  	// check for potential timed out block delay statistics
   234  	f.checkUpdateStats(p, nil)
   235  	delete(f.peers, p)
   236  }
   237  
// announce processes a new announcement message received from a peer, adding new
// nodes to the peer's block tree and removing old nodes if necessary
func (f *lightFetcher) announce(p *peer, head *announceData) {
	f.lock.Lock()
	defer f.lock.Unlock()
	p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth)

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Announcement from unknown peer")
		return
	}

	if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {
		// announced tds should be strictly monotonic
		p.Log().Debug("Received non-monotonic td", "current", head.Td, "previous", fp.lastAnnounced.td)
		go f.pm.removePeer(p.id)
		return
	}

	// walk back ReorgDepth steps from the last announced head to find the
	// reorg common ancestor in the peer's block tree (nil if the tree is
	// empty or the ancestor is older than the remembered root)
	n := fp.lastAnnounced
	for i := uint64(0); i < head.ReorgDepth; i++ {
		if n == nil {
			break
		}
		n = n.parent
	}
	if n != nil {
		// n is now the reorg common ancestor, add a new branch of nodes
		// check if the node count is too high to add new nodes
		locked := false
		for uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil {
			// the chain lock is taken once and held (via defer) for all
			// canonical-hash lookups while pruning
			if !locked {
				f.chain.LockChain()
				defer f.chain.UnlockChain()
				locked = true
			}
			// if one of root's children is canonical, keep it, delete other branches and root itself
			var newRoot *fetcherTreeNode
			for i, nn := range fp.root.children {
				if core.GetCanonicalHash(f.pm.chainDb, nn.number) == nn.hash {
					fp.root.children = append(fp.root.children[:i], fp.root.children[i+1:]...)
					nn.parent = nil
					newRoot = nn
					break
				}
			}
			fp.deleteNode(fp.root)
			if n == fp.root {
				n = newRoot
			}
			fp.root = newRoot
			// if the surviving root is missing or not yet known, any previously
			// confirmed best node has been pruned away with the old tree
			if newRoot == nil || !f.checkKnownNode(p, newRoot) {
				fp.bestConfirmed = nil
				fp.confirmedTd = nil
			}

			if n == nil {
				break
			}
		}
		if n != nil {
			// extend the branch with intermediate (unannounced) nodes up to
			// the announced head number, then fill out the head node itself
			for n.number < head.Number {
				nn := &fetcherTreeNode{number: n.number + 1, parent: n}
				n.children = append(n.children, nn)
				n = nn
				fp.nodeCnt++
			}
			n.hash = head.Hash
			n.td = head.Td
			fp.nodeByHash[n.hash] = n
		}
	}
	if n == nil {
		// could not find reorg common ancestor or had to delete entire tree, a new root and a resync is needed
		if fp.root != nil {
			fp.deleteNode(fp.root)
		}
		n = &fetcherTreeNode{hash: head.Hash, number: head.Number, td: head.Td}
		fp.root = n
		fp.nodeCnt++
		fp.nodeByHash[n.hash] = n
		fp.bestConfirmed = nil
		fp.confirmedTd = nil
	}

	// mark the new head known right away if it is already in the database
	f.checkKnownNode(p, n)
	p.lock.Lock()
	p.headInfo = head
	fp.lastAnnounced = n
	p.lock.Unlock()
	f.checkUpdateStats(p, nil)
	// wake up the sync loop to schedule a fetch for the new head
	f.requestChn <- true
}
   332  
// peerHasBlock returns true if we can assume the peer knows the given block
// based on its announcements
func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64) bool {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.syncing {
		// always return true when syncing
		// false positives are acceptable, a more sophisticated condition can be implemented later
		return true
	}

	fp := f.peers[p]
	if fp == nil || fp.root == nil {
		return false
	}

	if number >= fp.root.number {
		// it is recent enough that if it is known, it should be in the peer's block tree
		return fp.nodeByHash[hash] != nil
	}
	f.chain.LockChain()
	defer f.chain.UnlockChain()
	// if it's older than the peer's block tree root but it's in the same canonical chain
	// as the root, we can still be sure the peer knows it
	//
	// when syncing, just check if it is part of the known chain, there is nothing better we
	// can do since we do not know the most recent block hash yet
	return core.GetCanonicalHash(f.pm.chainDb, fp.root.number) == fp.root.hash && core.GetCanonicalHash(f.pm.chainDb, number) == hash
}
   363  
   364  // requestAmount calculates the amount of headers to be downloaded starting
   365  // from a certain head backwards
   366  func (f *lightFetcher) requestAmount(p *peer, n *fetcherTreeNode) uint64 {
   367  	amount := uint64(0)
   368  	nn := n
   369  	for nn != nil && !f.checkKnownNode(p, nn) {
   370  		nn = nn.parent
   371  		amount++
   372  	}
   373  	if nn == nil {
   374  		amount = n.number
   375  	}
   376  	return amount
   377  }
   378  
   379  // requestedID tells if a certain reqID has been requested by the fetcher
   380  func (f *lightFetcher) requestedID(reqID uint64) bool {
   381  	f.reqMu.RLock()
   382  	_, ok := f.requested[reqID]
   383  	f.reqMu.RUnlock()
   384  	return ok
   385  }
   386  
// nextRequest selects the peer and announced head to be requested next, amount
// to be downloaded starting from the head backwards is also returned.
// Caller must hold f.lock; the returned distReq closures re-acquire it themselves.
func (f *lightFetcher) nextRequest() (*distReq, uint64) {
	var (
		bestHash   common.Hash
		bestAmount uint64
	)
	bestTd := f.maxConfirmedTd
	bestSyncing := false

	// scan every peer's announced-but-unknown nodes for the highest td
	// candidate (ties broken by the smallest download amount)
	for p, fp := range f.peers {
		for hash, n := range fp.nodeByHash {
			if !f.checkKnownNode(p, n) && !n.requested && (bestTd == nil || n.td.Cmp(bestTd) >= 0) {
				amount := f.requestAmount(p, n)
				if bestTd == nil || n.td.Cmp(bestTd) > 0 || amount < bestAmount {
					bestHash = hash
					bestAmount = amount
					bestTd = n.td
					// fall back to full sync if the peer has nothing confirmed
					// or its tree root is not known yet
					bestSyncing = fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root)
				}
			}
		}
	}
	// NOTE: deliberate pointer comparison — bestTd still pointing at the very
	// same *big.Int as f.maxConfirmedTd means no candidate was selected above
	if bestTd == f.maxConfirmedTd {
		return nil, 0
	}

	f.syncing = bestSyncing

	var rq *distReq
	reqID := genReqID()
	if f.syncing {
		// synchronisation request: run the downloader against a peer that
		// has announced the chosen head
		rq = &distReq{
			getCost: func(dp distPeer) uint64 {
				return 0
			},
			canSend: func(dp distPeer) bool {
				p := dp.(*peer)
				f.lock.Lock()
				defer f.lock.Unlock()

				fp := f.peers[p]
				return fp != nil && fp.nodeByHash[bestHash] != nil
			},
			request: func(dp distPeer) func() {
				go func() {
					p := dp.(*peer)
					p.Log().Debug("Synchronisation started")
					f.pm.synchronise(p)
					// signal syncLoop when the sync is finished
					f.syncDone <- p
				}()
				return nil
			},
		}
	} else {
		// plain header fetch: download bestAmount headers backwards from bestHash
		rq = &distReq{
			getCost: func(dp distPeer) uint64 {
				p := dp.(*peer)
				return p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
			},
			canSend: func(dp distPeer) bool {
				p := dp.(*peer)
				f.lock.Lock()
				defer f.lock.Unlock()

				fp := f.peers[p]
				if fp == nil {
					return false
				}
				n := fp.nodeByHash[bestHash]
				return n != nil && !n.requested
			},
			request: func(dp distPeer) func() {
				// mark the node requested so it is not selected again
				p := dp.(*peer)
				f.lock.Lock()
				fp := f.peers[p]
				if fp != nil {
					n := fp.nodeByHash[bestHash]
					if n != nil {
						n.requested = true
					}
				}
				f.lock.Unlock()

				cost := p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
				p.fcServer.QueueRequest(reqID, cost)
				// record the request before sending so a fast response can be matched
				f.reqMu.Lock()
				f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()}
				f.reqMu.Unlock()
				// arm the hard timeout that will eventually drop the peer
				go func() {
					time.Sleep(hardRequestTimeout)
					f.timeoutChn <- reqID
				}()
				return func() { p.RequestHeadersByHash(reqID, cost, bestHash, int(bestAmount), 0, true) }
			},
		}
	}
	return rq, reqID
}
   486  
   487  // deliverHeaders delivers header download request responses for processing
   488  func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types.Header) {
   489  	f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer}
   490  }
   491  
   492  // processResponse processes header download request responses, returns true if successful
   493  func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {
   494  	if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {
   495  		req.peer.Log().Debug("Response content mismatch", "requested", len(resp.headers), "reqfrom", resp.headers[0], "delivered", req.amount, "delfrom", req.hash)
   496  		return false
   497  	}
   498  	headers := make([]*types.Header, req.amount)
   499  	for i, header := range resp.headers {
   500  		headers[int(req.amount)-1-i] = header
   501  	}
   502  	if _, err := f.chain.InsertHeaderChain(headers, 1); err != nil {
   503  		if err == consensus.ErrFutureBlock {
   504  			return true
   505  		}
   506  		log.Debug("Failed to insert header chain", "err", err)
   507  		return false
   508  	}
   509  	tds := make([]*big.Int, len(headers))
   510  	for i, header := range headers {
   511  		td := f.chain.GetTd(header.Hash(), header.Number.Uint64())
   512  		if td == nil {
   513  			log.Debug("Total difficulty not found for header", "index", i+1, "number", header.Number, "hash", header.Hash())
   514  			return false
   515  		}
   516  		tds[i] = td
   517  	}
   518  	f.newHeaders(headers, tds)
   519  	return true
   520  }
   521  
   522  // newHeaders updates the block trees of all active peers according to a newly
   523  // downloaded and validated batch or headers
   524  func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) {
   525  	var maxTd *big.Int
   526  	for p, fp := range f.peers {
   527  		if !f.checkAnnouncedHeaders(fp, headers, tds) {
   528  			p.Log().Debug("Inconsistent announcement")
   529  			go f.pm.removePeer(p.id)
   530  		}
   531  		if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) {
   532  			maxTd = fp.confirmedTd
   533  		}
   534  	}
   535  	if maxTd != nil {
   536  		f.updateMaxConfirmedTd(maxTd)
   537  	}
   538  }
   539  
// checkAnnouncedHeaders updates peer's block tree if necessary after validating
// a batch of headers. It searches for the latest header in the batch that has a
// matching tree node (if any), and if it has not been marked as known already,
// sets it and its parents to known (even those which are older than the currently
// validated ones). Return value shows if all hashes, numbers and Tds matched
// correctly to the announced values (otherwise the peer should be dropped).
func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*types.Header, tds []*big.Int) bool {
	var (
		n      *fetcherTreeNode
		header *types.Header
		td     *big.Int
	)

	// iterate the batch newest-to-oldest; once the batch is exhausted (i < 0),
	// keep walking parent headers from the local chain until a known node or
	// the end of the peer's tree is reached
	for i := len(headers) - 1; ; i-- {
		if i < 0 {
			if n == nil {
				// no more headers and nothing to match
				return true
			}
			// we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching
			hash, number := header.ParentHash, header.Number.Uint64()-1
			td = f.chain.GetTd(hash, number)
			header = f.chain.GetHeader(hash, number)
			if header == nil || td == nil {
				log.Error("Missing parent of validated header", "hash", hash, "number", number)
				return false
			}
		} else {
			header = headers[i]
			td = tds[i]
		}
		hash := header.Hash()
		number := header.Number.Uint64()
		// anchor the walk at the first header that has a matching tree node
		if n == nil {
			n = fp.nodeByHash[hash]
		}
		if n != nil {
			if n.td == nil {
				// node was unannounced
				if nn := fp.nodeByHash[hash]; nn != nil {
					// if there was already a node with the same hash, continue there and drop this one
					nn.children = append(nn.children, n.children...)
					n.children = nil
					fp.deleteNode(n)
					n = nn
				} else {
					// fill out the intermediate node with the validated data
					n.hash = hash
					n.td = td
					fp.nodeByHash[hash] = n
				}
			}
			// check if it matches the header
			if n.hash != hash || n.number != number || n.td.Cmp(td) != 0 {
				// peer has previously made an invalid announcement
				return false
			}
			if n.known {
				// we reached a known node that matched our expectations, return with success
				return true
			}
			n.known = true
			// update the peer's best confirmed head if this td is higher
			if fp.confirmedTd == nil || td.Cmp(fp.confirmedTd) > 0 {
				fp.confirmedTd = td
				fp.bestConfirmed = n
			}
			n = n.parent
			if n == nil {
				return true
			}
		}
	}
}
   612  
// checkSyncedHeaders updates peer's block tree after synchronisation by marking
// downloaded headers as known. If none of the announced headers are found after
// syncing, the peer is dropped.
func (f *lightFetcher) checkSyncedHeaders(p *peer) {
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check sync headers")
		return
	}
	// walk back from the last announced head until a header whose td is in
	// the database is found — that is the newest header the sync delivered
	n := fp.lastAnnounced
	var td *big.Int
	for n != nil {
		if td = f.chain.GetTd(n.hash, n.number); td != nil {
			break
		}
		n = n.parent
	}
	// now n is the latest downloaded header after syncing
	if n == nil {
		// none of the announced headers made it into the chain
		p.Log().Debug("Synchronisation failed")
		go f.pm.removePeer(p.id)
	} else {
		header := f.chain.GetHeader(n.hash, n.number)
		f.newHeaders([]*types.Header{header}, []*big.Int{td})
	}
}
   639  
// checkKnownNode checks if a block tree node is known (downloaded and validated)
// If it was not known previously but found in the database, sets its known flag
func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
	if n.known {
		return true
	}
	td := f.chain.GetTd(n.hash, n.number)
	if td == nil {
		return false
	}
	header := f.chain.GetHeader(n.hash, n.number)
	// check the availability of both header and td because reads are not protected by chain db mutex
	// Note: returning false is always safe here
	if header == nil {
		return false
	}

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check known nodes")
		return false
	}
	// run the single header through the announcement checker, which marks the
	// node (and its ancestors) known if everything matches
	if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
		p.Log().Debug("Inconsistent announcement")
		go f.pm.removePeer(p.id)
	}
	if fp.confirmedTd != nil {
		f.updateMaxConfirmedTd(fp.confirmedTd)
	}
	// n.known may have been set by checkAnnouncedHeaders above
	return n.known
}
   671  
// deleteNode deletes a node and its child subtrees from a peer's block tree
func (fp *fetcherPeerInfo) deleteNode(n *fetcherTreeNode) {
	// unlink n from its parent's children list (if it has a parent)
	if n.parent != nil {
		for i, nn := range n.parent.children {
			if nn == n {
				n.parent.children = append(n.parent.children[:i], n.parent.children[i+1:]...)
				break
			}
		}
	}
	// descend the first-child chain iteratively, recursing only for siblings
	for {
		// intermediate nodes have nil td and were never indexed by hash
		if n.td != nil {
			delete(fp.nodeByHash, n.hash)
		}
		fp.nodeCnt--
		if len(n.children) == 0 {
			return
		}
		for i, nn := range n.children {
			if i == 0 {
				n = nn
			} else {
				fp.deleteNode(nn)
			}
		}
	}
}
   699  
// updateStatsEntry items form a linked list that is expanded with a new item every time a new head with a higher Td
// than the previous one has been downloaded and validated. The list contains a series of maximum confirmed Td values
// and the time these values have been confirmed, both increasing monotonically. A maximum confirmed Td is calculated
// both globally for all peers and also for each individual peer (meaning that the given peer has announced the head
// and it has also been downloaded from any peer, either before or after the given announcement).
// The linked list has a global tail where new confirmed Td entries are added and a separate head for each peer,
// pointing to the next Td entry that is higher than the peer's max confirmed Td (nil if it has already confirmed
// the current global head).
type updateStatsEntry struct {
	time mclock.AbsTime     // when this td was first confirmed (by any peer)
	td   *big.Int           // the confirmed total difficulty
	next *updateStatsEntry  // next (higher td, later time) entry, nil at the tail
}
   713  
// updateMaxConfirmedTd updates the block delay statistics of active peers. Whenever a new highest Td is confirmed,
// adds it to the end of a linked list together with the time it has been confirmed. Then checks which peers have
// already confirmed a head with the same or higher Td (which counts as zero block delay) and updates their statistics.
// Those who have not confirmed such a head by now will be updated by a subsequent checkUpdateStats call with a
// positive block delay value.
func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) {
	if f.maxConfirmedTd == nil || td.Cmp(f.maxConfirmedTd) > 0 {
		f.maxConfirmedTd = td
		// append a new entry to the global tail of the stats list
		newEntry := &updateStatsEntry{
			time: mclock.Now(),
			td:   td,
		}
		if f.lastUpdateStats != nil {
			f.lastUpdateStats.next = newEntry
		}
		f.lastUpdateStats = newEntry
		// credit peers that have already confirmed this td (zero block delay)
		for p := range f.peers {
			f.checkUpdateStats(p, newEntry)
		}
	}
}
   735  
// checkUpdateStats checks those peers who have not confirmed a certain highest Td (or a larger one) by the time it
// has been confirmed by another peer. If they have confirmed such a head by now, their stats are updated with the
// block delay which is (this peer's confirmation time)-(first confirmation time). After blockDelayTimeout has passed,
// the stats are updated with blockDelayTimeout value. In either case, the confirmed or timed out updateStatsEntry
// items are removed from the head of the linked list.
// If a new entry has been added to the global tail, it is passed as a parameter here even though this function
// assumes that it has already been added, so that if the peer's list is empty (all heads confirmed, head is nil),
// it can set the new head to newEntry.
func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) {
	now := mclock.Now()
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check update stats")
		return
	}
	if newEntry != nil && fp.firstUpdateStats == nil {
		fp.firstUpdateStats = newEntry
	}
	// entries older than blockDelayTimeout count as the maximum delay
	for fp.firstUpdateStats != nil && fp.firstUpdateStats.time <= now-mclock.AbsTime(blockDelayTimeout) {
		f.pm.serverPool.adjustBlockDelay(p.poolEntry, blockDelayTimeout)
		fp.firstUpdateStats = fp.firstUpdateStats.next
	}
	if fp.confirmedTd != nil {
		// entries at or below the peer's confirmed td are credited with the
		// actual delay since their first confirmation
		for fp.firstUpdateStats != nil && fp.firstUpdateStats.td.Cmp(fp.confirmedTd) <= 0 {
			f.pm.serverPool.adjustBlockDelay(p.poolEntry, time.Duration(now-fp.firstUpdateStats.time))
			fp.firstUpdateStats = fp.firstUpdateStats.next
		}
	}
}
   763  	}
   764  }