github.com/gnattishness/bazel-go-ethereum@v0.0.0-20190929123618-7022a154f56d/les/fetcher.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"math/big"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/log"
)

const (
	blockDelayTimeout    = time.Second * 10 // timeout for a peer to announce a head that has already been confirmed by others
	maxNodeCount         = 20               // maximum number of fetcherTreeNode entries remembered for each peer
	serverStateAvailable = 100              // number of recent blocks where state availability is assumed
)

// lightFetcher implements retrieval of newly announced headers. It also provides a peerHasBlock function for the
// ODR system to ensure that we only request data related to a certain block from peers who have already processed
// and announced that block.
type lightFetcher struct {
	handler *clientHandler
	chain   *light.LightChain

	lock            sync.Mutex // lock protects access to the fetcher's internal state variables except sent requests
	maxConfirmedTd  *big.Int
	peers           map[*peer]*fetcherPeerInfo
	lastUpdateStats *updateStatsEntry
	syncing         bool
	syncDone        chan *peer

	reqMu             sync.RWMutex // reqMu protects access to sent header fetch requests
	requested         map[uint64]fetchRequest
	deliverChn        chan fetchResponse
	timeoutChn        chan uint64
	requestTriggered  bool
	requestTrigger    chan struct{}
	lastTrustedHeader *types.Header

	closeCh chan struct{}
	wg      sync.WaitGroup
}

// fetcherPeerInfo holds fetcher-specific information about each active peer
type fetcherPeerInfo struct {
	root, lastAnnounced *fetcherTreeNode
	nodeCnt             int
	confirmedTd         *big.Int
	bestConfirmed       *fetcherTreeNode
	nodeByHash          map[common.Hash]*fetcherTreeNode
	firstUpdateStats    *updateStatsEntry
}

// fetcherTreeNode is a node of a tree that holds information about blocks recently
// announced and confirmed by a certain peer. Each new announce message from a peer
// adds nodes to the tree, based on the previous announced head and the reorg depth.
// There are three possible states for a tree node:
// - announced: not downloaded (known) yet, but we know its hash, number and td
// - intermediate: not announced directly, hash and td are empty, they are filled out when it becomes known
// - known: both announced by this peer and downloaded (from any peer).
// This structure makes it possible to always know which peer has a certain block,
// which is necessary for selecting a suitable peer for ODR requests and also for
// canonizing new heads. It also helps to always download the minimum necessary
// amount of headers with a single request.
type fetcherTreeNode struct {
	hash             common.Hash
	number           uint64
	td               *big.Int
	known, requested bool
	parent           *fetcherTreeNode
	children         []*fetcherTreeNode
}

// fetchRequest represents a header download request
type fetchRequest struct {
	hash    common.Hash
	amount  uint64
	peer    *peer
	sent    mclock.AbsTime
	timeout bool
}

// fetchResponse represents a header download response
type fetchResponse struct {
	reqID   uint64
	headers []*types.Header
	peer    *peer
}

// newLightFetcher creates a new light fetcher
func newLightFetcher(h *clientHandler) *lightFetcher {
	f := &lightFetcher{
		handler:        h,
		chain:          h.backend.blockchain,
		peers:          make(map[*peer]*fetcherPeerInfo),
		deliverChn:     make(chan fetchResponse, 100),
		requested:      make(map[uint64]fetchRequest),
		timeoutChn:     make(chan uint64),
		requestTrigger: make(chan struct{}, 1),
		syncDone:       make(chan *peer),
		closeCh:        make(chan struct{}),
		maxConfirmedTd: big.NewInt(0),
	}
	h.backend.peers.notify(f)

	f.wg.Add(1)
	go f.syncLoop()
	return f
}

func (f *lightFetcher) close() {
	close(f.closeCh)
	f.wg.Wait()
}

// syncLoop is the main event loop of the light fetcher
func (f *lightFetcher) syncLoop() {
	defer f.wg.Done()
	for {
		select {
		case <-f.closeCh:
			return
		// request loop keeps running until no further requests are necessary or possible
		case <-f.requestTrigger:
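			// The trigger channel is buffered with capacity one, so repeated triggers
			// coalesce. requestTriggered stays true while a follow-up trigger is
			// guaranteed to arrive (from the soft-timeout goroutine or the
			// failed-queue branch below), which lets announce skip redundant sends.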
			f.lock.Lock()
			var (
				rq      *distReq
				reqID   uint64
				syncing bool
			)
			if !f.syncing {
				rq, reqID, syncing = f.nextRequest()
			}
			f.requestTriggered = rq != nil
			f.lock.Unlock()

			if rq != nil {
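				// reqDist.queue returns a channel that yields the assigned peer once
				// the request has been sent; a closed channel (ok == false) means the
				// request could not be sent and must be retried.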
				if _, ok := <-f.handler.backend.reqDist.queue(rq); ok {
					if syncing {
						f.lock.Lock()
						f.syncing = true
						f.lock.Unlock()
					} else {
						go func() {
							time.Sleep(softRequestTimeout)
							f.reqMu.Lock()
							req, ok := f.requested[reqID]
							if ok {
								req.timeout = true
								f.requested[reqID] = req
							}
							f.reqMu.Unlock()
							// keep starting new requests while possible
							f.requestTrigger <- struct{}{}
						}()
					}
				} else {
					f.requestTrigger <- struct{}{}
				}
			}
		case reqID := <-f.timeoutChn:
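			// Hard timeout: unlike the soft timeout above, which only marks the
			// request as late for the response-time statistics, a hard timeout
			// abandons the request and drops the peer.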
			f.reqMu.Lock()
			req, ok := f.requested[reqID]
			if ok {
				delete(f.requested, reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.handler.backend.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)
				req.peer.Log().Debug("Fetching data timed out hard")
				go f.handler.removePeer(req.peer.id)
			}
		case resp := <-f.deliverChn:
			f.reqMu.Lock()
			req, ok := f.requested[resp.reqID]
			if ok && req.peer != resp.peer {
				ok = false
			}
			if ok {
				delete(f.requested, resp.reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.handler.backend.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), req.timeout)
			}
			f.lock.Lock()
			if !ok || !(f.syncing || f.processResponse(req, resp)) {
				resp.peer.Log().Debug("Failed processing response")
				go f.handler.removePeer(resp.peer.id)
			}
			f.lock.Unlock()
		case p := <-f.syncDone:
			f.lock.Lock()
			p.Log().Debug("Done synchronising with peer")
			f.checkSyncedHeaders(p)
			f.syncing = false
			f.lock.Unlock()
			f.requestTrigger <- struct{}{} // f.requestTriggered is always true here
		}
	}
}

// registerPeer adds a new peer to the fetcher's peer set
func (f *lightFetcher) registerPeer(p *peer) {
	p.lock.Lock()
	p.hasBlock = func(hash common.Hash, number uint64, hasState bool) bool {
		return f.peerHasBlock(p, hash, number, hasState)
	}
	p.lock.Unlock()

	f.lock.Lock()
	defer f.lock.Unlock()
	f.peers[p] = &fetcherPeerInfo{nodeByHash: make(map[common.Hash]*fetcherTreeNode)}
}

// unregisterPeer removes a peer from the fetcher's peer set
func (f *lightFetcher) unregisterPeer(p *peer) {
	p.lock.Lock()
	p.hasBlock = nil
	p.lock.Unlock()

	f.lock.Lock()
	defer f.lock.Unlock()

	// check for potential timed out block delay statistics
	f.checkUpdateStats(p, nil)
	delete(f.peers, p)
}
// announce processes a new announcement message received from a peer, adding new
// nodes to the peer's block tree and removing old nodes if necessary
func (f *lightFetcher) announce(p *peer, head *announceData) {
	f.lock.Lock()
	defer f.lock.Unlock()
	p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth)

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Announcement from unknown peer")
		return
	}

	if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {
		// announced tds should be strictly monotonic
		p.Log().Debug("Received non-monotonic td", "current", head.Td, "previous", fp.lastAnnounced.td)
		go f.handler.removePeer(p.id)
		return
	}

	n := fp.lastAnnounced
	for i := uint64(0); i < head.ReorgDepth; i++ {
		if n == nil {
			break
		}
		n = n.parent
	}
	// n is now the reorg common ancestor, add a new branch of nodes
	if n != nil && (head.Number >= n.number+maxNodeCount || head.Number <= n.number) {
		// if the announced head block height is lower than or equal to n's, or too
		// far from it to add intermediate nodes, then discard the previous
		// announcement info and trigger a resync
		n = nil
		fp.nodeCnt = 0
		fp.nodeByHash = make(map[common.Hash]*fetcherTreeNode)
	}
	if n != nil {
		// check if the node count is too high to add new nodes, discard the oldest ones if necessary
		locked := false
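		// the chain is locked lazily, only if pruning is actually needed, so that
		// the canonical-hash lookups below stay consistent while nodes are dropped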
		for uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil {
			if !locked {
				f.chain.LockChain()
				defer f.chain.UnlockChain()
				locked = true
			}
			// if one of root's children is canonical, keep it, delete other branches and root itself
			var newRoot *fetcherTreeNode
			for i, nn := range fp.root.children {
				if rawdb.ReadCanonicalHash(f.handler.backend.chainDb, nn.number) == nn.hash {
					fp.root.children = append(fp.root.children[:i], fp.root.children[i+1:]...)
					nn.parent = nil
					newRoot = nn
					break
				}
			}
			fp.deleteNode(fp.root)
			if n == fp.root {
				n = newRoot
			}
			fp.root = newRoot
			if newRoot == nil || !f.checkKnownNode(p, newRoot) {
				fp.bestConfirmed = nil
				fp.confirmedTd = nil
			}

			if n == nil {
				break
			}
		}
		if n != nil {
			for n.number < head.Number {
				nn := &fetcherTreeNode{number: n.number + 1, parent: n}
				n.children = append(n.children, nn)
				n = nn
				fp.nodeCnt++
			}
			n.hash = head.Hash
			n.td = head.Td
			fp.nodeByHash[n.hash] = n
		}
	}

	if n == nil {
		// could not find reorg common ancestor or had to delete entire tree, a new root and a resync is needed
		if fp.root != nil {
			fp.deleteNode(fp.root)
		}
		n = &fetcherTreeNode{hash: head.Hash, number: head.Number, td: head.Td}
		fp.root = n
		fp.nodeCnt++
		fp.nodeByHash[n.hash] = n
		fp.bestConfirmed = nil
		fp.confirmedTd = nil
	}

	f.checkKnownNode(p, n)
	p.lock.Lock()
	p.headInfo = head
	fp.lastAnnounced = n
	p.lock.Unlock()
	f.checkUpdateStats(p, nil)
	if !f.requestTriggered {
		f.requestTriggered = true
		f.requestTrigger <- struct{}{}
	}
}

// peerHasBlock returns true if we can assume the peer knows the given block
// based on its announcements
func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64, hasState bool) bool {
	f.lock.Lock()
	defer f.lock.Unlock()

	fp := f.peers[p]
	if fp == nil || fp.root == nil {
		return false
	}

	if hasState {
		if fp.lastAnnounced == nil || fp.lastAnnounced.number > number+serverStateAvailable {
			return false
		}
	}

	if f.syncing {
		// always return true when syncing
		// false positives are acceptable, a more sophisticated condition can be implemented later
		return true
	}

	if number >= fp.root.number {
		// it is recent enough that if it is known, it should be in the peer's block tree
		return fp.nodeByHash[hash] != nil
	}
	f.chain.LockChain()
	defer f.chain.UnlockChain()
	// if it's older than the peer's block tree root but it's in the same canonical chain
	// as the root, we can still be sure the peer knows it
	return rawdb.ReadCanonicalHash(f.handler.backend.chainDb, fp.root.number) == fp.root.hash && rawdb.ReadCanonicalHash(f.handler.backend.chainDb, number) == hash
}

// requestAmount calculates the amount of headers to be downloaded starting
// from a certain head backwards
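// For instance, if the peer announces head N and the deepest node of its tree
// that is already known locally is N-3, the walk below counts three unknown
// nodes and returns 3; if no known ancestor is found in the tree at all,
// everything down to genesis (n.number headers) is requested.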
func (f *lightFetcher) requestAmount(p *peer, n *fetcherTreeNode) uint64 {
	amount := uint64(0)
	nn := n
	for nn != nil && !f.checkKnownNode(p, nn) {
		nn = nn.parent
		amount++
	}
	if nn == nil {
		amount = n.number
	}
	return amount
}

// requestedID tells if a certain reqID has been requested by the fetcher
func (f *lightFetcher) requestedID(reqID uint64) bool {
	f.reqMu.RLock()
	_, ok := f.requested[reqID]
	f.reqMu.RUnlock()
	return ok
}

// nextRequest selects the peer and announced head to be requested next; the
// amount to be downloaded starting from the head backwards is also returned
func (f *lightFetcher) nextRequest() (*distReq, uint64, bool) {
	bestHash, bestAmount, bestTd, bestSyncing := f.findBestRequest()

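	// Note: this is an intentional pointer comparison. findBestRequest returns
	// f.maxConfirmedTd itself when it finds no better candidate, so identity
	// here means there is nothing new worth fetching.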
	if bestTd == f.maxConfirmedTd {
		return nil, 0, false
	}

	var rq *distReq
	reqID := genReqID()
	if bestSyncing {
		rq = f.newFetcherDistReqForSync(bestHash)
	} else {
		rq = f.newFetcherDistReq(bestHash, reqID, bestAmount)
	}
	return rq, reqID, bestSyncing
}

// findBestRequest finds the best head to request that has been announced by but not yet requested from a known peer.
// It also returns the announced Td (which should be verified after fetching the head),
// the necessary amount to request and whether a downloader sync is necessary instead of a normal header request.
func (f *lightFetcher) findBestRequest() (bestHash common.Hash, bestAmount uint64, bestTd *big.Int, bestSyncing bool) {
	bestTd = f.maxConfirmedTd
	bestSyncing = false

	for p, fp := range f.peers {
		for hash, n := range fp.nodeByHash {
			if f.checkKnownNode(p, n) || n.requested {
				continue
			}
			// if ulc mode is disabled, isTrustedHash returns true
			amount := f.requestAmount(p, n)
			if (bestTd == nil || n.td.Cmp(bestTd) > 0 || amount < bestAmount) && (f.isTrustedHash(hash) || f.maxConfirmedTd.Int64() == 0) {
				bestHash = hash
				bestTd = n.td
				bestAmount = amount
				bestSyncing = fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root)
			}
		}
	}
	return
}

// isTrustedHash checks if the block can be trusted by the minimum trusted fraction.
func (f *lightFetcher) isTrustedHash(hash common.Hash) bool {
	// If ultra light client mode is disabled, trust all hashes
	if f.handler.ulc == nil {
		return true
	}
	// Ultra light enabled, only trust after enough confirmations
	var agreed int
	for peer, info := range f.peers {
		if peer.trusted && info.nodeByHash[hash] != nil {
			agreed++
		}
	}
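	// e.g. with ten configured trusted signers and fraction = 51, at least six
	// of them must have announced the hash before it is trusted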
	return 100*agreed/len(f.handler.ulc.keys) >= f.handler.ulc.fraction
}

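// newFetcherDistReqForSync creates a distributor request that triggers a full
// downloader sync with a suitable peer instead of a plain header fetch; its
// cost is reported as zero since the sync is driven by the downloader rather
// than charged as a single on-demand request.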
func (f *lightFetcher) newFetcherDistReqForSync(bestHash common.Hash) *distReq {
	return &distReq{
		getCost: func(dp distPeer) uint64 {
			return 0
		},
		canSend: func(dp distPeer) bool {
			p := dp.(*peer)
			f.lock.Lock()
			defer f.lock.Unlock()

			if p.onlyAnnounce {
				return false
			}
			fp := f.peers[p]
			return fp != nil && fp.nodeByHash[bestHash] != nil
		},
		request: func(dp distPeer) func() {
			if f.handler.ulc != nil {
				// Keep last trusted header before sync
				f.setLastTrustedHeader(f.chain.CurrentHeader())
			}
			go func() {
				p := dp.(*peer)
				p.Log().Debug("Synchronisation started")
				f.handler.synchronise(p)
				f.syncDone <- p
			}()
			return nil
		},
	}
}

// newFetcherDistReq creates a new request for the distributor.
func (f *lightFetcher) newFetcherDistReq(bestHash common.Hash, reqID uint64, bestAmount uint64) *distReq {
	return &distReq{
		getCost: func(dp distPeer) uint64 {
			p := dp.(*peer)
			return p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
		},
		canSend: func(dp distPeer) bool {
			p := dp.(*peer)
			f.lock.Lock()
			defer f.lock.Unlock()

			if p.onlyAnnounce {
				return false
			}
			fp := f.peers[p]
			if fp == nil {
				return false
			}
			n := fp.nodeByHash[bestHash]
			return n != nil && !n.requested
		},
		request: func(dp distPeer) func() {
			p := dp.(*peer)
			f.lock.Lock()
			fp := f.peers[p]
			if fp != nil {
				n := fp.nodeByHash[bestHash]
				if n != nil {
					n.requested = true
				}
			}
			f.lock.Unlock()

			cost := p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
			p.fcServer.QueuedRequest(reqID, cost)
			f.reqMu.Lock()
			f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()}
			f.reqMu.Unlock()
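			// arm the hard timeout; if no matching response has arrived by then,
			// syncLoop drops the peer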
			go func() {
				time.Sleep(hardRequestTimeout)
				f.timeoutChn <- reqID
			}()
			return func() { p.RequestHeadersByHash(reqID, cost, bestHash, int(bestAmount), 0, true) }
		},
	}
}

// deliverHeaders delivers header download request responses for processing
func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types.Header) {
	f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer}
}

// processResponse processes header download request responses, returns true if successful
func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {
	if uint64(len(resp.headers)) != req.amount {
		req.peer.Log().Debug("Response content mismatch", "requested", req.amount, "delivered", len(resp.headers))
		return false
	}
	if resp.headers[0].Hash() != req.hash {
		req.peer.Log().Debug("Response content mismatch", "reqfrom", req.hash, "delfrom", resp.headers[0].Hash())
		return false
	}
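	// headers are delivered newest-first (the request above is sent with
	// reverse = true), so flip them into ascending order before insertion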
	headers := make([]*types.Header, req.amount)
	for i, header := range resp.headers {
		headers[int(req.amount)-1-i] = header
	}

	if _, err := f.chain.InsertHeaderChain(headers, 1); err != nil {
		if err == consensus.ErrFutureBlock {
			return true
		}
		log.Debug("Failed to insert header chain", "err", err)
		return false
	}
	tds := make([]*big.Int, len(headers))
	for i, header := range headers {
		td := f.chain.GetTd(header.Hash(), header.Number.Uint64())
		if td == nil {
			log.Debug("Total difficulty not found for header", "index", i+1, "number", header.Number, "hash", header.Hash())
			return false
		}
		tds[i] = td
	}
	f.newHeaders(headers, tds)
	return true
}

// newHeaders updates the block trees of all active peers according to a newly
// downloaded and validated batch of headers
func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) {
	var maxTd *big.Int

	for p, fp := range f.peers {
		if !f.checkAnnouncedHeaders(fp, headers, tds) {
			p.Log().Debug("Inconsistent announcement")
			go f.handler.removePeer(p.id)
		}
		if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) {
			maxTd = fp.confirmedTd
		}
	}

	if maxTd != nil {
		f.updateMaxConfirmedTd(maxTd)
	}
}

// checkAnnouncedHeaders updates peer's block tree if necessary after validating
// a batch of headers. It searches for the latest header in the batch that has a
// matching tree node (if any), and if it has not been marked as known already,
// sets it and its parents to known (even those which are older than the currently
// validated ones). The return value shows whether all hashes, numbers and Tds
// matched correctly to the announced values (otherwise the peer should be dropped).
func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*types.Header, tds []*big.Int) bool {
	var (
		n      *fetcherTreeNode
		header *types.Header
		td     *big.Int
	)

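	// iterate the batch from newest to oldest; once a matching tree node is
	// found, keep following parent links (loading older headers from the local
	// chain when the batch runs out) until a node already marked known is reached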
	for i := len(headers) - 1; ; i-- {
		if i < 0 {
			if n == nil {
				// no more headers and nothing to match
				return true
			}
			// we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching
			hash, number := header.ParentHash, header.Number.Uint64()-1
			td = f.chain.GetTd(hash, number)
			header = f.chain.GetHeader(hash, number)
			if header == nil || td == nil {
				log.Error("Missing parent of validated header", "hash", hash, "number", number)
				return false
			}
		} else {
			header = headers[i]
			td = tds[i]
		}
		hash := header.Hash()
		number := header.Number.Uint64()
		if n == nil {
			n = fp.nodeByHash[hash]
		}
		if n != nil {
			if n.td == nil {
				// node was unannounced
				if nn := fp.nodeByHash[hash]; nn != nil {
					// if there was already a node with the same hash, continue there and drop this one
					nn.children = append(nn.children, n.children...)
					n.children = nil
					fp.deleteNode(n)
					n = nn
				} else {
					n.hash = hash
					n.td = td
					fp.nodeByHash[hash] = n
				}
			}
			// check if it matches the header
			if n.hash != hash || n.number != number || n.td.Cmp(td) != 0 {
				// peer has previously made an invalid announcement
				return false
			}
			if n.known {
				// we reached a known node that matched our expectations, return with success
				return true
			}
			n.known = true
			if fp.confirmedTd == nil || td.Cmp(fp.confirmedTd) > 0 {
				fp.confirmedTd = td
				fp.bestConfirmed = n
			}
			n = n.parent
			if n == nil {
				return true
			}
		}
	}
}

// checkSyncedHeaders updates peer's block tree after synchronisation by marking
// downloaded headers as known. If none of the announced headers are found after
// syncing, the peer is dropped.
func (f *lightFetcher) checkSyncedHeaders(p *peer) {
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check sync headers")
		return
	}
	var (
		node = fp.lastAnnounced
		td   *big.Int
	)
	if f.handler.ulc != nil {
		// Roll back untrusted blocks
		h, unapproved := f.lastTrustedTreeNode(p)
		f.chain.Rollback(unapproved)
		node = fp.nodeByHash[h.Hash()]
	}
	// Find the last valid block
	for node != nil {
		if td = f.chain.GetTd(node.hash, node.number); td != nil {
			break
		}
		node = node.parent
	}
	// Now node is the latest downloaded/approved header after syncing
	if node == nil {
		p.Log().Debug("Synchronisation failed")
		go f.handler.removePeer(p.id)
		return
	}
	header := f.chain.GetHeader(node.hash, node.number)
	f.newHeaders([]*types.Header{header}, []*big.Int{td})
}

// lastTrustedTreeNode returns the last approved tree node and a list of unapproved hashes
func (f *lightFetcher) lastTrustedTreeNode(p *peer) (*types.Header, []common.Hash) {
	unapprovedHashes := make([]common.Hash, 0)
	current := f.chain.CurrentHeader()

	if f.lastTrustedHeader == nil {
		return current, unapprovedHashes
	}

	canonical := f.chain.CurrentHeader()
	if canonical.Number.Uint64() > f.lastTrustedHeader.Number.Uint64() {
		canonical = f.chain.GetHeaderByNumber(f.lastTrustedHeader.Number.Uint64())
	}
	commonAncestor := rawdb.FindCommonAncestor(f.handler.backend.chainDb, canonical, f.lastTrustedHeader)
	if commonAncestor == nil {
		log.Error("Common ancestor of last trusted header and canonical header is nil", "canonical hash", canonical.Hash(), "trusted hash", f.lastTrustedHeader.Hash())
		return current, unapprovedHashes
	}

	// walk back from the current head, collecting untrusted hashes, until the
	// common ancestor with the last trusted header is reached
	for current.Hash() != commonAncestor.Hash() {
		if f.isTrustedHash(current.Hash()) {
			break
		}
		unapprovedHashes = append(unapprovedHashes, current.Hash())
		current = f.chain.GetHeader(current.ParentHash, current.Number.Uint64()-1)
	}
	return current, unapprovedHashes
}
func (f *lightFetcher) setLastTrustedHeader(h *types.Header) {
	f.lock.Lock()
	defer f.lock.Unlock()
	f.lastTrustedHeader = h
}

// checkKnownNode checks if a block tree node is known (downloaded and validated).
// If it was not known previously but found in the database, sets its known flag
func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
	if n.known {
		return true
	}
	td := f.chain.GetTd(n.hash, n.number)
	if td == nil {
		return false
	}
	header := f.chain.GetHeader(n.hash, n.number)
	// check the availability of both header and td because reads are not protected by chain db mutex
	// Note: returning false is always safe here
	if header == nil {
		return false
	}

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check known nodes")
		return false
	}
	if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
		p.Log().Debug("Inconsistent announcement")
		go f.handler.removePeer(p.id)
	}
	if fp.confirmedTd != nil {
		f.updateMaxConfirmedTd(fp.confirmedTd)
	}
	return n.known
}

// deleteNode deletes a node and its child subtrees from a peer's block tree
func (fp *fetcherPeerInfo) deleteNode(n *fetcherTreeNode) {
	if n.parent != nil {
		for i, nn := range n.parent.children {
			if nn == n {
				n.parent.children = append(n.parent.children[:i], n.parent.children[i+1:]...)
				break
			}
		}
	}
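	// walk the subtree iteratively along the first child and recurse only into
	// sibling branches, so deleting a long chain does not grow the call stack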
	for {
		if n.td != nil {
			delete(fp.nodeByHash, n.hash)
		}
		fp.nodeCnt--
		if len(n.children) == 0 {
			return
		}
		for i, nn := range n.children {
			if i == 0 {
				n = nn
			} else {
				fp.deleteNode(nn)
			}
		}
	}
}

// updateStatsEntry items form a linked list that is expanded with a new item every time a new head with a higher Td
// than the previous one has been downloaded and validated. The list contains a series of maximum confirmed Td values
// and the time these values have been confirmed, both increasing monotonically. A maximum confirmed Td is calculated
// both globally for all peers and also for each individual peer (meaning that the given peer has announced the head
// and it has also been downloaded from any peer, either before or after the given announcement).
// The linked list has a global tail where new confirmed Td entries are added and a separate head for each peer,
// pointing to the next Td entry that is higher than the peer's max confirmed Td (nil if it has already confirmed
// the current global head).
type updateStatsEntry struct {
	time mclock.AbsTime
	td   *big.Int
	next *updateStatsEntry
}
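
// For example, after heads with Td 100, 110 and 120 have been confirmed, the
// list reads 100 -> 110 -> 120 with the global tail at 120; a peer whose best
// confirmed Td is still 100 has its head pointing at the 110 entry.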

// updateMaxConfirmedTd updates the block delay statistics of active peers. Whenever a new highest Td is confirmed,
// adds it to the end of a linked list together with the time it has been confirmed. Then checks which peers have
// already confirmed a head with the same or higher Td (which counts as zero block delay) and updates their statistics.
// Those who have not confirmed such a head by now will be updated by a subsequent checkUpdateStats call with a
// positive block delay value.
func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) {
	if f.maxConfirmedTd == nil || td.Cmp(f.maxConfirmedTd) > 0 {
		f.maxConfirmedTd = td
		newEntry := &updateStatsEntry{
			time: mclock.Now(),
			td:   td,
		}
		if f.lastUpdateStats != nil {
			f.lastUpdateStats.next = newEntry
		}

		f.lastUpdateStats = newEntry
		for p := range f.peers {
			f.checkUpdateStats(p, newEntry)
		}
	}
}

// checkUpdateStats checks those peers who have not confirmed a certain highest Td (or a larger one) by the time it
// has been confirmed by another peer. If they have confirmed such a head by now, their stats are updated with the
// block delay which is (this peer's confirmation time)-(first confirmation time). After blockDelayTimeout has passed,
// the stats are updated with the blockDelayTimeout value. In either case, the confirmed or timed out updateStatsEntry
// items are removed from the head of the linked list.
// If a new entry has been added to the global tail, it is passed in as newEntry so that, when the peer's list is
// empty (head is nil, meaning all heads are confirmed), the head can be set directly to the new entry.
func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) {
	now := mclock.Now()
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check update stats")
		return
	}

	if newEntry != nil && fp.firstUpdateStats == nil {
		fp.firstUpdateStats = newEntry
	}
	for fp.firstUpdateStats != nil && fp.firstUpdateStats.time <= now-mclock.AbsTime(blockDelayTimeout) {
		f.handler.backend.serverPool.adjustBlockDelay(p.poolEntry, blockDelayTimeout)
		fp.firstUpdateStats = fp.firstUpdateStats.next
	}
	if fp.confirmedTd != nil {
		for fp.firstUpdateStats != nil && fp.firstUpdateStats.td.Cmp(fp.confirmedTd) <= 0 {
			f.handler.backend.serverPool.adjustBlockDelay(p.poolEntry, time.Duration(now-fp.firstUpdateStats.time))
			fp.firstUpdateStats = fp.firstUpdateStats.next
		}
	}
}