github.com/sonm-io/go-ethereum@v1.8.18/les/fetcher.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package les implements the Light Ethereum Subprotocol.
package les

import (
	"math/big"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/log"
)

const (
	blockDelayTimeout    = time.Second * 10 // timeout for a peer to announce a head that has already been confirmed by others
	maxNodeCount         = 20               // maximum number of fetcherTreeNode entries remembered for each peer
	serverStateAvailable = 100              // number of recent blocks where state availability is assumed
)

// lightFetcher implements retrieval of newly announced headers. It also provides a peerHasBlock function for the
// ODR system to ensure that we only request data related to a certain block from peers who have already processed
// and announced that block.
type lightFetcher struct {
	pm    *ProtocolManager
	odr   *LesOdr
	chain *light.LightChain

	lock            sync.Mutex // lock protects access to the fetcher's internal state variables except sent requests
	maxConfirmedTd  *big.Int
	peers           map[*peer]*fetcherPeerInfo
	lastUpdateStats *updateStatsEntry
	syncing         bool
	syncDone        chan *peer

	reqMu      sync.RWMutex // reqMu protects access to sent header fetch requests
	requested  map[uint64]fetchRequest
	deliverChn chan fetchResponse
	timeoutChn chan uint64
	requestChn chan bool // true if initiated from outside
}

// fetcherPeerInfo holds fetcher-specific information about each active peer
type fetcherPeerInfo struct {
	root, lastAnnounced *fetcherTreeNode
	nodeCnt             int
	confirmedTd         *big.Int
	bestConfirmed       *fetcherTreeNode
	nodeByHash          map[common.Hash]*fetcherTreeNode
	firstUpdateStats    *updateStatsEntry
}

// fetcherTreeNode is a node of a tree that holds information about blocks recently
// announced and confirmed by a certain peer. Each new announce message from a peer
// adds nodes to the tree, based on the previous announced head and the reorg depth.
// There are three possible states for a tree node:
// - announced: not downloaded (known) yet, but we know its hash, number and td
// - intermediate: not known, hash and td are empty, they are filled out when it becomes known
// - known: both announced by this peer and downloaded (from any peer).
// This structure makes it possible to always know which peer has a certain block,
// which is necessary for selecting a suitable peer for ODR requests and also for
// canonizing new heads. It also helps to always download the minimum necessary
// amount of headers with a single request.
type fetcherTreeNode struct {
	hash             common.Hash
	number           uint64
	td               *big.Int
	known, requested bool
	parent           *fetcherTreeNode
	children         []*fetcherTreeNode
}
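
// nodeState is an illustrative helper, not part of the original fetcher and
// never called by it, showing how the three states described above map onto
// the struct fields.
func nodeState(n *fetcherTreeNode) string {
	switch {
	case n.known:
		return "known" // announced by this peer and downloaded from any peer
	case n.td != nil:
		return "announced" // hash, number and td known, header not downloaded yet
	default:
		return "intermediate" // placeholder, filled out once the block becomes known
	}
}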

// fetchRequest represents a header download request
type fetchRequest struct {
	hash    common.Hash
	amount  uint64
	peer    *peer
	sent    mclock.AbsTime
	timeout bool
}

// fetchResponse represents a header download response
type fetchResponse struct {
	reqID   uint64
	headers []*types.Header
	peer    *peer
}

// newLightFetcher creates a new light fetcher
func newLightFetcher(pm *ProtocolManager) *lightFetcher {
	f := &lightFetcher{
		pm:             pm,
		chain:          pm.blockchain.(*light.LightChain),
		odr:            pm.odr,
		peers:          make(map[*peer]*fetcherPeerInfo),
		deliverChn:     make(chan fetchResponse, 100),
		requested:      make(map[uint64]fetchRequest),
		timeoutChn:     make(chan uint64),
		requestChn:     make(chan bool, 100),
		syncDone:       make(chan *peer),
		maxConfirmedTd: big.NewInt(0),
	}
	pm.peers.notify(f)

	f.pm.wg.Add(1)
	go f.syncLoop()
	return f
}

// syncLoop is the main event loop of the light fetcher
func (f *lightFetcher) syncLoop() {
	requesting := false
	defer f.pm.wg.Done()
	for {
		select {
		case <-f.pm.quitSync:
			return
		// when a new announce is received, request loop keeps running until
		// no further requests are necessary or possible
		case newAnnounce := <-f.requestChn:
			f.lock.Lock()
			s := requesting
			requesting = false
			var (
				rq    *distReq
				reqID uint64
			)
			if !f.syncing && !(newAnnounce && s) {
				rq, reqID = f.nextRequest()
			}
			syncing := f.syncing
			f.lock.Unlock()

			if rq != nil {
				requesting = true
				_, ok := <-f.pm.reqDist.queue(rq)
				if !ok {
					f.requestChn <- false
				}

				if !syncing {
					go func() {
						time.Sleep(softRequestTimeout)
						f.reqMu.Lock()
						req, ok := f.requested[reqID]
						if ok {
							req.timeout = true
							f.requested[reqID] = req
						}
						f.reqMu.Unlock()
						// keep starting new requests while possible
						f.requestChn <- false
					}()
				}
			}
		case reqID := <-f.timeoutChn:
			f.reqMu.Lock()
			req, ok := f.requested[reqID]
			if ok {
				delete(f.requested, reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)
				req.peer.Log().Debug("Fetching data timed out hard")
				go f.pm.removePeer(req.peer.id)
			}
		case resp := <-f.deliverChn:
			f.reqMu.Lock()
			req, ok := f.requested[resp.reqID]
			if ok && req.peer != resp.peer {
				ok = false
			}
			if ok {
				delete(f.requested, resp.reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), req.timeout)
			}
			f.lock.Lock()
			if !ok || !(f.syncing || f.processResponse(req, resp)) {
				resp.peer.Log().Debug("Failed processing response")
				go f.pm.removePeer(resp.peer.id)
			}
			f.lock.Unlock()
		case p := <-f.syncDone:
			f.lock.Lock()
			p.Log().Debug("Done synchronising with peer")
			f.checkSyncedHeaders(p)
			f.syncing = false
			f.lock.Unlock()
		}
	}
}
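
// The loop above implements a two-level timeout scheme: after
// softRequestTimeout the request is merely marked as slow (req.timeout = true,
// which later feeds into the server pool response time statistics) and a new
// request may be started, while after hardRequestTimeout the reqID arrives on
// timeoutChn and the peer is dropped. A minimal sketch of the same pattern:
//
//	time.AfterFunc(softRequestTimeout, func() { /* mark slow, keep going */ })
//	time.AfterFunc(hardRequestTimeout, func() { /* give up, drop the peer */ })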

// registerPeer adds a new peer to the fetcher's peer set
func (f *lightFetcher) registerPeer(p *peer) {
	p.lock.Lock()
	p.hasBlock = func(hash common.Hash, number uint64, hasState bool) bool {
		return f.peerHasBlock(p, hash, number, hasState)
	}
	p.lock.Unlock()

	f.lock.Lock()
	defer f.lock.Unlock()

	f.peers[p] = &fetcherPeerInfo{nodeByHash: make(map[common.Hash]*fetcherTreeNode)}
}

// unregisterPeer removes a peer from the fetcher's peer set
func (f *lightFetcher) unregisterPeer(p *peer) {
	p.lock.Lock()
	p.hasBlock = nil
	p.lock.Unlock()

	f.lock.Lock()
	defer f.lock.Unlock()

	// check for potential timed out block delay statistics
	f.checkUpdateStats(p, nil)
	delete(f.peers, p)
}

// announce processes a new announcement message received from a peer, adding new
// nodes to the peer's block tree and removing old nodes if necessary
func (f *lightFetcher) announce(p *peer, head *announceData) {
	f.lock.Lock()
	defer f.lock.Unlock()
	p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth)

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Announcement from unknown peer")
		return
	}

	if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {
		// announced tds should be strictly monotonic
		p.Log().Debug("Received non-monotonic td", "current", head.Td, "previous", fp.lastAnnounced.td)
		go f.pm.removePeer(p.id)
		return
	}

	n := fp.lastAnnounced
	for i := uint64(0); i < head.ReorgDepth; i++ {
		if n == nil {
			break
		}
		n = n.parent
	}
	// n is now the reorg common ancestor, add a new branch of nodes
	if n != nil && (head.Number >= n.number+maxNodeCount || head.Number <= n.number) {
		// if the announced head is at the same height as n or below it, or too far
		// above it to add intermediate nodes, then discard the previous announcement
		// info and trigger a resync
		n = nil
		fp.nodeCnt = 0
		fp.nodeByHash = make(map[common.Hash]*fetcherTreeNode)
	}
	if n != nil {
		// check if the node count is too high to add new nodes, discard oldest ones if necessary
		locked := false
		for uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil {
			if !locked {
				f.chain.LockChain()
				defer f.chain.UnlockChain()
				locked = true
			}
			// if one of root's children is canonical, keep it, delete other branches and root itself
			var newRoot *fetcherTreeNode
			for i, nn := range fp.root.children {
				if rawdb.ReadCanonicalHash(f.pm.chainDb, nn.number) == nn.hash {
					fp.root.children = append(fp.root.children[:i], fp.root.children[i+1:]...)
					nn.parent = nil
					newRoot = nn
					break
				}
			}
			fp.deleteNode(fp.root)
			if n == fp.root {
				n = newRoot
			}
			fp.root = newRoot
			if newRoot == nil || !f.checkKnownNode(p, newRoot) {
				fp.bestConfirmed = nil
				fp.confirmedTd = nil
			}

			if n == nil {
				break
			}
		}
		if n != nil {
			for n.number < head.Number {
				nn := &fetcherTreeNode{number: n.number + 1, parent: n}
				n.children = append(n.children, nn)
				n = nn
				fp.nodeCnt++
			}
			n.hash = head.Hash
			n.td = head.Td
			fp.nodeByHash[n.hash] = n
		}
	}
	if n == nil {
		// could not find the reorg common ancestor or had to delete the entire tree,
		// a new root and a resync are needed
		if fp.root != nil {
			fp.deleteNode(fp.root)
		}
		n = &fetcherTreeNode{hash: head.Hash, number: head.Number, td: head.Td}
		fp.root = n
		fp.nodeCnt++
		fp.nodeByHash[n.hash] = n
		fp.bestConfirmed = nil
		fp.confirmedTd = nil
	}

	f.checkKnownNode(p, n)
	p.lock.Lock()
	p.headInfo = head
	fp.lastAnnounced = n
	p.lock.Unlock()
	f.checkUpdateStats(p, nil)
	f.requestChn <- true
}
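
// A worked example of the tree maintenance above (hypothetical numbers): if a
// peer previously announced head #105 and now announces #103 with ReorgDepth 3,
// the fetcher first walks three parents back from #105 to the common ancestor
// #102, then appends a single child node for #103 and fills in its hash and td
// from the announcement. Had the new head been #130 (at least maxNodeCount
// blocks above #102) or #101 (not above #102 at all), the previous tree would
// be discarded and rebuilt from a fresh root instead, triggering a resync.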

// peerHasBlock returns true if we can assume the peer knows the given block
// based on its announcements
func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64, hasState bool) bool {
	f.lock.Lock()
	defer f.lock.Unlock()

	fp := f.peers[p]
	if fp == nil || fp.root == nil {
		return false
	}

	if hasState {
		if fp.lastAnnounced == nil || fp.lastAnnounced.number > number+serverStateAvailable {
			return false
		}
	}

	if f.syncing {
		// always return true when syncing
		// false positives are acceptable, a more sophisticated condition can be implemented later
		return true
	}

	if number >= fp.root.number {
		// it is recent enough that if it is known, it should be in the peer's block tree
		return fp.nodeByHash[hash] != nil
	}
	f.chain.LockChain()
	defer f.chain.UnlockChain()
	// if it's older than the peer's block tree root but it's in the same canonical chain
	// as the root, we can still be sure the peer knows it
	return rawdb.ReadCanonicalHash(f.pm.chainDb, fp.root.number) == fp.root.hash && rawdb.ReadCanonicalHash(f.pm.chainDb, number) == hash
}
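
// For example, with serverStateAvailable = 100: a peer whose last announced
// head is block #1000 is assumed to still serve state for blocks #900 and
// above, so peerHasBlock(p, hash, 950, true) can succeed while
// peerHasBlock(p, hash, 899, true) always returns false.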

// requestAmount calculates the amount of headers to be downloaded starting
// from a certain head backwards
func (f *lightFetcher) requestAmount(p *peer, n *fetcherTreeNode) uint64 {
	amount := uint64(0)
	nn := n
	for nn != nil && !f.checkKnownNode(p, nn) {
		nn = nn.parent
		amount++
	}
	if nn == nil {
		amount = n.number
	}
	return amount
}
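
// Example (hypothetical numbers): if n is the announced head #120 and the
// nearest known ancestor in this peer's tree is #117, requestAmount returns 3
// (covering #120, #119 and #118). If the walk falls off the tree without
// reaching a known node, it falls back to n.number, i.e. everything from the
// head back to genesis.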

// requestedID tells if a certain reqID has been requested by the fetcher
func (f *lightFetcher) requestedID(reqID uint64) bool {
	f.reqMu.RLock()
	_, ok := f.requested[reqID]
	f.reqMu.RUnlock()
	return ok
}

// nextRequest selects the peer and announced head to be requested next; the
// amount of headers to be downloaded, starting from the head backwards, is
// also returned
func (f *lightFetcher) nextRequest() (*distReq, uint64) {
	var (
		bestHash   common.Hash
		bestAmount uint64
	)
	bestTd := f.maxConfirmedTd
	bestSyncing := false

	for p, fp := range f.peers {
		for hash, n := range fp.nodeByHash {
			if !f.checkKnownNode(p, n) && !n.requested && (bestTd == nil || n.td.Cmp(bestTd) >= 0) {
				amount := f.requestAmount(p, n)
				if bestTd == nil || n.td.Cmp(bestTd) > 0 || amount < bestAmount {
					bestHash = hash
					bestAmount = amount
					bestTd = n.td
					bestSyncing = fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root)
				}
			}
		}
	}
	if bestTd == f.maxConfirmedTd {
		return nil, 0
	}

	f.syncing = bestSyncing

	var rq *distReq
	reqID := genReqID()
	if f.syncing {
		rq = &distReq{
			getCost: func(dp distPeer) uint64 {
				return 0
			},
			canSend: func(dp distPeer) bool {
				p := dp.(*peer)
				f.lock.Lock()
				defer f.lock.Unlock()

				fp := f.peers[p]
				return fp != nil && fp.nodeByHash[bestHash] != nil
			},
			request: func(dp distPeer) func() {
				go func() {
					p := dp.(*peer)
					p.Log().Debug("Synchronisation started")
					f.pm.synchronise(p)
					f.syncDone <- p
				}()
				return nil
			},
		}
	} else {
		rq = &distReq{
			getCost: func(dp distPeer) uint64 {
				p := dp.(*peer)
				return p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
			},
			canSend: func(dp distPeer) bool {
				p := dp.(*peer)
				f.lock.Lock()
				defer f.lock.Unlock()

				fp := f.peers[p]
				if fp == nil {
					return false
				}
				n := fp.nodeByHash[bestHash]
				return n != nil && !n.requested
			},
			request: func(dp distPeer) func() {
				p := dp.(*peer)
				f.lock.Lock()
				fp := f.peers[p]
				if fp != nil {
					n := fp.nodeByHash[bestHash]
					if n != nil {
						n.requested = true
					}
				}
				f.lock.Unlock()

				cost := p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
				p.fcServer.QueueRequest(reqID, cost)
				f.reqMu.Lock()
				f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()}
				f.reqMu.Unlock()
				go func() {
					time.Sleep(hardRequestTimeout)
					f.timeoutChn <- reqID
				}()
				return func() { p.RequestHeadersByHash(reqID, cost, bestHash, int(bestAmount), 0, true) }
			},
		}
	}
	return rq, reqID
}
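
// The request distributor drives a distReq through three hooks, as used twice
// above: getCost prices the request for flow control, canSend filters
// suitable peers, and request fires the actual send. exampleDistReq is an
// illustrative no-op sketch of that contract, not used by the fetcher itself:
func exampleDistReq() *distReq {
	return &distReq{
		getCost: func(dp distPeer) uint64 { return 0 },         // free under flow control
		canSend: func(dp distPeer) bool { return true },        // any peer will do
		request: func(dp distPeer) func() { return func() {} }, // nothing to send
	}
}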

// deliverHeaders delivers header download request responses for processing
func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types.Header) {
	f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer}
}
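
// A hypothetical call site sketch: the LES message handler, after decoding a
// header response, would first check ownership of the request id and then
// hand the headers over roughly like this (resp and p are assumed names):
//
//	if f.requestedID(resp.ReqID) {
//		f.deliverHeaders(p, resp.ReqID, resp.Headers)
//	} else {
//		// not a fetcher request, route it to the downloader instead
//	}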

// processResponse processes header download request responses, returns true if successful
func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {
	if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {
		req.peer.Log().Debug("Response content mismatch", "requested", req.amount, "reqfrom", req.hash, "delivered", len(resp.headers))
		return false
	}
	headers := make([]*types.Header, req.amount)
	for i, header := range resp.headers {
		headers[int(req.amount)-1-i] = header
	}
	if _, err := f.chain.InsertHeaderChain(headers, 1); err != nil {
		if err == consensus.ErrFutureBlock {
			return true
		}
		log.Debug("Failed to insert header chain", "err", err)
		return false
	}
	tds := make([]*big.Int, len(headers))
	for i, header := range headers {
		td := f.chain.GetTd(header.Hash(), header.Number.Uint64())
		if td == nil {
			log.Debug("Total difficulty not found for header", "index", i+1, "number", header.Number, "hash", header.Hash())
			return false
		}
		tds[i] = td
	}
	f.newHeaders(headers, tds)
	return true
}
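
// Note the reversal above: the request built in nextRequest asks for headers
// in reverse order starting from the announced hash, so a response arrives
// newest-first, e.g. [#120, #119, #118], while InsertHeaderChain expects
// ascending order, so the batch is flipped into [#118, #119, #120] first.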

// newHeaders updates the block trees of all active peers according to a newly
// downloaded and validated batch of headers
func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) {
	var maxTd *big.Int
	for p, fp := range f.peers {
		if !f.checkAnnouncedHeaders(fp, headers, tds) {
			p.Log().Debug("Inconsistent announcement")
			go f.pm.removePeer(p.id)
		}
		// track the highest confirmed Td across all peers
		if fp.confirmedTd != nil && (maxTd == nil || fp.confirmedTd.Cmp(maxTd) > 0) {
			maxTd = fp.confirmedTd
		}
	}
	if maxTd != nil {
		f.updateMaxConfirmedTd(maxTd)
	}
}

// checkAnnouncedHeaders updates peer's block tree if necessary after validating
// a batch of headers. It searches for the latest header in the batch that has a
// matching tree node (if any), and if it has not been marked as known already,
// sets it and its parents to known (even those which are older than the currently
// validated ones). Return value shows if all hashes, numbers and Tds matched
// correctly to the announced values (otherwise the peer should be dropped).
func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*types.Header, tds []*big.Int) bool {
	var (
		n      *fetcherTreeNode
		header *types.Header
		td     *big.Int
	)

	for i := len(headers) - 1; ; i-- {
		if i < 0 {
			if n == nil {
				// no more headers and nothing to match
				return true
			}
			// we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching
			hash, number := header.ParentHash, header.Number.Uint64()-1
			td = f.chain.GetTd(hash, number)
			header = f.chain.GetHeader(hash, number)
			if header == nil || td == nil {
				log.Error("Missing parent of validated header", "hash", hash, "number", number)
				return false
			}
		} else {
			header = headers[i]
			td = tds[i]
		}
		hash := header.Hash()
		number := header.Number.Uint64()
		if n == nil {
			n = fp.nodeByHash[hash]
		}
		if n != nil {
			if n.td == nil {
				// node was unannounced
				if nn := fp.nodeByHash[hash]; nn != nil {
					// if there was already a node with the same hash, continue there and drop this one
					nn.children = append(nn.children, n.children...)
					n.children = nil
					fp.deleteNode(n)
					n = nn
				} else {
					n.hash = hash
					n.td = td
					fp.nodeByHash[hash] = n
				}
			}
			// check if it matches the header
			if n.hash != hash || n.number != number || n.td.Cmp(td) != 0 {
				// peer has previously made an invalid announcement
				return false
			}
			if n.known {
				// we reached a known node that matched our expectations, return with success
				return true
			}
			n.known = true
			if fp.confirmedTd == nil || td.Cmp(fp.confirmedTd) > 0 {
				fp.confirmedTd = td
				fp.bestConfirmed = n
			}
			n = n.parent
			if n == nil {
				return true
			}
		}
	}
}

// checkSyncedHeaders updates peer's block tree after synchronisation by marking
// downloaded headers as known. If none of the announced headers are found after
// syncing, the peer is dropped.
func (f *lightFetcher) checkSyncedHeaders(p *peer) {
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check sync headers")
		return
	}
	n := fp.lastAnnounced
	var td *big.Int
	for n != nil {
		if td = f.chain.GetTd(n.hash, n.number); td != nil {
			break
		}
		n = n.parent
	}
	// now n is the latest downloaded header after syncing
	if n == nil {
		p.Log().Debug("Synchronisation failed")
		go f.pm.removePeer(p.id)
	} else {
		header := f.chain.GetHeader(n.hash, n.number)
		f.newHeaders([]*types.Header{header}, []*big.Int{td})
	}
}

// checkKnownNode checks if a block tree node is known (downloaded and validated).
// If it was not known previously but found in the database, sets its known flag
func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
	if n.known {
		return true
	}
	td := f.chain.GetTd(n.hash, n.number)
	if td == nil {
		return false
	}
	header := f.chain.GetHeader(n.hash, n.number)
	// check the availability of both header and td because reads are not protected by chain db mutex
	// Note: returning false is always safe here
	if header == nil {
		return false
	}

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check known nodes")
		return false
	}
	if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
		p.Log().Debug("Inconsistent announcement")
		go f.pm.removePeer(p.id)
	}
	if fp.confirmedTd != nil {
		f.updateMaxConfirmedTd(fp.confirmedTd)
	}
	return n.known
}

// deleteNode deletes a node and its child subtrees from a peer's block tree
func (fp *fetcherPeerInfo) deleteNode(n *fetcherTreeNode) {
	if n.parent != nil {
		for i, nn := range n.parent.children {
			if nn == n {
				n.parent.children = append(n.parent.children[:i], n.parent.children[i+1:]...)
				break
			}
		}
	}
	for {
		if n.td != nil {
			delete(fp.nodeByHash, n.hash)
		}
		fp.nodeCnt--
		if len(n.children) == 0 {
			return
		}
		for i, nn := range n.children {
			if i == 0 {
				n = nn
			} else {
				fp.deleteNode(nn)
			}
		}
	}
}
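
// Note on the traversal above: the loop walks the first child iteratively and
// recurses only into the remaining siblings, so deleting a long straight
// branch (the common case) needs no recursion at all. Nodes with td == nil
// are intermediate placeholders that were never entered into nodeByHash,
// hence the guard before the map delete.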

// updateStatsEntry items form a linked list that is expanded with a new item every time a new head with a higher Td
// than the previous one has been downloaded and validated. The list contains a series of maximum confirmed Td values
// and the time these values have been confirmed, both increasing monotonically. A maximum confirmed Td is calculated
// both globally for all peers and also for each individual peer (meaning that the given peer has announced the head
// and it has also been downloaded from any peer, either before or after the given announcement).
// The linked list has a global tail where new confirmed Td entries are added and a separate head for each peer,
// pointing to the next Td entry that is higher than the peer's max confirmed Td (nil if it has already confirmed
// the current global head).
type updateStatsEntry struct {
	time mclock.AbsTime
	td   *big.Int
	next *updateStatsEntry
}
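
// Example of the list shape (hypothetical Td values): after heads with Td 10,
// 12 and 15 have been confirmed, the list is 10 -> 12 -> 15 and
// f.lastUpdateStats points at the 15 entry. A peer whose own confirmed Td is
// 12 has firstUpdateStats pointing at the 15 entry (the next one it still owes
// a confirmation for), while a peer that already confirmed 15 has
// firstUpdateStats == nil.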

// updateMaxConfirmedTd updates the block delay statistics of active peers. Whenever a new highest Td is confirmed,
// adds it to the end of a linked list together with the time it has been confirmed. Then checks which peers have
// already confirmed a head with the same or higher Td (which counts as zero block delay) and updates their statistics.
// Those who have not confirmed such a head by now will be updated by a subsequent checkUpdateStats call with a
// positive block delay value.
func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) {
	if f.maxConfirmedTd == nil || td.Cmp(f.maxConfirmedTd) > 0 {
		f.maxConfirmedTd = td
		newEntry := &updateStatsEntry{
			time: mclock.Now(),
			td:   td,
		}
		if f.lastUpdateStats != nil {
			f.lastUpdateStats.next = newEntry
		}
		f.lastUpdateStats = newEntry
		for p := range f.peers {
			f.checkUpdateStats(p, newEntry)
		}
	}
}

// checkUpdateStats checks those peers who have not confirmed a certain highest Td (or a larger one) by the time it
// has been confirmed by another peer. If they have confirmed such a head by now, their stats are updated with the
// block delay which is (this peer's confirmation time)-(first confirmation time). After blockDelayTimeout has passed,
// the stats are updated with the blockDelayTimeout value. In either case, the confirmed or timed out updateStatsEntry
// items are removed from the head of the linked list.
// If a new entry has been added to the global tail, it is passed as a parameter here even though this function
// assumes that it has already been added, so that if the peer's list is empty (all heads confirmed, head is nil),
// it can set the new head to newEntry.
func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) {
	now := mclock.Now()
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check update stats")
		return
	}
	if newEntry != nil && fp.firstUpdateStats == nil {
		fp.firstUpdateStats = newEntry
	}
	for fp.firstUpdateStats != nil && fp.firstUpdateStats.time <= now-mclock.AbsTime(blockDelayTimeout) {
		f.pm.serverPool.adjustBlockDelay(p.poolEntry, blockDelayTimeout)
		fp.firstUpdateStats = fp.firstUpdateStats.next
	}
	if fp.confirmedTd != nil {
		for fp.firstUpdateStats != nil && fp.firstUpdateStats.td.Cmp(fp.confirmedTd) <= 0 {
			f.pm.serverPool.adjustBlockDelay(p.poolEntry, time.Duration(now-fp.firstUpdateStats.time))
			fp.firstUpdateStats = fp.firstUpdateStats.next
		}
	}
}
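
// For example (hypothetical times): if the current head was first confirmed
// anywhere at time t0 and this peer confirms it at t0+2s, its server pool
// stats are charged a 2s block delay. If it still has not confirmed the head
// blockDelayTimeout (10s) after t0, it is charged the full timeout and the
// entry is dropped from its list without waiting any longer.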