github.com/snowblossomcoin/go-ethereum@v1.9.25/les/server_handler.go (about)

     1  // Copyright 2019 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package les
    18  
    19  import (
    20  	"crypto/ecdsa"
    21  	"encoding/binary"
    22  	"encoding/json"
    23  	"errors"
    24  	"sync"
    25  	"sync/atomic"
    26  	"time"
    27  
    28  	"github.com/ethereum/go-ethereum/common"
    29  	"github.com/ethereum/go-ethereum/common/mclock"
    30  	"github.com/ethereum/go-ethereum/core"
    31  	"github.com/ethereum/go-ethereum/core/rawdb"
    32  	"github.com/ethereum/go-ethereum/core/state"
    33  	"github.com/ethereum/go-ethereum/core/types"
    34  	"github.com/ethereum/go-ethereum/ethdb"
    35  	lps "github.com/ethereum/go-ethereum/les/lespay/server"
    36  	"github.com/ethereum/go-ethereum/light"
    37  	"github.com/ethereum/go-ethereum/log"
    38  	"github.com/ethereum/go-ethereum/metrics"
    39  	"github.com/ethereum/go-ethereum/p2p"
    40  	"github.com/ethereum/go-ethereum/p2p/enode"
    41  	"github.com/ethereum/go-ethereum/p2p/nodestate"
    42  	"github.com/ethereum/go-ethereum/rlp"
    43  	"github.com/ethereum/go-ethereum/trie"
    44  )
    45  
const (
	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header
	ethVersion        = 63              // equivalent eth version for the downloader

	MaxHeaderFetch           = 192 // Amount of block headers to be fetched per retrieval request
	MaxBodyFetch             = 32  // Amount of block bodies to be fetched per retrieval request
	MaxReceiptFetch          = 128 // Amount of transaction receipts to allow fetching per request
	MaxCodeFetch             = 64  // Amount of contract codes to allow fetching per request
	MaxProofsFetch           = 64  // Amount of merkle proofs to be fetched per retrieval request
	MaxHelperTrieProofsFetch = 64  // Amount of helper tries to be fetched per retrieval request
	MaxTxSend                = 64  // Amount of transactions to be sent per request
	MaxTxStatus              = 256 // Amount of transactions to be queried per request
)
    60  
// Errors returned to (or used to disconnect) client peers by the server
// handler.
var (
	errTooManyInvalidRequest = errors.New("too many invalid requests made")
	errFullClientPool        = errors.New("client pool is full")
)
    65  
// serverHandler is responsible for serving light clients and processing
// all incoming light requests.
type serverHandler struct {
	blockchain *core.BlockChain // Canonical chain, queried for headers, bodies, receipts and state.
	chainDb    ethdb.Database   // Underlying database, used for helper trie lookups.
	txpool     *core.TxPool     // Transaction pool used to serve SendTxV2/GetTxStatus requests.
	server     *LesServer       // Owning LES server (client pool, cost tracker, serving queue, ...).

	closeCh chan struct{}  // Channel used to exit all background routines of handler.
	wg      sync.WaitGroup // WaitGroup used to track all background routines of handler.
	synced  func() bool    // Callback function used to determine whether local node is synced.

	// Testing fields
	addTxsSync bool // If set, received transactions are added to the pool synchronously (tests only).
}
    81  
    82  func newServerHandler(server *LesServer, blockchain *core.BlockChain, chainDb ethdb.Database, txpool *core.TxPool, synced func() bool) *serverHandler {
    83  	handler := &serverHandler{
    84  		server:     server,
    85  		blockchain: blockchain,
    86  		chainDb:    chainDb,
    87  		txpool:     txpool,
    88  		closeCh:    make(chan struct{}),
    89  		synced:     synced,
    90  	}
    91  	return handler
    92  }
    93  
// start starts the server handler by spawning the block broadcast loop as a
// background goroutine tracked by the handler's wait group.
func (h *serverHandler) start() {
	h.wg.Add(1)
	go h.broadcastLoop()
}
    99  
// stop stops the server handler. It signals all background routines to exit
// via closeCh and blocks until every one of them has returned.
func (h *serverHandler) stop() {
	close(h.closeCh)
	h.wg.Wait()
}
   105  
   106  // runPeer is the p2p protocol run function for the given version.
   107  func (h *serverHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error {
   108  	peer := newClientPeer(int(version), h.server.config.NetworkId, p, newMeteredMsgWriter(rw, int(version)))
   109  	defer peer.close()
   110  	h.wg.Add(1)
   111  	defer h.wg.Done()
   112  	return h.handle(peer)
   113  }
   114  
// handle runs the lifecycle of a single light client connection: it performs
// the LES handshake, registers the peer in the node state machine, admits it
// to the client pool and then serves its messages in a loop until the
// connection fails, is rejected, or is torn down.
func (h *serverHandler) handle(p *clientPeer) error {
	p.Log().Debug("Light Ethereum peer connected", "name", p.Name())

	// Execute the LES handshake
	var (
		head   = h.blockchain.CurrentHeader()
		hash   = head.Hash()
		number = head.Number.Uint64()
		td     = h.blockchain.GetTd(hash, number)
	)
	if err := p.Handshake(td, hash, number, h.blockchain.Genesis().Hash(), h.server); err != nil {
		p.Log().Debug("Light Ethereum handshake failed", "err", err)
		return err
	}
	// Reject the duplicated peer, otherwise register it to peerset.
	// The existence check and the registration run inside one nodestate
	// operation so a concurrent connection from the same node cannot race
	// between them.
	var registered bool
	if err := h.server.ns.Operation(func() {
		if h.server.ns.GetField(p.Node(), clientPeerField) != nil {
			registered = true
		} else {
			h.server.ns.SetFieldSub(p.Node(), clientPeerField, p)
		}
	}); err != nil {
		return err
	}
	if registered {
		return errAlreadyRegistered
	}

	// Clear the peer field and shut down flow control whenever this
	// function returns, regardless of the exit path.
	defer func() {
		h.server.ns.SetField(p.Node(), clientPeerField, nil)
		if p.fcClient != nil { // is nil when connecting another server
			p.fcClient.Disconnect()
		}
	}()
	if p.server {
		// connected to another server, no messages expected, just wait for disconnection
		_, err := p.rw.ReadMsg()
		return err
	}
	// Reject light clients if server is not synced.
	//
	// Put this checking here, so that "non-synced" les-server peers are still allowed
	// to keep the connection.
	if !h.synced() {
		p.Log().Debug("Light server not synced, rejecting peer")
		return p2p.DiscRequested
	}
	// Disconnect the inbound peer if it's rejected by clientPool
	if cap, err := h.server.clientPool.connect(p); cap != p.fcParams.MinRecharge || err != nil {
		p.Log().Debug("Light Ethereum peer rejected", "err", errFullClientPool)
		return errFullClientPool
	}
	// Fetch the priority balance object assigned by the client pool; a nil
	// balance means the pool did not set one up, so drop the peer.
	p.balance, _ = h.server.ns.GetField(p.Node(), h.server.clientPool.BalanceField).(*lps.NodeBalance)
	if p.balance == nil {
		return p2p.DiscRequested
	}
	activeCount, _ := h.server.clientPool.pp.Active()
	clientConnectionGauge.Update(int64(activeCount))

	var wg sync.WaitGroup // Wait group used to track all in-flight task routines.

	connectedAt := mclock.Now()
	defer func() {
		wg.Wait() // Ensure all background task routines have exited.
		h.server.clientPool.disconnect(p)
		p.balance = nil
		activeCount, _ := h.server.clientPool.pp.Active()
		clientConnectionGauge.Update(int64(activeCount))
		connectionTimer.Update(time.Duration(mclock.Now() - connectedAt))
	}()
	// Mark the peer starts to be served.
	atomic.StoreUint32(&p.serving, 1)
	defer atomic.StoreUint32(&p.serving, 0)

	// Spawn a main loop to handle all incoming messages.
	for {
		// Surface any asynchronous reply-send failure reported by the
		// response goroutines before reading the next request.
		select {
		case err := <-p.errCh:
			p.Log().Debug("Failed to send light ethereum response", "err", err)
			return err
		default:
		}
		if err := h.handleMsg(p, &wg); err != nil {
			p.Log().Debug("Light Ethereum message handling failed", "err", err)
			return err
		}
	}
}
   204  
   205  // handleMsg is invoked whenever an inbound message is received from a remote
   206  // peer. The remote connection is torn down upon returning any error.
   207  func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
   208  	// Read the next message from the remote peer, and ensure it's fully consumed
   209  	msg, err := p.rw.ReadMsg()
   210  	if err != nil {
   211  		return err
   212  	}
   213  	p.Log().Trace("Light Ethereum message arrived", "code", msg.Code, "bytes", msg.Size)
   214  
   215  	// Discard large message which exceeds the limitation.
   216  	if msg.Size > ProtocolMaxMsgSize {
   217  		clientErrorMeter.Mark(1)
   218  		return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
   219  	}
   220  	defer msg.Discard()
   221  
   222  	var (
   223  		maxCost uint64
   224  		task    *servingTask
   225  	)
   226  	p.responseCount++
   227  	responseCount := p.responseCount
   228  	// accept returns an indicator whether the request can be served.
   229  	// If so, deduct the max cost from the flow control buffer.
   230  	accept := func(reqID, reqCnt, maxCnt uint64) bool {
   231  		// Short circuit if the peer is already frozen or the request is invalid.
   232  		inSizeCost := h.server.costTracker.realCost(0, msg.Size, 0)
   233  		if p.isFrozen() || reqCnt == 0 || reqCnt > maxCnt {
   234  			p.fcClient.OneTimeCost(inSizeCost)
   235  			return false
   236  		}
   237  		// Prepaid max cost units before request been serving.
   238  		maxCost = p.fcCosts.getMaxCost(msg.Code, reqCnt)
   239  		accepted, bufShort, priority := p.fcClient.AcceptRequest(reqID, responseCount, maxCost)
   240  		if !accepted {
   241  			p.freeze()
   242  			p.Log().Error("Request came too early", "remaining", common.PrettyDuration(time.Duration(bufShort*1000000/p.fcParams.MinRecharge)))
   243  			p.fcClient.OneTimeCost(inSizeCost)
   244  			return false
   245  		}
   246  		// Create a multi-stage task, estimate the time it takes for the task to
   247  		// execute, and cache it in the request service queue.
   248  		factor := h.server.costTracker.globalFactor()
   249  		if factor < 0.001 {
   250  			factor = 1
   251  			p.Log().Error("Invalid global cost factor", "factor", factor)
   252  		}
   253  		maxTime := uint64(float64(maxCost) / factor)
   254  		task = h.server.servingQueue.newTask(p, maxTime, priority)
   255  		if task.start() {
   256  			return true
   257  		}
   258  		p.fcClient.RequestProcessed(reqID, responseCount, maxCost, inSizeCost)
   259  		return false
   260  	}
   261  	// sendResponse sends back the response and updates the flow control statistic.
   262  	sendResponse := func(reqID, amount uint64, reply *reply, servingTime uint64) {
   263  		p.responseLock.Lock()
   264  		defer p.responseLock.Unlock()
   265  
   266  		// Short circuit if the client is already frozen.
   267  		if p.isFrozen() {
   268  			realCost := h.server.costTracker.realCost(servingTime, msg.Size, 0)
   269  			p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)
   270  			return
   271  		}
   272  		// Positive correction buffer value with real cost.
   273  		var replySize uint32
   274  		if reply != nil {
   275  			replySize = reply.size()
   276  		}
   277  		var realCost uint64
   278  		if h.server.costTracker.testing {
   279  			realCost = maxCost // Assign a fake cost for testing purpose
   280  		} else {
   281  			realCost = h.server.costTracker.realCost(servingTime, msg.Size, replySize)
   282  			if realCost > maxCost {
   283  				realCost = maxCost
   284  			}
   285  		}
   286  		bv := p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)
   287  		if amount != 0 {
   288  			// Feed cost tracker request serving statistic.
   289  			h.server.costTracker.updateStats(msg.Code, amount, servingTime, realCost)
   290  			// Reduce priority "balance" for the specific peer.
   291  			p.balance.RequestServed(realCost)
   292  		}
   293  		if reply != nil {
   294  			p.queueSend(func() {
   295  				if err := reply.send(bv); err != nil {
   296  					select {
   297  					case p.errCh <- err:
   298  					default:
   299  					}
   300  				}
   301  			})
   302  		}
   303  	}
   304  	switch msg.Code {
   305  	case GetBlockHeadersMsg:
   306  		p.Log().Trace("Received block header request")
   307  		if metrics.EnabledExpensive {
   308  			miscInHeaderPacketsMeter.Mark(1)
   309  			miscInHeaderTrafficMeter.Mark(int64(msg.Size))
   310  		}
   311  		var req struct {
   312  			ReqID uint64
   313  			Query getBlockHeadersData
   314  		}
   315  		if err := msg.Decode(&req); err != nil {
   316  			clientErrorMeter.Mark(1)
   317  			return errResp(ErrDecode, "%v: %v", msg, err)
   318  		}
   319  		query := req.Query
   320  		if accept(req.ReqID, query.Amount, MaxHeaderFetch) {
   321  			wg.Add(1)
   322  			go func() {
   323  				defer wg.Done()
   324  				hashMode := query.Origin.Hash != (common.Hash{})
   325  				first := true
   326  				maxNonCanonical := uint64(100)
   327  
   328  				// Gather headers until the fetch or network limits is reached
   329  				var (
   330  					bytes   common.StorageSize
   331  					headers []*types.Header
   332  					unknown bool
   333  				)
   334  				for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit {
   335  					if !first && !task.waitOrStop() {
   336  						sendResponse(req.ReqID, 0, nil, task.servingTime)
   337  						return
   338  					}
   339  					// Retrieve the next header satisfying the query
   340  					var origin *types.Header
   341  					if hashMode {
   342  						if first {
   343  							origin = h.blockchain.GetHeaderByHash(query.Origin.Hash)
   344  							if origin != nil {
   345  								query.Origin.Number = origin.Number.Uint64()
   346  							}
   347  						} else {
   348  							origin = h.blockchain.GetHeader(query.Origin.Hash, query.Origin.Number)
   349  						}
   350  					} else {
   351  						origin = h.blockchain.GetHeaderByNumber(query.Origin.Number)
   352  					}
   353  					if origin == nil {
   354  						break
   355  					}
   356  					headers = append(headers, origin)
   357  					bytes += estHeaderRlpSize
   358  
   359  					// Advance to the next header of the query
   360  					switch {
   361  					case hashMode && query.Reverse:
   362  						// Hash based traversal towards the genesis block
   363  						ancestor := query.Skip + 1
   364  						if ancestor == 0 {
   365  							unknown = true
   366  						} else {
   367  							query.Origin.Hash, query.Origin.Number = h.blockchain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)
   368  							unknown = query.Origin.Hash == common.Hash{}
   369  						}
   370  					case hashMode && !query.Reverse:
   371  						// Hash based traversal towards the leaf block
   372  						var (
   373  							current = origin.Number.Uint64()
   374  							next    = current + query.Skip + 1
   375  						)
   376  						if next <= current {
   377  							infos, _ := json.MarshalIndent(p.Peer.Info(), "", "  ")
   378  							p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
   379  							unknown = true
   380  						} else {
   381  							if header := h.blockchain.GetHeaderByNumber(next); header != nil {
   382  								nextHash := header.Hash()
   383  								expOldHash, _ := h.blockchain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical)
   384  								if expOldHash == query.Origin.Hash {
   385  									query.Origin.Hash, query.Origin.Number = nextHash, next
   386  								} else {
   387  									unknown = true
   388  								}
   389  							} else {
   390  								unknown = true
   391  							}
   392  						}
   393  					case query.Reverse:
   394  						// Number based traversal towards the genesis block
   395  						if query.Origin.Number >= query.Skip+1 {
   396  							query.Origin.Number -= query.Skip + 1
   397  						} else {
   398  							unknown = true
   399  						}
   400  
   401  					case !query.Reverse:
   402  						// Number based traversal towards the leaf block
   403  						query.Origin.Number += query.Skip + 1
   404  					}
   405  					first = false
   406  				}
   407  				reply := p.replyBlockHeaders(req.ReqID, headers)
   408  				sendResponse(req.ReqID, query.Amount, reply, task.done())
   409  				if metrics.EnabledExpensive {
   410  					miscOutHeaderPacketsMeter.Mark(1)
   411  					miscOutHeaderTrafficMeter.Mark(int64(reply.size()))
   412  					miscServingTimeHeaderTimer.Update(time.Duration(task.servingTime))
   413  				}
   414  			}()
   415  		}
   416  
   417  	case GetBlockBodiesMsg:
   418  		p.Log().Trace("Received block bodies request")
   419  		if metrics.EnabledExpensive {
   420  			miscInBodyPacketsMeter.Mark(1)
   421  			miscInBodyTrafficMeter.Mark(int64(msg.Size))
   422  		}
   423  		var req struct {
   424  			ReqID  uint64
   425  			Hashes []common.Hash
   426  		}
   427  		if err := msg.Decode(&req); err != nil {
   428  			clientErrorMeter.Mark(1)
   429  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   430  		}
   431  		var (
   432  			bytes  int
   433  			bodies []rlp.RawValue
   434  		)
   435  		reqCnt := len(req.Hashes)
   436  		if accept(req.ReqID, uint64(reqCnt), MaxBodyFetch) {
   437  			wg.Add(1)
   438  			go func() {
   439  				defer wg.Done()
   440  				for i, hash := range req.Hashes {
   441  					if i != 0 && !task.waitOrStop() {
   442  						sendResponse(req.ReqID, 0, nil, task.servingTime)
   443  						return
   444  					}
   445  					if bytes >= softResponseLimit {
   446  						break
   447  					}
   448  					body := h.blockchain.GetBodyRLP(hash)
   449  					if body == nil {
   450  						p.bumpInvalid()
   451  						continue
   452  					}
   453  					bodies = append(bodies, body)
   454  					bytes += len(body)
   455  				}
   456  				reply := p.replyBlockBodiesRLP(req.ReqID, bodies)
   457  				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
   458  				if metrics.EnabledExpensive {
   459  					miscOutBodyPacketsMeter.Mark(1)
   460  					miscOutBodyTrafficMeter.Mark(int64(reply.size()))
   461  					miscServingTimeBodyTimer.Update(time.Duration(task.servingTime))
   462  				}
   463  			}()
   464  		}
   465  
   466  	case GetCodeMsg:
   467  		p.Log().Trace("Received code request")
   468  		if metrics.EnabledExpensive {
   469  			miscInCodePacketsMeter.Mark(1)
   470  			miscInCodeTrafficMeter.Mark(int64(msg.Size))
   471  		}
   472  		var req struct {
   473  			ReqID uint64
   474  			Reqs  []CodeReq
   475  		}
   476  		if err := msg.Decode(&req); err != nil {
   477  			clientErrorMeter.Mark(1)
   478  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   479  		}
   480  		var (
   481  			bytes int
   482  			data  [][]byte
   483  		)
   484  		reqCnt := len(req.Reqs)
   485  		if accept(req.ReqID, uint64(reqCnt), MaxCodeFetch) {
   486  			wg.Add(1)
   487  			go func() {
   488  				defer wg.Done()
   489  				for i, request := range req.Reqs {
   490  					if i != 0 && !task.waitOrStop() {
   491  						sendResponse(req.ReqID, 0, nil, task.servingTime)
   492  						return
   493  					}
   494  					// Look up the root hash belonging to the request
   495  					header := h.blockchain.GetHeaderByHash(request.BHash)
   496  					if header == nil {
   497  						p.Log().Warn("Failed to retrieve associate header for code", "hash", request.BHash)
   498  						p.bumpInvalid()
   499  						continue
   500  					}
   501  					// Refuse to search stale state data in the database since looking for
   502  					// a non-exist key is kind of expensive.
   503  					local := h.blockchain.CurrentHeader().Number.Uint64()
   504  					if !h.server.archiveMode && header.Number.Uint64()+core.TriesInMemory <= local {
   505  						p.Log().Debug("Reject stale code request", "number", header.Number.Uint64(), "head", local)
   506  						p.bumpInvalid()
   507  						continue
   508  					}
   509  					triedb := h.blockchain.StateCache().TrieDB()
   510  
   511  					account, err := h.getAccount(triedb, header.Root, common.BytesToHash(request.AccKey))
   512  					if err != nil {
   513  						p.Log().Warn("Failed to retrieve account for code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err)
   514  						p.bumpInvalid()
   515  						continue
   516  					}
   517  					code, err := h.blockchain.StateCache().ContractCode(common.BytesToHash(request.AccKey), common.BytesToHash(account.CodeHash))
   518  					if err != nil {
   519  						p.Log().Warn("Failed to retrieve account code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "codehash", common.BytesToHash(account.CodeHash), "err", err)
   520  						continue
   521  					}
   522  					// Accumulate the code and abort if enough data was retrieved
   523  					data = append(data, code)
   524  					if bytes += len(code); bytes >= softResponseLimit {
   525  						break
   526  					}
   527  				}
   528  				reply := p.replyCode(req.ReqID, data)
   529  				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
   530  				if metrics.EnabledExpensive {
   531  					miscOutCodePacketsMeter.Mark(1)
   532  					miscOutCodeTrafficMeter.Mark(int64(reply.size()))
   533  					miscServingTimeCodeTimer.Update(time.Duration(task.servingTime))
   534  				}
   535  			}()
   536  		}
   537  
   538  	case GetReceiptsMsg:
   539  		p.Log().Trace("Received receipts request")
   540  		if metrics.EnabledExpensive {
   541  			miscInReceiptPacketsMeter.Mark(1)
   542  			miscInReceiptTrafficMeter.Mark(int64(msg.Size))
   543  		}
   544  		var req struct {
   545  			ReqID  uint64
   546  			Hashes []common.Hash
   547  		}
   548  		if err := msg.Decode(&req); err != nil {
   549  			clientErrorMeter.Mark(1)
   550  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   551  		}
   552  		var (
   553  			bytes    int
   554  			receipts []rlp.RawValue
   555  		)
   556  		reqCnt := len(req.Hashes)
   557  		if accept(req.ReqID, uint64(reqCnt), MaxReceiptFetch) {
   558  			wg.Add(1)
   559  			go func() {
   560  				defer wg.Done()
   561  				for i, hash := range req.Hashes {
   562  					if i != 0 && !task.waitOrStop() {
   563  						sendResponse(req.ReqID, 0, nil, task.servingTime)
   564  						return
   565  					}
   566  					if bytes >= softResponseLimit {
   567  						break
   568  					}
   569  					// Retrieve the requested block's receipts, skipping if unknown to us
   570  					results := h.blockchain.GetReceiptsByHash(hash)
   571  					if results == nil {
   572  						if header := h.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
   573  							p.bumpInvalid()
   574  							continue
   575  						}
   576  					}
   577  					// If known, encode and queue for response packet
   578  					if encoded, err := rlp.EncodeToBytes(results); err != nil {
   579  						log.Error("Failed to encode receipt", "err", err)
   580  					} else {
   581  						receipts = append(receipts, encoded)
   582  						bytes += len(encoded)
   583  					}
   584  				}
   585  				reply := p.replyReceiptsRLP(req.ReqID, receipts)
   586  				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
   587  				if metrics.EnabledExpensive {
   588  					miscOutReceiptPacketsMeter.Mark(1)
   589  					miscOutReceiptTrafficMeter.Mark(int64(reply.size()))
   590  					miscServingTimeReceiptTimer.Update(time.Duration(task.servingTime))
   591  				}
   592  			}()
   593  		}
   594  
   595  	case GetProofsV2Msg:
   596  		p.Log().Trace("Received les/2 proofs request")
   597  		if metrics.EnabledExpensive {
   598  			miscInTrieProofPacketsMeter.Mark(1)
   599  			miscInTrieProofTrafficMeter.Mark(int64(msg.Size))
   600  		}
   601  		var req struct {
   602  			ReqID uint64
   603  			Reqs  []ProofReq
   604  		}
   605  		if err := msg.Decode(&req); err != nil {
   606  			clientErrorMeter.Mark(1)
   607  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   608  		}
   609  		// Gather state data until the fetch or network limits is reached
   610  		var (
   611  			lastBHash common.Hash
   612  			root      common.Hash
   613  		)
   614  		reqCnt := len(req.Reqs)
   615  		if accept(req.ReqID, uint64(reqCnt), MaxProofsFetch) {
   616  			wg.Add(1)
   617  			go func() {
   618  				defer wg.Done()
   619  				nodes := light.NewNodeSet()
   620  
   621  				for i, request := range req.Reqs {
   622  					if i != 0 && !task.waitOrStop() {
   623  						sendResponse(req.ReqID, 0, nil, task.servingTime)
   624  						return
   625  					}
   626  					// Look up the root hash belonging to the request
   627  					var (
   628  						header *types.Header
   629  						trie   state.Trie
   630  					)
   631  					if request.BHash != lastBHash {
   632  						root, lastBHash = common.Hash{}, request.BHash
   633  
   634  						if header = h.blockchain.GetHeaderByHash(request.BHash); header == nil {
   635  							p.Log().Warn("Failed to retrieve header for proof", "hash", request.BHash)
   636  							p.bumpInvalid()
   637  							continue
   638  						}
   639  						// Refuse to search stale state data in the database since looking for
   640  						// a non-exist key is kind of expensive.
   641  						local := h.blockchain.CurrentHeader().Number.Uint64()
   642  						if !h.server.archiveMode && header.Number.Uint64()+core.TriesInMemory <= local {
   643  							p.Log().Debug("Reject stale trie request", "number", header.Number.Uint64(), "head", local)
   644  							p.bumpInvalid()
   645  							continue
   646  						}
   647  						root = header.Root
   648  					}
   649  					// If a header lookup failed (non existent), ignore subsequent requests for the same header
   650  					if root == (common.Hash{}) {
   651  						p.bumpInvalid()
   652  						continue
   653  					}
   654  					// Open the account or storage trie for the request
   655  					statedb := h.blockchain.StateCache()
   656  
   657  					switch len(request.AccKey) {
   658  					case 0:
   659  						// No account key specified, open an account trie
   660  						trie, err = statedb.OpenTrie(root)
   661  						if trie == nil || err != nil {
   662  							p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "root", root, "err", err)
   663  							continue
   664  						}
   665  					default:
   666  						// Account key specified, open a storage trie
   667  						account, err := h.getAccount(statedb.TrieDB(), root, common.BytesToHash(request.AccKey))
   668  						if err != nil {
   669  							p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err)
   670  							p.bumpInvalid()
   671  							continue
   672  						}
   673  						trie, err = statedb.OpenStorageTrie(common.BytesToHash(request.AccKey), account.Root)
   674  						if trie == nil || err != nil {
   675  							p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "root", account.Root, "err", err)
   676  							continue
   677  						}
   678  					}
   679  					// Prove the user's request from the account or stroage trie
   680  					if err := trie.Prove(request.Key, request.FromLevel, nodes); err != nil {
   681  						p.Log().Warn("Failed to prove state request", "block", header.Number, "hash", header.Hash(), "err", err)
   682  						continue
   683  					}
   684  					if nodes.DataSize() >= softResponseLimit {
   685  						break
   686  					}
   687  				}
   688  				reply := p.replyProofsV2(req.ReqID, nodes.NodeList())
   689  				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
   690  				if metrics.EnabledExpensive {
   691  					miscOutTrieProofPacketsMeter.Mark(1)
   692  					miscOutTrieProofTrafficMeter.Mark(int64(reply.size()))
   693  					miscServingTimeTrieProofTimer.Update(time.Duration(task.servingTime))
   694  				}
   695  			}()
   696  		}
   697  
   698  	case GetHelperTrieProofsMsg:
   699  		p.Log().Trace("Received helper trie proof request")
   700  		if metrics.EnabledExpensive {
   701  			miscInHelperTriePacketsMeter.Mark(1)
   702  			miscInHelperTrieTrafficMeter.Mark(int64(msg.Size))
   703  		}
   704  		var req struct {
   705  			ReqID uint64
   706  			Reqs  []HelperTrieReq
   707  		}
   708  		if err := msg.Decode(&req); err != nil {
   709  			clientErrorMeter.Mark(1)
   710  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   711  		}
   712  		// Gather state data until the fetch or network limits is reached
   713  		var (
   714  			auxBytes int
   715  			auxData  [][]byte
   716  		)
   717  		reqCnt := len(req.Reqs)
   718  		if accept(req.ReqID, uint64(reqCnt), MaxHelperTrieProofsFetch) {
   719  			wg.Add(1)
   720  			go func() {
   721  				defer wg.Done()
   722  				var (
   723  					lastIdx  uint64
   724  					lastType uint
   725  					root     common.Hash
   726  					auxTrie  *trie.Trie
   727  				)
   728  				nodes := light.NewNodeSet()
   729  				for i, request := range req.Reqs {
   730  					if i != 0 && !task.waitOrStop() {
   731  						sendResponse(req.ReqID, 0, nil, task.servingTime)
   732  						return
   733  					}
   734  					if auxTrie == nil || request.Type != lastType || request.TrieIdx != lastIdx {
   735  						auxTrie, lastType, lastIdx = nil, request.Type, request.TrieIdx
   736  
   737  						var prefix string
   738  						if root, prefix = h.getHelperTrie(request.Type, request.TrieIdx); root != (common.Hash{}) {
   739  							auxTrie, _ = trie.New(root, trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix)))
   740  						}
   741  					}
   742  					if request.AuxReq == auxRoot {
   743  						var data []byte
   744  						if root != (common.Hash{}) {
   745  							data = root[:]
   746  						}
   747  						auxData = append(auxData, data)
   748  						auxBytes += len(data)
   749  					} else {
   750  						if auxTrie != nil {
   751  							auxTrie.Prove(request.Key, request.FromLevel, nodes)
   752  						}
   753  						if request.AuxReq != 0 {
   754  							data := h.getAuxiliaryHeaders(request)
   755  							auxData = append(auxData, data)
   756  							auxBytes += len(data)
   757  						}
   758  					}
   759  					if nodes.DataSize()+auxBytes >= softResponseLimit {
   760  						break
   761  					}
   762  				}
   763  				reply := p.replyHelperTrieProofs(req.ReqID, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData})
   764  				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
   765  				if metrics.EnabledExpensive {
   766  					miscOutHelperTriePacketsMeter.Mark(1)
   767  					miscOutHelperTrieTrafficMeter.Mark(int64(reply.size()))
   768  					miscServingTimeHelperTrieTimer.Update(time.Duration(task.servingTime))
   769  				}
   770  			}()
   771  		}
   772  
   773  	case SendTxV2Msg:
   774  		p.Log().Trace("Received new transactions")
   775  		if metrics.EnabledExpensive {
   776  			miscInTxsPacketsMeter.Mark(1)
   777  			miscInTxsTrafficMeter.Mark(int64(msg.Size))
   778  		}
   779  		var req struct {
   780  			ReqID uint64
   781  			Txs   []*types.Transaction
   782  		}
   783  		if err := msg.Decode(&req); err != nil {
   784  			clientErrorMeter.Mark(1)
   785  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   786  		}
   787  		reqCnt := len(req.Txs)
   788  		if accept(req.ReqID, uint64(reqCnt), MaxTxSend) {
   789  			wg.Add(1)
   790  			go func() {
   791  				defer wg.Done()
   792  				stats := make([]light.TxStatus, len(req.Txs))
   793  				for i, tx := range req.Txs {
   794  					if i != 0 && !task.waitOrStop() {
   795  						return
   796  					}
   797  					hash := tx.Hash()
   798  					stats[i] = h.txStatus(hash)
   799  					if stats[i].Status == core.TxStatusUnknown {
   800  						addFn := h.txpool.AddRemotes
   801  						// Add txs synchronously for testing purpose
   802  						if h.addTxsSync {
   803  							addFn = h.txpool.AddRemotesSync
   804  						}
   805  						if errs := addFn([]*types.Transaction{tx}); errs[0] != nil {
   806  							stats[i].Error = errs[0].Error()
   807  							continue
   808  						}
   809  						stats[i] = h.txStatus(hash)
   810  					}
   811  				}
   812  				reply := p.replyTxStatus(req.ReqID, stats)
   813  				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
   814  				if metrics.EnabledExpensive {
   815  					miscOutTxsPacketsMeter.Mark(1)
   816  					miscOutTxsTrafficMeter.Mark(int64(reply.size()))
   817  					miscServingTimeTxTimer.Update(time.Duration(task.servingTime))
   818  				}
   819  			}()
   820  		}
   821  
   822  	case GetTxStatusMsg:
   823  		p.Log().Trace("Received transaction status query request")
   824  		if metrics.EnabledExpensive {
   825  			miscInTxStatusPacketsMeter.Mark(1)
   826  			miscInTxStatusTrafficMeter.Mark(int64(msg.Size))
   827  		}
   828  		var req struct {
   829  			ReqID  uint64
   830  			Hashes []common.Hash
   831  		}
   832  		if err := msg.Decode(&req); err != nil {
   833  			clientErrorMeter.Mark(1)
   834  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   835  		}
   836  		reqCnt := len(req.Hashes)
   837  		if accept(req.ReqID, uint64(reqCnt), MaxTxStatus) {
   838  			wg.Add(1)
   839  			go func() {
   840  				defer wg.Done()
   841  				stats := make([]light.TxStatus, len(req.Hashes))
   842  				for i, hash := range req.Hashes {
   843  					if i != 0 && !task.waitOrStop() {
   844  						sendResponse(req.ReqID, 0, nil, task.servingTime)
   845  						return
   846  					}
   847  					stats[i] = h.txStatus(hash)
   848  				}
   849  				reply := p.replyTxStatus(req.ReqID, stats)
   850  				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
   851  				if metrics.EnabledExpensive {
   852  					miscOutTxStatusPacketsMeter.Mark(1)
   853  					miscOutTxStatusTrafficMeter.Mark(int64(reply.size()))
   854  					miscServingTimeTxStatusTimer.Update(time.Duration(task.servingTime))
   855  				}
   856  			}()
   857  		}
   858  
   859  	default:
   860  		p.Log().Trace("Received invalid message", "code", msg.Code)
   861  		clientErrorMeter.Mark(1)
   862  		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
   863  	}
   864  	// If the client has made too much invalid request(e.g. request a non-existent data),
   865  	// reject them to prevent SPAM attack.
   866  	if p.getInvalid() > maxRequestErrors {
   867  		clientErrorMeter.Mark(1)
   868  		return errTooManyInvalidRequest
   869  	}
   870  	return nil
   871  }
   872  
   873  // getAccount retrieves an account from the state based on root.
   874  func (h *serverHandler) getAccount(triedb *trie.Database, root, hash common.Hash) (state.Account, error) {
   875  	trie, err := trie.New(root, triedb)
   876  	if err != nil {
   877  		return state.Account{}, err
   878  	}
   879  	blob, err := trie.TryGet(hash[:])
   880  	if err != nil {
   881  		return state.Account{}, err
   882  	}
   883  	var account state.Account
   884  	if err = rlp.DecodeBytes(blob, &account); err != nil {
   885  		return state.Account{}, err
   886  	}
   887  	return account, nil
   888  }
   889  
   890  // getHelperTrie returns the post-processed trie root for the given trie ID and section index
   891  func (h *serverHandler) getHelperTrie(typ uint, index uint64) (common.Hash, string) {
   892  	switch typ {
   893  	case htCanonical:
   894  		sectionHead := rawdb.ReadCanonicalHash(h.chainDb, (index+1)*h.server.iConfig.ChtSize-1)
   895  		return light.GetChtRoot(h.chainDb, index, sectionHead), light.ChtTablePrefix
   896  	case htBloomBits:
   897  		sectionHead := rawdb.ReadCanonicalHash(h.chainDb, (index+1)*h.server.iConfig.BloomTrieSize-1)
   898  		return light.GetBloomTrieRoot(h.chainDb, index, sectionHead), light.BloomTrieTablePrefix
   899  	}
   900  	return common.Hash{}, ""
   901  }
   902  
   903  // getAuxiliaryHeaders returns requested auxiliary headers for the CHT request.
   904  func (h *serverHandler) getAuxiliaryHeaders(req HelperTrieReq) []byte {
   905  	if req.Type == htCanonical && req.AuxReq == auxHeader && len(req.Key) == 8 {
   906  		blockNum := binary.BigEndian.Uint64(req.Key)
   907  		hash := rawdb.ReadCanonicalHash(h.chainDb, blockNum)
   908  		return rawdb.ReadHeaderRLP(h.chainDb, hash, blockNum)
   909  	}
   910  	return nil
   911  }
   912  
   913  // txStatus returns the status of a specified transaction.
   914  func (h *serverHandler) txStatus(hash common.Hash) light.TxStatus {
   915  	var stat light.TxStatus
   916  	// Looking the transaction in txpool first.
   917  	stat.Status = h.txpool.Status([]common.Hash{hash})[0]
   918  
   919  	// If the transaction is unknown to the pool, try looking it up locally.
   920  	if stat.Status == core.TxStatusUnknown {
   921  		lookup := h.blockchain.GetTransactionLookup(hash)
   922  		if lookup != nil {
   923  			stat.Status = core.TxStatusIncluded
   924  			stat.Lookup = lookup
   925  		}
   926  	}
   927  	return stat
   928  }
   929  
   930  // broadcastLoop broadcasts new block information to all connected light
   931  // clients. According to the agreement between client and server, server should
   932  // only broadcast new announcement if the total difficulty is higher than the
   933  // last one. Besides server will add the signature if client requires.
   934  func (h *serverHandler) broadcastLoop() {
   935  	defer h.wg.Done()
   936  
   937  	headCh := make(chan core.ChainHeadEvent, 10)
   938  	headSub := h.blockchain.SubscribeChainHeadEvent(headCh)
   939  	defer headSub.Unsubscribe()
   940  
   941  	var (
   942  		lastHead *types.Header
   943  		lastTd   = common.Big0
   944  	)
   945  	for {
   946  		select {
   947  		case ev := <-headCh:
   948  			header := ev.Block.Header()
   949  			hash, number := header.Hash(), header.Number.Uint64()
   950  			td := h.blockchain.GetTd(hash, number)
   951  			if td == nil || td.Cmp(lastTd) <= 0 {
   952  				continue
   953  			}
   954  			var reorg uint64
   955  			if lastHead != nil {
   956  				reorg = lastHead.Number.Uint64() - rawdb.FindCommonAncestor(h.chainDb, header, lastHead).Number.Uint64()
   957  			}
   958  			lastHead, lastTd = header, td
   959  			log.Debug("Announcing block to peers", "number", number, "hash", hash, "td", td, "reorg", reorg)
   960  			h.server.broadcaster.broadcast(announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg})
   961  		case <-h.closeCh:
   962  			return
   963  		}
   964  	}
   965  }
   966  
// broadcaster sends new header announcements to active client peers
type broadcaster struct {
	// ns is the node state machine used to look up client peers and to
	// iterate/subscribe to the set of active peers.
	ns                           *nodestate.NodeStateMachine
	// privateKey signs announcements for peers using announceTypeSigned.
	privateKey                   *ecdsa.PrivateKey
	// lastAnnounce is the most recent announcement; signedAnnounce caches
	// its signed variant so each head is signed at most once.
	lastAnnounce, signedAnnounce announceData
}
   973  
   974  // newBroadcaster creates a new broadcaster
   975  func newBroadcaster(ns *nodestate.NodeStateMachine) *broadcaster {
   976  	b := &broadcaster{ns: ns}
   977  	ns.SubscribeState(priorityPoolSetup.ActiveFlag, func(node *enode.Node, oldState, newState nodestate.Flags) {
   978  		if newState.Equals(priorityPoolSetup.ActiveFlag) {
   979  			// send last announcement to activated peers
   980  			b.sendTo(node)
   981  		}
   982  	})
   983  	return b
   984  }
   985  
   986  // setSignerKey sets the signer key for signed announcements. Should be called before
   987  // starting the protocol handler.
   988  func (b *broadcaster) setSignerKey(privateKey *ecdsa.PrivateKey) {
   989  	b.privateKey = privateKey
   990  }
   991  
   992  // broadcast sends the given announcements to all active peers
   993  func (b *broadcaster) broadcast(announce announceData) {
   994  	b.ns.Operation(func() {
   995  		// iterate in an Operation to ensure that the active set does not change while iterating
   996  		b.lastAnnounce = announce
   997  		b.ns.ForEach(priorityPoolSetup.ActiveFlag, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
   998  			b.sendTo(node)
   999  		})
  1000  	})
  1001  }
  1002  
  1003  // sendTo sends the most recent announcement to the given node unless the same or higher Td
  1004  // announcement has already been sent.
  1005  func (b *broadcaster) sendTo(node *enode.Node) {
  1006  	if b.lastAnnounce.Td == nil {
  1007  		return
  1008  	}
  1009  	if p, _ := b.ns.GetField(node, clientPeerField).(*clientPeer); p != nil {
  1010  		if p.headInfo.Td == nil || b.lastAnnounce.Td.Cmp(p.headInfo.Td) > 0 {
  1011  			announce := b.lastAnnounce
  1012  			switch p.announceType {
  1013  			case announceTypeSimple:
  1014  				if !p.queueSend(func() { p.sendAnnounce(announce) }) {
  1015  					log.Debug("Drop announcement because queue is full", "number", announce.Number, "hash", announce.Hash)
  1016  				} else {
  1017  					log.Debug("Sent announcement", "number", announce.Number, "hash", announce.Hash)
  1018  				}
  1019  			case announceTypeSigned:
  1020  				if b.signedAnnounce.Hash != b.lastAnnounce.Hash {
  1021  					b.signedAnnounce = b.lastAnnounce
  1022  					b.signedAnnounce.sign(b.privateKey)
  1023  				}
  1024  				announce := b.signedAnnounce
  1025  				if !p.queueSend(func() { p.sendAnnounce(announce) }) {
  1026  					log.Debug("Drop announcement because queue is full", "number", announce.Number, "hash", announce.Hash)
  1027  				} else {
  1028  					log.Debug("Sent announcement", "number", announce.Number, "hash", announce.Hash)
  1029  				}
  1030  			}
  1031  			p.headInfo = blockInfo{b.lastAnnounce.Hash, b.lastAnnounce.Number, b.lastAnnounce.Td}
  1032  		}
  1033  	}
  1034  }