github.com/kisexp/xdchain@v0.0.0-20211206025815-490d6b732aa7/les/server_handler.go

     1  // Copyright 2019 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package les
    18  
    19  import (
    20  	"crypto/ecdsa"
    21  	"encoding/binary"
    22  	"encoding/json"
    23  	"errors"
    24  	"sync"
    25  	"sync/atomic"
    26  	"time"
    27  
    28  	"github.com/kisexp/xdchain/common"
    29  	"github.com/kisexp/xdchain/common/mclock"
    30  	"github.com/kisexp/xdchain/core"
    31  	"github.com/kisexp/xdchain/core/forkid"
    32  	"github.com/kisexp/xdchain/core/rawdb"
    33  	"github.com/kisexp/xdchain/core/state"
    34  	"github.com/kisexp/xdchain/core/types"
    35  	"github.com/kisexp/xdchain/ethdb"
    36  	lps "github.com/kisexp/xdchain/les/lespay/server"
    37  	"github.com/kisexp/xdchain/light"
    38  	"github.com/kisexp/xdchain/log"
    39  	"github.com/kisexp/xdchain/metrics"
    40  	"github.com/kisexp/xdchain/p2p"
    41  	"github.com/kisexp/xdchain/p2p/enode"
    42  	"github.com/kisexp/xdchain/p2p/nodestate"
    43  	"github.com/kisexp/xdchain/rlp"
    44  	"github.com/kisexp/xdchain/trie"
    45  )
    46  
    47  const (
    48  	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
    49  	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header
    50  	ethVersion        = 63              // equivalent eth version for the downloader
    51  
    52  	MaxHeaderFetch           = 192 // Amount of block headers to be fetched per retrieval request
    53  	MaxBodyFetch             = 32  // Amount of block bodies to be fetched per retrieval request
    54  	MaxReceiptFetch          = 128 // Amount of transaction receipts to allow fetching per request
    55  	MaxCodeFetch             = 64  // Amount of contract codes to allow fetching per request
    56  	MaxProofsFetch           = 64  // Amount of merkle proofs to be fetched per retrieval request
    57  	MaxHelperTrieProofsFetch = 64  // Amount of helper tries to be fetched per retrieval request
    58  	MaxTxSend                = 64  // Amount of transactions to be sent per request
    59  	MaxTxStatus              = 256 // Amount of transactions to be queried per request
    60  )
    61  
    62  var (
    63  	errTooManyInvalidRequest = errors.New("too many invalid requests made")
    64  	errFullClientPool        = errors.New("client pool is full")
    65  )
    66  
    67  // serverHandler is responsible for serving light clients and processing
    68  // all incoming light requests.
    69  type serverHandler struct {
    70  	forkFilter forkid.Filter
    71  	blockchain *core.BlockChain
    72  	chainDb    ethdb.Database
    73  	txpool     *core.TxPool
    74  	server     *LesServer
    75  
    76  	closeCh chan struct{}  // Channel used to exit all background routines of handler.
    77  	wg      sync.WaitGroup // WaitGroup used to track all background routines of handler.
    78  	synced  func() bool    // Callback function used to determine whether local node is synced.
    79  
    80  	// Testing fields
    81  	addTxsSync bool
    82  }
    83  
    84  func newServerHandler(server *LesServer, blockchain *core.BlockChain, chainDb ethdb.Database, txpool *core.TxPool, synced func() bool) *serverHandler {
    85  	handler := &serverHandler{
    86  		forkFilter: forkid.NewFilter(blockchain),
    87  		server:     server,
    88  		blockchain: blockchain,
    89  		chainDb:    chainDb,
    90  		txpool:     txpool,
    91  		closeCh:    make(chan struct{}),
    92  		synced:     synced,
    93  	}
    94  	return handler
    95  }
    96  
    97  // start starts the server handler.
    98  func (h *serverHandler) start() {
    99  	h.wg.Add(1)
   100  	go h.broadcastLoop()
   101  }
   102  
   103  // stop stops the server handler.
   104  func (h *serverHandler) stop() {
   105  	close(h.closeCh)
   106  	h.wg.Wait()
   107  }
   108  
   109  // runPeer is the p2p protocol run function for the given version.
   110  func (h *serverHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error {
   111  	peer := newClientPeer(int(version), h.server.config.NetworkId, p, newMeteredMsgWriter(rw, int(version)))
   112  	defer peer.close()
   113  	h.wg.Add(1)
   114  	defer h.wg.Done()
   115  	return h.handle(peer)
   116  }
   117  
   118  func (h *serverHandler) handle(p *clientPeer) error {
   119  	p.Log().Debug("Light Ethereum peer connected", "name", p.Name())
   120  
   121  	// Execute the LES handshake
   122  	var (
   123  		head   = h.blockchain.CurrentHeader()
   124  		hash   = head.Hash()
   125  		number = head.Number.Uint64()
   126  		td     = h.blockchain.GetTd(hash, number)
   127  		forkID = forkid.NewID(h.blockchain.Config(), h.blockchain.Genesis().Hash(), h.blockchain.CurrentBlock().NumberU64())
   128  	)
   129  	if err := p.Handshake(td, hash, number, h.blockchain.Genesis().Hash(), forkID, h.forkFilter, h.server); err != nil {
   130  		p.Log().Debug("Light Ethereum handshake failed", "err", err)
   131  		return err
   132  	}
   133  	// Reject duplicated peers, otherwise register the peer in the peer set.
   134  	var registered bool
   135  	if err := h.server.ns.Operation(func() {
   136  		if h.server.ns.GetField(p.Node(), clientPeerField) != nil {
   137  			registered = true
   138  		} else {
   139  			h.server.ns.SetFieldSub(p.Node(), clientPeerField, p)
   140  		}
   141  	}); err != nil {
   142  		return err
   143  	}
   144  	if registered {
   145  		return errAlreadyRegistered
   146  	}
   147  
   148  	defer func() {
   149  		h.server.ns.SetField(p.Node(), clientPeerField, nil)
   150  		if p.fcClient != nil { // is nil when connecting to another server
   151  			p.fcClient.Disconnect()
   152  		}
   153  	}()
   154  	if p.server {
   155  		// connected to another server, no messages expected, just wait for disconnection
   156  		_, err := p.rw.ReadMsg()
   157  		return err
   158  	}
   159  	// Reject light clients if the server is not synced.
   160  	//
   161  	// Perform this check here so that "non-synced" les-server peers are still
   162  	// allowed to keep the connection.
   163  	if !h.synced() {
   164  		p.Log().Debug("Light server not synced, rejecting peer")
   165  		return p2p.DiscRequested
   166  	}
   167  	// Disconnect the inbound peer if it's rejected by clientPool
   168  	if cap, err := h.server.clientPool.connect(p); cap != p.fcParams.MinRecharge || err != nil {
   169  		p.Log().Debug("Light Ethereum peer rejected", "err", errFullClientPool)
   170  		return errFullClientPool
   171  	}
   172  	p.balance, _ = h.server.ns.GetField(p.Node(), h.server.clientPool.BalanceField).(*lps.NodeBalance)
   173  	if p.balance == nil {
   174  		return p2p.DiscRequested
   175  	}
   176  	activeCount, _ := h.server.clientPool.pp.Active()
   177  	clientConnectionGauge.Update(int64(activeCount))
   178  
   179  	var wg sync.WaitGroup // Wait group used to track all in-flight task routines.
   180  
   181  	connectedAt := mclock.Now()
   182  	defer func() {
   183  		wg.Wait() // Ensure all background task routines have exited.
   184  		h.server.clientPool.disconnect(p)
   185  		p.balance = nil
   186  		activeCount, _ := h.server.clientPool.pp.Active()
   187  		clientConnectionGauge.Update(int64(activeCount))
   188  		connectionTimer.Update(time.Duration(mclock.Now() - connectedAt))
   189  	}()
   190  	// Mark the peer as being served.
   191  	atomic.StoreUint32(&p.serving, 1)
   192  	defer atomic.StoreUint32(&p.serving, 0)
   193  
   194  	// Spawn a main loop to handle all incoming messages.
   195  	for {
   196  		select {
   197  		case err := <-p.errCh:
   198  			p.Log().Debug("Failed to send light ethereum response", "err", err)
   199  			return err
   200  		default:
   201  		}
   202  		if err := h.handleMsg(p, &wg); err != nil {
   203  			p.Log().Debug("Light Ethereum message handling failed", "err", err)
   204  			return err
   205  		}
   206  	}
   207  }
   208  
   209  // handleMsg is invoked whenever an inbound message is received from a remote
   210  // peer. The remote connection is torn down upon returning any error.
   211  func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
   212  	// Read the next message from the remote peer, and ensure it's fully consumed
   213  	msg, err := p.rw.ReadMsg()
   214  	if err != nil {
   215  		return err
   216  	}
   217  	p.Log().Trace("Light Ethereum message arrived", "code", msg.Code, "bytes", msg.Size)
   218  
   219  	// Discard large messages which exceed the limit.
   220  	if msg.Size > ProtocolMaxMsgSize {
   221  		clientErrorMeter.Mark(1)
   222  		return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
   223  	}
   224  	defer msg.Discard()
   225  
   226  	var (
   227  		maxCost uint64
   228  		task    *servingTask
   229  	)
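        	// responseCount identifies this response in the peer's flow control
        	// bookkeeping; it is captured locally so the closures below keep using a
        	// stable value.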
   230  	p.responseCount++
   231  	responseCount := p.responseCount
   232  	// accept reports whether the request can be served.
   233  	// If so, the max cost is deducted from the flow control buffer.
   234  	accept := func(reqID, reqCnt, maxCnt uint64) bool {
   235  		// Short circuit if the peer is already frozen or the request is invalid.
   236  		inSizeCost := h.server.costTracker.realCost(0, msg.Size, 0)
   237  		if p.isFrozen() || reqCnt == 0 || reqCnt > maxCnt {
   238  			p.fcClient.OneTimeCost(inSizeCost)
   239  			return false
   240  		}
   241  		// Prepay the maximum cost units before the request is served.
   242  		maxCost = p.fcCosts.getMaxCost(msg.Code, reqCnt)
   243  		accepted, bufShort, priority := p.fcClient.AcceptRequest(reqID, responseCount, maxCost)
   244  		if !accepted {
   245  			p.freeze()
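        			// bufShort is the missing buffer amount; dividing it by the minimum
        			// recharge rate gives a rough estimate of how long the client must wait.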
   246  			p.Log().Error("Request came too early", "remaining", common.PrettyDuration(time.Duration(bufShort*1000000/p.fcParams.MinRecharge)))
   247  			p.fcClient.OneTimeCost(inSizeCost)
   248  			return false
   249  		}
   250  		// Create a multi-stage task, estimate the time it takes for the task to
   251  		// execute, and cache it in the request service queue.
   252  		factor := h.server.costTracker.globalFactor()
   253  		if factor < 0.001 {
   254  			p.Log().Error("Invalid global cost factor", "factor", factor)
   255  			factor = 1
   256  		}
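        		// Dividing the prepaid maximum cost by the global cost factor yields an
        		// upper bound on the serving time allotted to this task.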
   257  		maxTime := uint64(float64(maxCost) / factor)
   258  		task = h.server.servingQueue.newTask(p, maxTime, priority)
   259  		if task.start() {
   260  			return true
   261  		}
   262  		p.fcClient.RequestProcessed(reqID, responseCount, maxCost, inSizeCost)
   263  		return false
   264  	}
   265  	// sendResponse sends back the response and updates the flow control statistics.
   266  	sendResponse := func(reqID, amount uint64, reply *reply, servingTime uint64) {
   267  		p.responseLock.Lock()
   268  		defer p.responseLock.Unlock()
   269  
   270  		// Short circuit if the client is already frozen.
   271  		if p.isFrozen() {
   272  			realCost := h.server.costTracker.realCost(servingTime, msg.Size, 0)
   273  			p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)
   274  			return
   275  		}
   276  		// Correct the prepaid buffer deduction with the real cost of serving the request.
   277  		var replySize uint32
   278  		if reply != nil {
   279  			replySize = reply.size()
   280  		}
   281  		var realCost uint64
   282  		if h.server.costTracker.testing {
   283  			realCost = maxCost // Assign a fake cost for testing purposes
   284  		} else {
   285  			realCost = h.server.costTracker.realCost(servingTime, msg.Size, replySize)
   286  			if realCost > maxCost {
   287  				realCost = maxCost
   288  			}
   289  		}
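        		// bv is the updated flow control buffer value, reported back to the
        		// client together with the reply.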
   290  		bv := p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)
   291  		if amount != 0 {
   292  			// Feed the cost tracker with request serving statistics.
   293  			h.server.costTracker.updateStats(msg.Code, amount, servingTime, realCost)
   294  			// Reduce priority "balance" for the specific peer.
   295  			p.balance.RequestServed(realCost)
   296  		}
   297  		if reply != nil {
   298  			p.queueSend(func() {
   299  				if err := reply.send(bv); err != nil {
   300  					select {
   301  					case p.errCh <- err:
   302  					default:
   303  					}
   304  				}
   305  			})
   306  		}
   307  	}
   308  	switch msg.Code {
   309  	case GetBlockHeadersMsg:
   310  		p.Log().Trace("Received block header request")
   311  		if metrics.EnabledExpensive {
   312  			miscInHeaderPacketsMeter.Mark(1)
   313  			miscInHeaderTrafficMeter.Mark(int64(msg.Size))
   314  		}
   315  		var req struct {
   316  			ReqID uint64
   317  			Query getBlockHeadersData
   318  		}
   319  		if err := msg.Decode(&req); err != nil {
   320  			clientErrorMeter.Mark(1)
   321  			return errResp(ErrDecode, "%v: %v", msg, err)
   322  		}
   323  		query := req.Query
   324  		if accept(req.ReqID, query.Amount, MaxHeaderFetch) {
   325  			wg.Add(1)
   326  			go func() {
   327  				defer wg.Done()
   328  				hashMode := query.Origin.Hash != (common.Hash{})
   329  				first := true
   330  				maxNonCanonical := uint64(100)
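        				// maxNonCanonical caps how many headers GetAncestor may step through
        				// one by one when the lookup leaves the canonical chain.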
   331  
   332  				// Gather headers until the fetch or network limit is reached
   333  				var (
   334  					bytes   common.StorageSize
   335  					headers []*types.Header
   336  					unknown bool
   337  				)
   338  				for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit {
   339  					if !first && !task.waitOrStop() {
   340  						sendResponse(req.ReqID, 0, nil, task.servingTime)
   341  						return
   342  					}
   343  					// Retrieve the next header satisfying the query
   344  					var origin *types.Header
   345  					if hashMode {
   346  						if first {
   347  							origin = h.blockchain.GetHeaderByHash(query.Origin.Hash)
   348  							if origin != nil {
   349  								query.Origin.Number = origin.Number.Uint64()
   350  							}
   351  						} else {
   352  							origin = h.blockchain.GetHeader(query.Origin.Hash, query.Origin.Number)
   353  						}
   354  					} else {
   355  						origin = h.blockchain.GetHeaderByNumber(query.Origin.Number)
   356  					}
   357  					if origin == nil {
   358  						break
   359  					}
   360  					headers = append(headers, origin)
   361  					bytes += estHeaderRlpSize
   362  
   363  					// Advance to the next header of the query
   364  					switch {
   365  					case hashMode && query.Reverse:
   366  						// Hash based traversal towards the genesis block
   367  						ancestor := query.Skip + 1
   368  						if ancestor == 0 {
   369  							unknown = true
   370  						} else {
   371  							query.Origin.Hash, query.Origin.Number = h.blockchain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)
   372  							unknown = query.Origin.Hash == common.Hash{}
   373  						}
   374  					case hashMode && !query.Reverse:
   375  						// Hash based traversal towards the leaf block
   376  						var (
   377  							current = origin.Number.Uint64()
   378  							next    = current + query.Skip + 1
   379  						)
   380  						if next <= current {
   381  							infos, _ := json.MarshalIndent(p.Peer.Info(), "", "  ")
   382  							p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
   383  							unknown = true
   384  						} else {
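        							// Only advance if the origin is verified to be the ancestor of the
        							// candidate header at the expected distance.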
   385  							if header := h.blockchain.GetHeaderByNumber(next); header != nil {
   386  								nextHash := header.Hash()
   387  								expOldHash, _ := h.blockchain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical)
   388  								if expOldHash == query.Origin.Hash {
   389  									query.Origin.Hash, query.Origin.Number = nextHash, next
   390  								} else {
   391  									unknown = true
   392  								}
   393  							} else {
   394  								unknown = true
   395  							}
   396  						}
   397  					case query.Reverse:
   398  						// Number based traversal towards the genesis block
   399  						if query.Origin.Number >= query.Skip+1 {
   400  							query.Origin.Number -= query.Skip + 1
   401  						} else {
   402  							unknown = true
   403  						}
   404  
   405  					case !query.Reverse:
   406  						// Number based traversal towards the leaf block
   407  						query.Origin.Number += query.Skip + 1
   408  					}
   409  					first = false
   410  				}
   411  				reply := p.replyBlockHeaders(req.ReqID, headers)
   412  				sendResponse(req.ReqID, query.Amount, reply, task.done())
   413  				if metrics.EnabledExpensive {
   414  					miscOutHeaderPacketsMeter.Mark(1)
   415  					miscOutHeaderTrafficMeter.Mark(int64(reply.size()))
   416  					miscServingTimeHeaderTimer.Update(time.Duration(task.servingTime))
   417  				}
   418  			}()
   419  		}
   420  
   421  	case GetBlockBodiesMsg:
   422  		p.Log().Trace("Received block bodies request")
   423  		if metrics.EnabledExpensive {
   424  			miscInBodyPacketsMeter.Mark(1)
   425  			miscInBodyTrafficMeter.Mark(int64(msg.Size))
   426  		}
   427  		var req struct {
   428  			ReqID  uint64
   429  			Hashes []common.Hash
   430  		}
   431  		if err := msg.Decode(&req); err != nil {
   432  			clientErrorMeter.Mark(1)
   433  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   434  		}
   435  		var (
   436  			bytes  int
   437  			bodies []rlp.RawValue
   438  		)
   439  		reqCnt := len(req.Hashes)
   440  		if accept(req.ReqID, uint64(reqCnt), MaxBodyFetch) {
   441  			wg.Add(1)
   442  			go func() {
   443  				defer wg.Done()
   444  				for i, hash := range req.Hashes {
   445  					if i != 0 && !task.waitOrStop() {
   446  						sendResponse(req.ReqID, 0, nil, task.servingTime)
   447  						return
   448  					}
   449  					if bytes >= softResponseLimit {
   450  						break
   451  					}
   452  					body := h.blockchain.GetBodyRLP(hash)
   453  					if body == nil {
   454  						p.bumpInvalid()
   455  						continue
   456  					}
   457  					bodies = append(bodies, body)
   458  					bytes += len(body)
   459  				}
   460  				reply := p.replyBlockBodiesRLP(req.ReqID, bodies)
   461  				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
   462  				if metrics.EnabledExpensive {
   463  					miscOutBodyPacketsMeter.Mark(1)
   464  					miscOutBodyTrafficMeter.Mark(int64(reply.size()))
   465  					miscServingTimeBodyTimer.Update(time.Duration(task.servingTime))
   466  				}
   467  			}()
   468  		}
   469  
   470  	case GetCodeMsg:
   471  		p.Log().Trace("Received code request")
   472  		if metrics.EnabledExpensive {
   473  			miscInCodePacketsMeter.Mark(1)
   474  			miscInCodeTrafficMeter.Mark(int64(msg.Size))
   475  		}
   476  		var req struct {
   477  			ReqID uint64
   478  			Reqs  []CodeReq
   479  		}
   480  		if err := msg.Decode(&req); err != nil {
   481  			clientErrorMeter.Mark(1)
   482  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   483  		}
   484  		var (
   485  			bytes int
   486  			data  [][]byte
   487  		)
   488  		reqCnt := len(req.Reqs)
   489  		if accept(req.ReqID, uint64(reqCnt), MaxCodeFetch) {
   490  			wg.Add(1)
   491  			go func() {
   492  				defer wg.Done()
   493  				for i, request := range req.Reqs {
   494  					if i != 0 && !task.waitOrStop() {
   495  						sendResponse(req.ReqID, 0, nil, task.servingTime)
   496  						return
   497  					}
   498  					// Look up the root hash belonging to the request
   499  					header := h.blockchain.GetHeaderByHash(request.BHash)
   500  					if header == nil {
   501  					p.Log().Warn("Failed to retrieve associated header for code", "hash", request.BHash)
   502  						p.bumpInvalid()
   503  						continue
   504  					}
   505  				// Refuse to search stale state data in the database since looking for
   506  				// a non-existent key is quite expensive.
   507  					local := h.blockchain.CurrentHeader().Number.Uint64()
   508  					if !h.server.archiveMode && header.Number.Uint64()+core.TriesInMemory <= local {
   509  						p.Log().Debug("Reject stale code request", "number", header.Number.Uint64(), "head", local)
   510  						p.bumpInvalid()
   511  						continue
   512  					}
   513  					statedb := h.blockchain.StateCache()
   514  					triedb := statedb.TrieDB()
   515  
   516  					account, err := h.getAccount(triedb, header.Root, common.BytesToHash(request.AccKey))
   517  					if err != nil {
   518  						p.Log().Warn("Failed to retrieve account for code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err)
   519  						p.bumpInvalid()
   520  						continue
   521  					}
   522  					code, err := statedb.ContractCode(common.BytesToHash(request.AccKey), common.BytesToHash(account.CodeHash))
   523  					if err != nil {
   524  						p.Log().Warn("Failed to retrieve account code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "codehash", common.BytesToHash(account.CodeHash), "err", err)
   525  						continue
   526  					}
   527  					// Accumulate the code and abort if enough data was retrieved
   528  					data = append(data, code)
   529  					if bytes += len(code); bytes >= softResponseLimit {
   530  						break
   531  					}
   532  				}
   533  				reply := p.replyCode(req.ReqID, data)
   534  				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
   535  				if metrics.EnabledExpensive {
   536  					miscOutCodePacketsMeter.Mark(1)
   537  					miscOutCodeTrafficMeter.Mark(int64(reply.size()))
   538  					miscServingTimeCodeTimer.Update(time.Duration(task.servingTime))
   539  				}
   540  			}()
   541  		}
   542  
   543  	case GetReceiptsMsg:
   544  		p.Log().Trace("Received receipts request")
   545  		if metrics.EnabledExpensive {
   546  			miscInReceiptPacketsMeter.Mark(1)
   547  			miscInReceiptTrafficMeter.Mark(int64(msg.Size))
   548  		}
   549  		var req struct {
   550  			ReqID  uint64
   551  			Hashes []common.Hash
   552  		}
   553  		if err := msg.Decode(&req); err != nil {
   554  			clientErrorMeter.Mark(1)
   555  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   556  		}
   557  		var (
   558  			bytes    int
   559  			receipts []rlp.RawValue
   560  		)
   561  		reqCnt := len(req.Hashes)
   562  		if accept(req.ReqID, uint64(reqCnt), MaxReceiptFetch) {
   563  			wg.Add(1)
   564  			go func() {
   565  				defer wg.Done()
   566  				for i, hash := range req.Hashes {
   567  					if i != 0 && !task.waitOrStop() {
   568  						sendResponse(req.ReqID, 0, nil, task.servingTime)
   569  						return
   570  					}
   571  					if bytes >= softResponseLimit {
   572  						break
   573  					}
   574  					// Retrieve the requested block's receipts, skipping if unknown to us
   575  					results := h.blockchain.GetReceiptsByHash(hash)
   576  					if results == nil {
   577  						if header := h.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
   578  							p.bumpInvalid()
   579  							continue
   580  						}
   581  					}
   582  					// If known, encode and queue for response packet
   583  					if encoded, err := rlp.EncodeToBytes(results); err != nil {
   584  						log.Error("Failed to encode receipt", "err", err)
   585  					} else {
   586  						receipts = append(receipts, encoded)
   587  						bytes += len(encoded)
   588  					}
   589  				}
   590  				reply := p.replyReceiptsRLP(req.ReqID, receipts)
   591  				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
   592  				if metrics.EnabledExpensive {
   593  					miscOutReceiptPacketsMeter.Mark(1)
   594  					miscOutReceiptTrafficMeter.Mark(int64(reply.size()))
   595  					miscServingTimeReceiptTimer.Update(time.Duration(task.servingTime))
   596  				}
   597  			}()
   598  		}
   599  
   600  	case GetProofsV2Msg:
   601  		p.Log().Trace("Received les/2 proofs request")
   602  		if metrics.EnabledExpensive {
   603  			miscInTrieProofPacketsMeter.Mark(1)
   604  			miscInTrieProofTrafficMeter.Mark(int64(msg.Size))
   605  		}
   606  		var req struct {
   607  			ReqID uint64
   608  			Reqs  []ProofReq
   609  		}
   610  		if err := msg.Decode(&req); err != nil {
   611  			clientErrorMeter.Mark(1)
   612  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   613  		}
   614  		// Gather state data until the fetch or network limit is reached
   615  		var (
   616  			lastBHash common.Hash
   617  			root      common.Hash
   618  			header    *types.Header
   619  		)
   620  		reqCnt := len(req.Reqs)
   621  		if accept(req.ReqID, uint64(reqCnt), MaxProofsFetch) {
   622  			wg.Add(1)
   623  			go func() {
   624  				defer wg.Done()
   625  				nodes := light.NewNodeSet()
   626  
   627  				for i, request := range req.Reqs {
   628  					if i != 0 && !task.waitOrStop() {
   629  						sendResponse(req.ReqID, 0, nil, task.servingTime)
   630  						return
   631  					}
   632  					// Look up the root hash belonging to the request
   633  					if request.BHash != lastBHash {
   634  						root, lastBHash = common.Hash{}, request.BHash
   635  
   636  						if header = h.blockchain.GetHeaderByHash(request.BHash); header == nil {
   637  							p.Log().Warn("Failed to retrieve header for proof", "hash", request.BHash)
   638  							p.bumpInvalid()
   639  							continue
   640  						}
   641  						// Refuse to search stale state data in the database since looking for
   642  						// a non-existent key is quite expensive.
   643  						local := h.blockchain.CurrentHeader().Number.Uint64()
   644  						if !h.server.archiveMode && header.Number.Uint64()+core.TriesInMemory <= local {
   645  							p.Log().Debug("Reject stale trie request", "number", header.Number.Uint64(), "head", local)
   646  							p.bumpInvalid()
   647  							continue
   648  						}
   649  						root = header.Root
   650  					}
   651  					// If a header lookup failed (non-existent), ignore subsequent requests for the same header
   652  					if root == (common.Hash{}) {
   653  						p.bumpInvalid()
   654  						continue
   655  					}
   656  					// Open the account or storage trie for the request
   657  					statedb := h.blockchain.StateCache()
   658  
   659  					var trie state.Trie
   660  					switch len(request.AccKey) {
   661  					case 0:
   662  						// No account key specified, open an account trie
   663  						trie, err = statedb.OpenTrie(root)
   664  						if trie == nil || err != nil {
   665  							p.Log().Warn("Failed to open account trie for proof", "block", header.Number, "hash", header.Hash(), "root", root, "err", err)
   666  							continue
   667  						}
   668  					default:
   669  						// Account key specified, open a storage trie
   670  						account, err := h.getAccount(statedb.TrieDB(), root, common.BytesToHash(request.AccKey))
   671  						if err != nil {
   672  							p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err)
   673  							p.bumpInvalid()
   674  							continue
   675  						}
   676  						trie, err = statedb.OpenStorageTrie(common.BytesToHash(request.AccKey), account.Root)
   677  						if trie == nil || err != nil {
   678  							p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "root", account.Root, "err", err)
   679  							continue
   680  						}
   681  					}
   682  					// Prove the user's request from the account or storage trie
   683  					if err := trie.Prove(request.Key, request.FromLevel, nodes); err != nil {
   684  						p.Log().Warn("Failed to prove state request", "block", header.Number, "hash", header.Hash(), "err", err)
   685  						continue
   686  					}
   687  					if nodes.DataSize() >= softResponseLimit {
   688  						break
   689  					}
   690  				}
   691  				reply := p.replyProofsV2(req.ReqID, nodes.NodeList())
   692  				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
   693  				if metrics.EnabledExpensive {
   694  					miscOutTrieProofPacketsMeter.Mark(1)
   695  					miscOutTrieProofTrafficMeter.Mark(int64(reply.size()))
   696  					miscServingTimeTrieProofTimer.Update(time.Duration(task.servingTime))
   697  				}
   698  			}()
   699  		}
   700  
   701  	case GetHelperTrieProofsMsg:
   702  		p.Log().Trace("Received helper trie proof request")
   703  		if metrics.EnabledExpensive {
   704  			miscInHelperTriePacketsMeter.Mark(1)
   705  			miscInHelperTrieTrafficMeter.Mark(int64(msg.Size))
   706  		}
   707  		var req struct {
   708  			ReqID uint64
   709  			Reqs  []HelperTrieReq
   710  		}
   711  		if err := msg.Decode(&req); err != nil {
   712  			clientErrorMeter.Mark(1)
   713  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   714  		}
   715  		// Gather state data until the fetch or network limit is reached
   716  		var (
   717  			auxBytes int
   718  			auxData  [][]byte
   719  		)
   720  		reqCnt := len(req.Reqs)
   721  		if accept(req.ReqID, uint64(reqCnt), MaxHelperTrieProofsFetch) {
   722  			wg.Add(1)
   723  			go func() {
   724  				defer wg.Done()
   725  				var (
   726  					lastIdx  uint64
   727  					lastType uint
   728  					root     common.Hash
   729  					auxTrie  *trie.Trie
   730  				)
   731  				nodes := light.NewNodeSet()
   732  				for i, request := range req.Reqs {
   733  					if i != 0 && !task.waitOrStop() {
   734  						sendResponse(req.ReqID, 0, nil, task.servingTime)
   735  						return
   736  					}
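        					// Reuse the previously opened helper trie when consecutive requests
        					// target the same trie type and section index.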
   737  					if auxTrie == nil || request.Type != lastType || request.TrieIdx != lastIdx {
   738  						auxTrie, lastType, lastIdx = nil, request.Type, request.TrieIdx
   739  
   740  						var prefix string
   741  						if root, prefix = h.getHelperTrie(request.Type, request.TrieIdx); root != (common.Hash{}) {
   742  							auxTrie, _ = trie.New(root, trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix)))
   743  						}
   744  					}
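        					// An auxRoot request asks for the helper trie root itself instead of
        					// a proof for a specific key.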
   745  					if request.AuxReq == auxRoot {
   746  						var data []byte
   747  						if root != (common.Hash{}) {
   748  							data = root[:]
   749  						}
   750  						auxData = append(auxData, data)
   751  						auxBytes += len(data)
   752  					} else {
   753  						if auxTrie != nil {
   754  							auxTrie.Prove(request.Key, request.FromLevel, nodes)
   755  						}
   756  						if request.AuxReq != 0 {
   757  							data := h.getAuxiliaryHeaders(request)
   758  							auxData = append(auxData, data)
   759  							auxBytes += len(data)
   760  						}
   761  					}
   762  					if nodes.DataSize()+auxBytes >= softResponseLimit {
   763  						break
   764  					}
   765  				}
   766  				reply := p.replyHelperTrieProofs(req.ReqID, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData})
   767  				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
   768  				if metrics.EnabledExpensive {
   769  					miscOutHelperTriePacketsMeter.Mark(1)
   770  					miscOutHelperTrieTrafficMeter.Mark(int64(reply.size()))
   771  					miscServingTimeHelperTrieTimer.Update(time.Duration(task.servingTime))
   772  				}
   773  			}()
   774  		}
   775  
   776  	case SendTxV2Msg:
   777  		p.Log().Trace("Received new transactions")
   778  		if metrics.EnabledExpensive {
   779  			miscInTxsPacketsMeter.Mark(1)
   780  			miscInTxsTrafficMeter.Mark(int64(msg.Size))
   781  		}
   782  		var req struct {
   783  			ReqID uint64
   784  			Txs   []*types.Transaction
   785  		}
   786  		if err := msg.Decode(&req); err != nil {
   787  			clientErrorMeter.Mark(1)
   788  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   789  		}
   790  		reqCnt := len(req.Txs)
   791  		if accept(req.ReqID, uint64(reqCnt), MaxTxSend) {
   792  			wg.Add(1)
   793  			go func() {
   794  				defer wg.Done()
   795  				stats := make([]light.TxStatus, len(req.Txs))
   796  				for i, tx := range req.Txs {
   797  					if i != 0 && !task.waitOrStop() {
   798  						return
   799  					}
   800  					hash := tx.Hash()
   801  					stats[i] = h.txStatus(hash)
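        					// Only hand the transaction over to the pool if it is not already
        					// known, then refresh its status afterwards.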
   802  					if stats[i].Status == core.TxStatusUnknown {
   803  						addFn := h.txpool.AddRemotes
   804  						// Add txs synchronously for testing purposes
   805  						if h.addTxsSync {
   806  							addFn = h.txpool.AddRemotesSync
   807  						}
   808  						if errs := addFn([]*types.Transaction{tx}); errs[0] != nil {
   809  							stats[i].Error = errs[0].Error()
   810  							continue
   811  						}
   812  						stats[i] = h.txStatus(hash)
   813  					}
   814  				}
   815  				reply := p.replyTxStatus(req.ReqID, stats)
   816  				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
   817  				if metrics.EnabledExpensive {
   818  					miscOutTxsPacketsMeter.Mark(1)
   819  					miscOutTxsTrafficMeter.Mark(int64(reply.size()))
   820  					miscServingTimeTxTimer.Update(time.Duration(task.servingTime))
   821  				}
   822  			}()
   823  		}
   824  
   825  	case GetTxStatusMsg:
   826  		p.Log().Trace("Received transaction status query request")
   827  		if metrics.EnabledExpensive {
   828  			miscInTxStatusPacketsMeter.Mark(1)
   829  			miscInTxStatusTrafficMeter.Mark(int64(msg.Size))
   830  		}
   831  		var req struct {
   832  			ReqID  uint64
   833  			Hashes []common.Hash
   834  		}
   835  		if err := msg.Decode(&req); err != nil {
   836  			clientErrorMeter.Mark(1)
   837  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   838  		}
   839  		reqCnt := len(req.Hashes)
   840  		if accept(req.ReqID, uint64(reqCnt), MaxTxStatus) {
   841  			wg.Add(1)
   842  			go func() {
   843  				defer wg.Done()
   844  				stats := make([]light.TxStatus, len(req.Hashes))
   845  				for i, hash := range req.Hashes {
   846  					if i != 0 && !task.waitOrStop() {
   847  						sendResponse(req.ReqID, 0, nil, task.servingTime)
   848  						return
   849  					}
   850  					stats[i] = h.txStatus(hash)
   851  				}
   852  				reply := p.replyTxStatus(req.ReqID, stats)
   853  				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
   854  				if metrics.EnabledExpensive {
   855  					miscOutTxStatusPacketsMeter.Mark(1)
   856  					miscOutTxStatusTrafficMeter.Mark(int64(reply.size()))
   857  					miscServingTimeTxStatusTimer.Update(time.Duration(task.servingTime))
   858  				}
   859  			}()
   860  		}
   861  
   862  	default:
   863  		p.Log().Trace("Received invalid message", "code", msg.Code)
   864  		clientErrorMeter.Mark(1)
   865  		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
   866  	}
   867  	// If the client has made too many invalid requests (e.g. requesting non-existent data),
   868  	// reject it to prevent spam attacks.
   869  	if p.getInvalid() > maxRequestErrors {
   870  		clientErrorMeter.Mark(1)
   871  		return errTooManyInvalidRequest
   872  	}
   873  	return nil
   874  }
   875  
   876  // getAccount retrieves an account from the state based on root.
   877  func (h *serverHandler) getAccount(triedb *trie.Database, root, hash common.Hash) (state.Account, error) {
   878  	trie, err := trie.New(root, triedb)
   879  	if err != nil {
   880  		return state.Account{}, err
   881  	}
   882  	blob, err := trie.TryGet(hash[:])
   883  	if err != nil {
   884  		return state.Account{}, err
   885  	}
   886  	var account state.Account
   887  	if err = rlp.DecodeBytes(blob, &account); err != nil {
   888  		return state.Account{}, err
   889  	}
   890  	return account, nil
   891  }
   892  
   893  // getHelperTrie returns the post-processed trie root for the given trie ID and section index
   894  func (h *serverHandler) getHelperTrie(typ uint, index uint64) (common.Hash, string) {
   895  	switch typ {
   896  	case htCanonical:
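        		// The section head is the canonical hash of the last block covered by
        		// CHT section `index`; together with the index it keys the stored root.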
   897  		sectionHead := rawdb.ReadCanonicalHash(h.chainDb, (index+1)*h.server.iConfig.ChtSize-1)
   898  		return light.GetChtRoot(h.chainDb, index, sectionHead), light.ChtTablePrefix
   899  	case htBloomBits:
   900  		sectionHead := rawdb.ReadCanonicalHash(h.chainDb, (index+1)*h.server.iConfig.BloomTrieSize-1)
   901  		return light.GetBloomTrieRoot(h.chainDb, index, sectionHead), light.BloomTrieTablePrefix
   902  	}
   903  	return common.Hash{}, ""
   904  }
   905  
   906  // getAuxiliaryHeaders returns requested auxiliary headers for the CHT request.
   907  func (h *serverHandler) getAuxiliaryHeaders(req HelperTrieReq) []byte {
   908  	if req.Type == htCanonical && req.AuxReq == auxHeader && len(req.Key) == 8 {
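        		// The request key encodes the block number of the requested header as
        		// an 8-byte big-endian integer.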
   909  		blockNum := binary.BigEndian.Uint64(req.Key)
   910  		hash := rawdb.ReadCanonicalHash(h.chainDb, blockNum)
   911  		return rawdb.ReadHeaderRLP(h.chainDb, hash, blockNum)
   912  	}
   913  	return nil
   914  }
   915  
   916  // txStatus returns the status of a specified transaction.
   917  func (h *serverHandler) txStatus(hash common.Hash) light.TxStatus {
   918  	var stat light.TxStatus
   919  	// Look up the transaction in the txpool first.
   920  	stat.Status = h.txpool.Status([]common.Hash{hash})[0]
   921  
   922  	// If the transaction is unknown to the pool, try looking it up locally.
   923  	if stat.Status == core.TxStatusUnknown {
   924  		lookup := h.blockchain.GetTransactionLookup(hash)
   925  		if lookup != nil {
   926  			stat.Status = core.TxStatusIncluded
   927  			stat.Lookup = lookup
   928  		}
   929  	}
   930  	return stat
   931  }
   932  
   933  // broadcastLoop broadcasts new block information to all connected light
   934  // clients. According to the agreement between client and server, the server
   935  // should only broadcast a new announcement if its total difficulty is higher
   936  // than the last one. The server also signs the announcement if the client requires it.
   937  func (h *serverHandler) broadcastLoop() {
   938  	defer h.wg.Done()
   939  
   940  	headCh := make(chan core.ChainHeadEvent, 10)
   941  	headSub := h.blockchain.SubscribeChainHeadEvent(headCh)
   942  	defer headSub.Unsubscribe()
   943  
   944  	var (
   945  		lastHead *types.Header
   946  		lastTd   = common.Big0
   947  	)
   948  	for {
   949  		select {
   950  		case ev := <-headCh:
   951  			header := ev.Block.Header()
   952  			hash, number := header.Hash(), header.Number.Uint64()
   953  			td := h.blockchain.GetTd(hash, number)
   954  			if td == nil || td.Cmp(lastTd) <= 0 {
   955  				continue
   956  			}
   957  			var reorg uint64
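        			// The reorg depth is the distance from the previously announced head
        			// back to the common ancestor of the old and new chains.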
   958  			if lastHead != nil {
   959  				reorg = lastHead.Number.Uint64() - rawdb.FindCommonAncestor(h.chainDb, header, lastHead).Number.Uint64()
   960  			}
   961  			lastHead, lastTd = header, td
   962  			log.Debug("Announcing block to peers", "number", number, "hash", hash, "td", td, "reorg", reorg)
   963  			h.server.broadcaster.broadcast(announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg})
   964  		case <-h.closeCh:
   965  			return
   966  		}
   967  	}
   968  }
   969  
   970  // broadcaster sends new header announcements to active client peers
   971  type broadcaster struct {
   972  	ns                           *nodestate.NodeStateMachine
   973  	privateKey                   *ecdsa.PrivateKey
   974  	lastAnnounce, signedAnnounce announceData
   975  }
   976  
   977  // newBroadcaster creates a new broadcaster
   978  func newBroadcaster(ns *nodestate.NodeStateMachine) *broadcaster {
   979  	b := &broadcaster{ns: ns}
   980  	ns.SubscribeState(priorityPoolSetup.ActiveFlag, func(node *enode.Node, oldState, newState nodestate.Flags) {
   981  		if newState.Equals(priorityPoolSetup.ActiveFlag) {
   982  			// send last announcement to activated peers
   983  			b.sendTo(node)
   984  		}
   985  	})
   986  	return b
   987  }
   988  
   989  // setSignerKey sets the signer key for signed announcements. Should be called before
   990  // starting the protocol handler.
   991  func (b *broadcaster) setSignerKey(privateKey *ecdsa.PrivateKey) {
   992  	b.privateKey = privateKey
   993  }
   994  
   995  // broadcast sends the given announcements to all active peers
   996  func (b *broadcaster) broadcast(announce announceData) {
   997  	b.ns.Operation(func() {
   998  		// iterate in an Operation to ensure that the active set does not change while iterating
   999  		b.lastAnnounce = announce
  1000  		b.ns.ForEach(priorityPoolSetup.ActiveFlag, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
  1001  			b.sendTo(node)
  1002  		})
  1003  	})
  1004  }
  1005  
  1006  // sendTo sends the most recent announcement to the given node unless the same or higher Td
  1007  // announcement has already been sent.
  1008  func (b *broadcaster) sendTo(node *enode.Node) {
  1009  	if b.lastAnnounce.Td == nil {
  1010  		return
  1011  	}
  1012  	if p, _ := b.ns.GetField(node, clientPeerField).(*clientPeer); p != nil {
  1013  		if p.headInfo.Td == nil || b.lastAnnounce.Td.Cmp(p.headInfo.Td) > 0 {
  1014  			announce := b.lastAnnounce
  1015  			switch p.announceType {
  1016  			case announceTypeSimple:
  1017  				if !p.queueSend(func() { p.sendAnnounce(announce) }) {
  1018  					log.Debug("Drop announcement because queue is full", "number", announce.Number, "hash", announce.Hash)
  1019  				} else {
  1020  					log.Debug("Sent announcement", "number", announce.Number, "hash", announce.Hash)
  1021  				}
  1022  			case announceTypeSigned:
  1023  				if b.signedAnnounce.Hash != b.lastAnnounce.Hash {
  1024  					b.signedAnnounce = b.lastAnnounce
  1025  					b.signedAnnounce.sign(b.privateKey)
  1026  				}
  1027  				announce := b.signedAnnounce
  1028  				if !p.queueSend(func() { p.sendAnnounce(announce) }) {
  1029  					log.Debug("Drop announcement because queue is full", "number", announce.Number, "hash", announce.Hash)
  1030  				} else {
  1031  					log.Debug("Sent announcement", "number", announce.Number, "hash", announce.Hash)
  1032  				}
  1033  			}
  1034  			p.headInfo = blockInfo{b.lastAnnounce.Hash, b.lastAnnounce.Number, b.lastAnnounce.Td}
  1035  		}
  1036  	}
  1037  }