github.com/yuanzimu/bsc@v1.1.4/eth/protocols/snap/handler.go

// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snap

import (
	"bytes"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
)

const (
	// softResponseLimit is the target maximum size of replies to data retrievals.
	softResponseLimit = 2 * 1024 * 1024

	// maxCodeLookups is the maximum number of bytecodes to serve. This number is
	// there to limit the number of disk lookups.
	maxCodeLookups = 1024

	// stateLookupSlack defines the ratio by which a state response may exceed
	// the requested limit, in order to try and avoid breaking up contracts into
	// multiple packets and proving them.
	stateLookupSlack = 0.1

	// maxTrieNodeLookups is the maximum number of state trie nodes to serve. This
	// number is there to limit the number of disk lookups.
	maxTrieNodeLookups = 1024

	// maxTrieNodeTimeSpent is the maximum time we should spend on looking up trie nodes.
	// If we spend too much time, then there's a fairly high chance of timing out
	// at the remote side, which means all the work is in vain.
	maxTrieNodeTimeSpent = 5 * time.Second
)

// Handler is a callback to invoke from an outside runner after the boilerplate
// exchanges have passed.
type Handler func(peer *Peer) error

// Backend defines the data retrieval methods to serve remote requests and the
// callback methods to invoke on remote deliveries.
type Backend interface {
	// Chain retrieves the blockchain object to serve data.
	Chain() *core.BlockChain

	// RunPeer is invoked when a peer joins on the `snap` protocol. The handler
	// should do any peer maintenance work, handshakes and validations. If all
	// passes, control should be given back to the `handler` to process the
	// inbound messages going forward.
	RunPeer(peer *Peer, handler Handler) error

	// PeerInfo retrieves all known `snap` information about a peer.
	PeerInfo(id enode.ID) interface{}

	// Handle is a callback to be invoked when a data packet is received from
	// the remote peer. Only packets not consumed by the protocol handler will
	// be forwarded to the backend.
	Handle(peer *Peer, packet Packet) error
}
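
// A minimal sketch of the delivery side of a Backend implementation. The type
// name and the downstream sync plumbing are hypothetical; only the packet
// types come from this package:
//
//	func (b *ethBackend) Handle(peer *Peer, packet Packet) error {
//		switch packet := packet.(type) {
//		case *AccountRangePacket:
//			// hand the accounts off to the state sync scheduler
//		case *StorageRangesPacket:
//			// hand the storage slots off to the state sync scheduler
//		case *ByteCodesPacket:
//			// hand the bytecodes off to the state sync scheduler
//		case *TrieNodesPacket:
//			// hand the trie nodes off to the state healer
//		default:
//			return fmt.Errorf("unexpected snap packet type: %T", packet)
//		}
//		return nil
//	}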

// MakeProtocols constructs the P2P protocol definitions for `snap`.
func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {
	// Filter the discovery iterator for nodes advertising snap support.
	dnsdisc = enode.Filter(dnsdisc, func(n *enode.Node) bool {
		var snap enrEntry
		return n.Load(&snap) == nil
	})

	protocols := make([]p2p.Protocol, len(ProtocolVersions))
	for i, version := range ProtocolVersions {
		version := version // Closure

		protocols[i] = p2p.Protocol{
			Name:    ProtocolName,
			Version: version,
			Length:  protocolLengths[version],
			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
				return backend.RunPeer(newPeer(version, p, rw), func(peer *Peer) error {
					return handle(backend, peer)
				})
			},
			NodeInfo: func() interface{} {
				return nodeInfo(backend.Chain())
			},
			PeerInfo: func(id enode.ID) interface{} {
				return backend.PeerInfo(id)
			},
			Attributes:     []enr.Entry{&enrEntry{}},
			DialCandidates: dnsdisc,
		}
	}
	return protocols
}
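
// The protocol definitions above are meant to be appended to a devp2p server
// configuration next to the `eth` protocols. A minimal usage sketch, assuming
// a Backend implementation `ethBackend` and a discovery iterator `disc` (both
// hypothetical names, not part of this package):
//
//	cfg := p2p.Config{MaxPeers: 50}
//	cfg.Protocols = append(cfg.Protocols, MakeProtocols(ethBackend, disc)...)
//	srv := &p2p.Server{Config: cfg}
//	if err := srv.Start(); err != nil {
//		log.Crit("Failed to start p2p server", "err", err)
//	}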

// handle is the callback invoked to manage the life cycle of a `snap` peer.
// When this function terminates, the peer is disconnected.
func handle(backend Backend, peer *Peer) error {
	for {
		if err := handleMessage(backend, peer); err != nil {
			peer.Log().Debug("Message handling failed in `snap`", "err", err)
			return err
		}
	}
}

// handleMessage is invoked whenever an inbound message is received from a
// remote peer on the `snap` protocol. The remote connection is torn down upon
// returning any error.
func handleMessage(backend Backend, peer *Peer) error {
	// Read the next message from the remote peer, and ensure it's fully consumed
	msg, err := peer.rw.ReadMsg()
	if err != nil {
		return err
	}
	if msg.Size > maxMessageSize {
		return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
	}
	defer msg.Discard()
	start := time.Now()
	// Track the amount of time it takes to serve the request and run the handler
	if metrics.Enabled {
		h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
		defer func(start time.Time) {
			sampler := func() metrics.Sample {
				return metrics.ResettingSample(
					metrics.NewExpDecaySample(1028, 0.015),
				)
			}
			metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds())
		}(start)
	}
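	// (E.g. a GetAccountRangeMsg served over snap/1 is tracked under a
	// histogram named from the format above -- p2p.HandleHistName, the
	// protocol name, the version and the hex message code -- so per-message
	// service times can be inspected individually.)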
	// Handle the message depending on its contents
	switch {
	case msg.Code == GetAccountRangeMsg:
		// Decode the account retrieval request
		var req GetAccountRangePacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		if req.Bytes > softResponseLimit {
			req.Bytes = softResponseLimit
		}
		// Retrieve the requested state and bail out if non-existent
		tr, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
		if err != nil {
			return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
		}
		it, err := backend.Chain().Snapshots().AccountIterator(req.Root, req.Origin)
		if err != nil {
			return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
		}
		// Iterate over the requested range and pile accounts up
		var (
			accounts []*AccountData
			size     uint64
			last     common.Hash
		)
		for it.Next() && size < req.Bytes {
			hash, account := it.Hash(), common.CopyBytes(it.Account())

			// Track the returned interval for the Merkle proofs
			last = hash

			// Assemble the reply item
			size += uint64(common.HashLength + len(account))
			accounts = append(accounts, &AccountData{
				Hash: hash,
				Body: account,
			})
			// If we've exceeded the request threshold, abort
			if bytes.Compare(hash[:], req.Limit[:]) >= 0 {
				break
			}
		}
		it.Release()

		// Generate the Merkle proofs for the first and last account
		proof := light.NewNodeSet()
		if err := tr.Prove(req.Origin[:], 0, proof); err != nil {
			log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
			return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
		}
		if last != (common.Hash{}) {
			if err := tr.Prove(last[:], 0, proof); err != nil {
				log.Warn("Failed to prove account range", "last", last, "err", err)
				return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
			}
		}
		var proofs [][]byte
		for _, blob := range proof.NodeList() {
			proofs = append(proofs, blob)
		}
		// Send back anything accumulated
		return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{
			ID:       req.ID,
			Accounts: accounts,
			Proof:    proofs,
		})

	case msg.Code == AccountRangeMsg:
		// A range of accounts arrived in response to one of our previous requests
		res := new(AccountRangePacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the range is monotonically increasing
		for i := 1; i < len(res.Accounts); i++ {
			if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
				return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
			}
		}
		requestTracker.Fulfil(peer.id, peer.version, AccountRangeMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetStorageRangesMsg:
		// Decode the storage retrieval request
		var req GetStorageRangesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		if req.Bytes > softResponseLimit {
			req.Bytes = softResponseLimit
		}
		// TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set?
		// TODO(karalabe):   - Logging locally is not ideal as remote faults annoy the local user
		// TODO(karalabe):   - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional)

		// Calculate the hard limit at which to abort, even if mid storage trie
		hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack))
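		// (e.g. a request for the full softResponseLimit of 2 MiB may grow to
		// roughly 2.2 MiB before iteration is aborted mid-trie)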

		// Retrieve storage ranges until the packet limit is reached
		var (
			slots  [][]*StorageData
			proofs [][]byte
			size   uint64
		)
		for _, account := range req.Accounts {
			// If we've exceeded the requested data limit, abort without opening
			// a new storage range (that we'd need to prove due to exceeded size)
			if size >= req.Bytes {
				break
			}
			// The first account might start from a different origin and end sooner
			var origin common.Hash
			if len(req.Origin) > 0 {
				origin, req.Origin = common.BytesToHash(req.Origin), nil
			}
			var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
			if len(req.Limit) > 0 {
				limit, req.Limit = common.BytesToHash(req.Limit), nil
			}
			// Retrieve the requested state and bail out if non-existent
			it, err := backend.Chain().Snapshots().StorageIterator(req.Root, account, origin)
			if err != nil {
				return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
			}
			// Iterate over the requested range and pile slots up
			var (
				storage []*StorageData
				last    common.Hash
				abort   bool
			)
			for it.Next() {
				if size >= hardLimit {
					abort = true
					break
				}
				hash, slot := it.Hash(), common.CopyBytes(it.Slot())

				// Track the returned interval for the Merkle proofs
				last = hash

				// Assemble the reply item
				size += uint64(common.HashLength + len(slot))
				storage = append(storage, &StorageData{
					Hash: hash,
					Body: slot,
				})
				// If we've exceeded the request threshold, abort
				if bytes.Compare(hash[:], limit[:]) >= 0 {
					break
				}
			}
			slots = append(slots, storage)
			it.Release()

			// Generate the Merkle proofs for the first and last storage slot, but
			// only if the response was capped. If the entire storage trie is
			// included in the response, there's no need for any proofs.
			if origin != (common.Hash{}) || abort {
				// Request started at a non-zero hash or was capped prematurely, add
				// the endpoint Merkle proofs
				accTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
				if err != nil {
					return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
				}
				var acc state.Account
				if err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil {
					return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
				}
				stTrie, err := trie.New(acc.Root, backend.Chain().StateCache().TrieDB())
				if err != nil {
					return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
				}
				proof := light.NewNodeSet()
				if err := stTrie.Prove(origin[:], 0, proof); err != nil {
					log.Warn("Failed to prove storage range", "origin", origin, "err", err)
					return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
				}
				if last != (common.Hash{}) {
					if err := stTrie.Prove(last[:], 0, proof); err != nil {
						log.Warn("Failed to prove storage range", "last", last, "err", err)
						return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
					}
				}
				for _, blob := range proof.NodeList() {
					proofs = append(proofs, blob)
				}
				// A proof terminates the reply, since proofs are only added when a
				// node refuses to serve more data (the exception being a contract
				// fetch that is just finishing, but that's fine).
				break
			}
		}
		// Send back anything accumulated
		return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{
			ID:    req.ID,
			Slots: slots,
			Proof: proofs,
		})

	case msg.Code == StorageRangesMsg:
		// A range of storage slots arrived in response to one of our previous requests
		res := new(StorageRangesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the ranges are monotonically increasing
		for i, slots := range res.Slots {
			for j := 1; j < len(slots); j++ {
				if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
					return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
				}
			}
		}
		requestTracker.Fulfil(peer.id, peer.version, StorageRangesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetByteCodesMsg:
		// Decode bytecode retrieval request
		var req GetByteCodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		if req.Bytes > softResponseLimit {
			req.Bytes = softResponseLimit
		}
		if len(req.Hashes) > maxCodeLookups {
			req.Hashes = req.Hashes[:maxCodeLookups]
		}
		// Retrieve bytecodes until the packet size limit is reached
		var (
			codes [][]byte
			bytes uint64
		)
		for _, hash := range req.Hashes {
			if hash == emptyCode {
				// Peers should not request the empty code, but if they do, at
				// least send them back a correct response without db lookups
				codes = append(codes, []byte{})
			} else if blob, err := backend.Chain().ContractCode(hash); err == nil {
				codes = append(codes, blob)
				bytes += uint64(len(blob))
			}
			if bytes > req.Bytes {
				break
			}
		}
		// Send back anything accumulated
		return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{
			ID:    req.ID,
			Codes: codes,
		})

	case msg.Code == ByteCodesMsg:
		// A batch of bytecodes arrived in response to one of our previous requests
		res := new(ByteCodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		requestTracker.Fulfil(peer.id, peer.version, ByteCodesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetTrieNodesMsg:
		// Decode trie node retrieval request
		var req GetTrieNodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		if req.Bytes > softResponseLimit {
			req.Bytes = softResponseLimit
		}
		// Make sure we have the state associated with the request
		triedb := backend.Chain().StateCache().TrieDB()

		accTrie, err := trie.NewSecure(req.Root, triedb)
		if err != nil {
			// We don't have the requested state available, bail out
			return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})
		}
		snap := backend.Chain().Snapshots().Snapshot(req.Root)
		if snap == nil {
			// We don't have the requested state snapshotted yet, bail out.
			// In reality we could still serve using the account and storage
			// tries only, but let's protect the node a bit while it's doing
			// snapshot generation.
			return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})
		}
		// Retrieve trie nodes until the packet size limit is reached
		var (
			nodes [][]byte
			bytes uint64
			loads int // Trie hash expansions to count database reads
		)
		for _, pathset := range req.Paths {
			switch len(pathset) {
			case 0:
				// Ensure we penalize invalid requests
				return fmt.Errorf("%w: zero-item pathset requested", errBadRequest)

			case 1:
				// If we're only retrieving an account trie node, fetch it directly
				blob, resolved, err := accTrie.TryGetNode(pathset[0])
				loads += resolved // always account for database reads, even on failures
				if err != nil {
					break
				}
				nodes = append(nodes, blob)
				bytes += uint64(len(blob))

			default:
				// Storage slots requested, open the storage trie and retrieve from there
				account, err := snap.Account(common.BytesToHash(pathset[0]))
				loads++ // always account for database reads, even on failures
				if err != nil {
					break
				}
				stTrie, err := trie.NewSecure(common.BytesToHash(account.Root), triedb)
				loads++ // always account for database reads, even on failures
				if err != nil {
					break
				}
				for _, path := range pathset[1:] {
					blob, resolved, err := stTrie.TryGetNode(path)
					loads += resolved // always account for database reads, even on failures
					if err != nil {
						break
					}
					nodes = append(nodes, blob)
					bytes += uint64(len(blob))

					// Sanity check limits to avoid DoS on the store trie loads
					if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
						break
					}
				}
			}
			// Abort request processing if we've exceeded our limits
			if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
				break
			}
		}
		// Send back anything accumulated
		return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{
			ID:    req.ID,
			Nodes: nodes,
		})

	case msg.Code == TrieNodesMsg:
		// A batch of trie nodes arrived in response to one of our previous requests
		res := new(TrieNodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		requestTracker.Fulfil(peer.id, peer.version, TrieNodesMsg, res.ID)

		return backend.Handle(peer, res)

	default:
		return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code)
	}
}
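
// For completeness, the requesting side of an exchange served above: the
// client picks a request ID, registers it with the request tracker (mirroring
// the Fulfil calls above; the exact Track signature is assumed here) and fires
// off the packet. A minimal sketch, with `root`, `origin` and `limit` assumed
// to be in scope:
//
//	var id uint64 = 1 // any unique, not-yet-fulfilled request id
//	requestTracker.Track(peer.id, peer.version, GetAccountRangeMsg, AccountRangeMsg, id)
//	return p2p.Send(peer.rw, GetAccountRangeMsg, &GetAccountRangePacket{
//		ID:     id,
//		Root:   root,   // state root to fetch accounts from
//		Origin: origin, // first account hash in the requested range
//		Limit:  limit,  // last account hash in the requested range
//		Bytes:  softResponseLimit,
//	})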

// NodeInfo represents a short summary of the `snap` sub-protocol metadata
// known about the host peer.
type NodeInfo struct{}

// nodeInfo retrieves some `snap` protocol metadata about the running host node.
func nodeInfo(chain *core.BlockChain) *NodeInfo {
	return &NodeInfo{}
}