github.com/ethereum/go-ethereum@v1.16.1/eth/protocols/snap/handler.go (about)

     1  // Copyright 2020 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snap
    18  
    19  import (
    20  	"bytes"
    21  	"fmt"
    22  	"time"
    23  
    24  	"github.com/ethereum/go-ethereum/common"
    25  	"github.com/ethereum/go-ethereum/core"
    26  	"github.com/ethereum/go-ethereum/core/rawdb"
    27  	"github.com/ethereum/go-ethereum/core/state/snapshot"
    28  	"github.com/ethereum/go-ethereum/core/types"
    29  	"github.com/ethereum/go-ethereum/log"
    30  	"github.com/ethereum/go-ethereum/metrics"
    31  	"github.com/ethereum/go-ethereum/p2p"
    32  	"github.com/ethereum/go-ethereum/p2p/enode"
    33  	"github.com/ethereum/go-ethereum/p2p/enr"
    34  	"github.com/ethereum/go-ethereum/trie"
    35  	"github.com/ethereum/go-ethereum/trie/trienode"
    36  	"github.com/ethereum/go-ethereum/triedb/database"
    37  )
    38  
const (
	// softResponseLimit is the target maximum size (in bytes) of replies to
	// data retrievals. Responses may slightly exceed it (see stateLookupSlack).
	softResponseLimit = 2 * 1024 * 1024

	// maxCodeLookups is the maximum number of bytecodes to serve. This number is
	// there to limit the number of disk lookups.
	maxCodeLookups = 1024

	// stateLookupSlack defines the ratio by how much a state response can exceed
	// the requested limit in order to try and avoid breaking up contracts into
	// multiple packages and proving them.
	stateLookupSlack = 0.1

	// maxTrieNodeLookups is the maximum number of state trie nodes to serve. This
	// number is there to limit the number of disk lookups.
	maxTrieNodeLookups = 1024

	// maxTrieNodeTimeSpent is the maximum time we should spend on looking up trie nodes.
	// If we spend too much time, then it's a fairly high chance of timing out
	// at the remote side, which means all the work is in vain.
	maxTrieNodeTimeSpent = 5 * time.Second
)
    61  
// Handler is a callback to invoke from an outside runner after the boilerplate
// exchanges have passed. It receives the handshaked peer and should block for
// the lifetime of the connection, returning the error that tears it down.
type Handler func(peer *Peer) error
    65  
// Backend defines the data retrieval methods to serve remote requests and the
// callback methods to invoke on remote deliveries.
type Backend interface {
	// Chain retrieves the blockchain object to serve data.
	Chain() *core.BlockChain

	// RunPeer is invoked when a peer joins on the `eth` protocol. The handler
	// should do any peer maintenance work, handshakes and validations. If all
	// is passed, control should be given back to the `handler` to process the
	// inbound messages going forward.
	RunPeer(peer *Peer, handler Handler) error

	// PeerInfo retrieves all known `snap` information about a peer.
	PeerInfo(id enode.ID) interface{}

	// Handle is a callback to be invoked when a data packet is received from
	// the remote peer. Only packets not consumed by the protocol handler will
	// be forwarded to the backend (here: the response packet types).
	Handle(peer *Peer, packet Packet) error
}
    86  
    87  // MakeProtocols constructs the P2P protocol definitions for `snap`.
    88  func MakeProtocols(backend Backend) []p2p.Protocol {
    89  	protocols := make([]p2p.Protocol, len(ProtocolVersions))
    90  	for i, version := range ProtocolVersions {
    91  		protocols[i] = p2p.Protocol{
    92  			Name:    ProtocolName,
    93  			Version: version,
    94  			Length:  protocolLengths[version],
    95  			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
    96  				return backend.RunPeer(NewPeer(version, p, rw), func(peer *Peer) error {
    97  					return Handle(backend, peer)
    98  				})
    99  			},
   100  			NodeInfo: func() interface{} {
   101  				return nodeInfo(backend.Chain())
   102  			},
   103  			PeerInfo: func(id enode.ID) interface{} {
   104  				return backend.PeerInfo(id)
   105  			},
   106  			Attributes: []enr.Entry{&enrEntry{}},
   107  		}
   108  	}
   109  	return protocols
   110  }
   111  
   112  // Handle is the callback invoked to manage the life cycle of a `snap` peer.
   113  // When this function terminates, the peer is disconnected.
   114  func Handle(backend Backend, peer *Peer) error {
   115  	for {
   116  		if err := HandleMessage(backend, peer); err != nil {
   117  			peer.Log().Debug("Message handling failed in `snap`", "err", err)
   118  			return err
   119  		}
   120  	}
   121  }
   122  
// HandleMessage is invoked whenever an inbound message is received from a
// remote peer on the `snap` protocol. The remote connection is torn down upon
// returning any error.
//
// Request messages (Get*) are served inline from the local chain; response
// messages are validated and forwarded to the backend for consumption.
func HandleMessage(backend Backend, peer *Peer) error {
	// Read the next message from the remote peer, and ensure it's fully consumed
	msg, err := peer.rw.ReadMsg()
	if err != nil {
		return err
	}
	if msg.Size > maxMessageSize {
		return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
	}
	defer msg.Discard()
	// Snapshot the arrival time: also passed to the trie-node service routine
	// so it can enforce maxTrieNodeTimeSpent relative to request receipt.
	start := time.Now()
	// Track the amount of time it takes to serve the request and run the handler
	if metrics.Enabled() {
		h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
		defer func(start time.Time) {
			sampler := func() metrics.Sample {
				return metrics.ResettingSample(
					metrics.NewExpDecaySample(1028, 0.015),
				)
			}
			metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds())
		}(start)
	}
	// Handle the message depending on its contents
	switch {
	case msg.Code == GetAccountRangeMsg:
		// Decode the account retrieval request
		var req GetAccountRangePacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		accounts, proofs := ServiceGetAccountRangeQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{
			ID:       req.ID,
			Accounts: accounts,
			Proof:    proofs,
		})

	case msg.Code == AccountRangeMsg:
		// A range of accounts arrived to one of our previous requests
		res := new(AccountRangePacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the range is monotonically increasing: a malformed ordering
		// would corrupt downstream range processing, so reject it here.
		for i := 1; i < len(res.Accounts); i++ {
			if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
				return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
			}
		}
		// Mark the request as answered before delivering the payload.
		requestTracker.Fulfil(peer.id, peer.version, AccountRangeMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetStorageRangesMsg:
		// Decode the storage retrieval request
		var req GetStorageRangesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		slots, proofs := ServiceGetStorageRangesQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{
			ID:    req.ID,
			Slots: slots,
			Proof: proofs,
		})

	case msg.Code == StorageRangesMsg:
		// A range of storage slots arrived to one of our previous requests
		res := new(StorageRangesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the ranges are monotonically increasing (per account)
		for i, slots := range res.Slots {
			for j := 1; j < len(slots); j++ {
				if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
					return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
				}
			}
		}
		requestTracker.Fulfil(peer.id, peer.version, StorageRangesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetByteCodesMsg:
		// Decode bytecode retrieval request
		var req GetByteCodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		codes := ServiceGetByteCodesQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{
			ID:    req.ID,
			Codes: codes,
		})

	case msg.Code == ByteCodesMsg:
		// A batch of byte codes arrived to one of our previous requests.
		// No ordering invariant applies here, so no local validation.
		res := new(ByteCodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		requestTracker.Fulfil(peer.id, peer.version, ByteCodesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetTrieNodesMsg:
		// Decode trie node retrieval request
		var req GetTrieNodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors.
		// Unlike the other services, this one can also fail hard (bad request),
		// which drops the peer instead of sending an empty reply.
		nodes, err := ServiceGetTrieNodesQuery(backend.Chain(), &req, start)
		if err != nil {
			return err
		}
		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{
			ID:    req.ID,
			Nodes: nodes,
		})

	case msg.Code == TrieNodesMsg:
		// A batch of trie nodes arrived to one of our previous requests
		res := new(TrieNodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		requestTracker.Fulfil(peer.id, peer.version, TrieNodesMsg, res.ID)

		return backend.Handle(peer, res)

	default:
		return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code)
	}
}
   273  
   274  // ServiceGetAccountRangeQuery assembles the response to an account range query.
   275  // It is exposed to allow external packages to test protocol behavior.
   276  func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePacket) ([]*AccountData, [][]byte) {
   277  	if req.Bytes > softResponseLimit {
   278  		req.Bytes = softResponseLimit
   279  	}
   280  	// Retrieve the requested state and bail out if non existent
   281  	tr, err := trie.New(trie.StateTrieID(req.Root), chain.TrieDB())
   282  	if err != nil {
   283  		return nil, nil
   284  	}
   285  	// Temporary solution: using the snapshot interface for both cases.
   286  	// This can be removed once the hash scheme is deprecated.
   287  	var it snapshot.AccountIterator
   288  	if chain.TrieDB().Scheme() == rawdb.HashScheme {
   289  		// The snapshot is assumed to be available in hash mode if
   290  		// the SNAP protocol is enabled.
   291  		it, err = chain.Snapshots().AccountIterator(req.Root, req.Origin)
   292  	} else {
   293  		it, err = chain.TrieDB().AccountIterator(req.Root, req.Origin)
   294  	}
   295  	if err != nil {
   296  		return nil, nil
   297  	}
   298  	// Iterate over the requested range and pile accounts up
   299  	var (
   300  		accounts []*AccountData
   301  		size     uint64
   302  		last     common.Hash
   303  	)
   304  	for it.Next() {
   305  		hash, account := it.Hash(), common.CopyBytes(it.Account())
   306  
   307  		// Track the returned interval for the Merkle proofs
   308  		last = hash
   309  
   310  		// Assemble the reply item
   311  		size += uint64(common.HashLength + len(account))
   312  		accounts = append(accounts, &AccountData{
   313  			Hash: hash,
   314  			Body: account,
   315  		})
   316  		// If we've exceeded the request threshold, abort
   317  		if bytes.Compare(hash[:], req.Limit[:]) >= 0 {
   318  			break
   319  		}
   320  		if size > req.Bytes {
   321  			break
   322  		}
   323  	}
   324  	it.Release()
   325  
   326  	// Generate the Merkle proofs for the first and last account
   327  	proof := trienode.NewProofSet()
   328  	if err := tr.Prove(req.Origin[:], proof); err != nil {
   329  		log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
   330  		return nil, nil
   331  	}
   332  	if last != (common.Hash{}) {
   333  		if err := tr.Prove(last[:], proof); err != nil {
   334  			log.Warn("Failed to prove account range", "last", last, "err", err)
   335  			return nil, nil
   336  		}
   337  	}
   338  	return accounts, proof.List()
   339  }
   340  
// ServiceGetStorageRangesQuery assembles the response to a storage ranges query.
// It is exposed to allow external packages to test protocol behavior.
func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesPacket) ([][]*StorageData, [][]byte) {
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	// TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set?
	// TODO(karalabe):   - Logging locally is not ideal as remote faults annoy the local user
	// TODO(karalabe):   - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional)

	// Calculate the hard limit at which to abort, even if mid storage trie
	hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack))

	// Retrieve storage ranges until the packet limit is reached
	var (
		slots  [][]*StorageData
		proofs [][]byte
		size   uint64
	)
	for _, account := range req.Accounts {
		// If we've exceeded the requested data limit, abort without opening
		// a new storage range (that we'd need to prove due to exceeded size)
		if size >= req.Bytes {
			break
		}
		// The first account might start from a different origin and end sooner.
		// Origin/limit are consumed (nil-ed) here, so they only ever apply to
		// the first account processed.
		var origin common.Hash
		if len(req.Origin) > 0 {
			origin, req.Origin = common.BytesToHash(req.Origin), nil
		}
		var limit = common.MaxHash
		if len(req.Limit) > 0 {
			limit, req.Limit = common.BytesToHash(req.Limit), nil
		}
		// Retrieve the requested state and bail out if non existent
		var (
			err error
			it  snapshot.StorageIterator
		)
		// Temporary solution: using the snapshot interface for both cases.
		// This can be removed once the hash scheme is deprecated.
		if chain.TrieDB().Scheme() == rawdb.HashScheme {
			// The snapshot is assumed to be available in hash mode if
			// the SNAP protocol is enabled.
			it, err = chain.Snapshots().StorageIterator(req.Root, account, origin)
		} else {
			it, err = chain.TrieDB().StorageIterator(req.Root, account, origin)
		}
		if err != nil {
			return nil, nil
		}
		// Iterate over the requested range and pile slots up
		var (
			storage []*StorageData
			last    common.Hash
			abort   bool
		)
		for it.Next() {
			// Hard cap (soft limit plus slack) stops mid-trie; remember that
			// we aborted so the capped range gets proven below.
			if size >= hardLimit {
				abort = true
				break
			}
			hash, slot := it.Hash(), common.CopyBytes(it.Slot())

			// Track the returned interval for the Merkle proofs
			last = hash

			// Assemble the reply item
			size += uint64(common.HashLength + len(slot))
			storage = append(storage, &StorageData{
				Hash: hash,
				Body: slot,
			})
			// If we've exceeded the request threshold, abort
			if bytes.Compare(hash[:], limit[:]) >= 0 {
				break
			}
		}
		if len(storage) > 0 {
			slots = append(slots, storage)
		}
		it.Release()

		// Generate the Merkle proofs for the first and last storage slot, but
		// only if the response was capped. If the entire storage trie included
		// in the response, no need for any proofs.
		if origin != (common.Hash{}) || (abort && len(storage) > 0) {
			// Request started at a non-zero hash or was capped prematurely, add
			// the endpoint Merkle proofs
			accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), chain.TrieDB())
			if err != nil {
				return nil, nil
			}
			acc, err := accTrie.GetAccountByHash(account)
			if err != nil || acc == nil {
				return nil, nil
			}
			id := trie.StorageTrieID(req.Root, account, acc.Root)
			stTrie, err := trie.NewStateTrie(id, chain.TrieDB())
			if err != nil {
				return nil, nil
			}
			proof := trienode.NewProofSet()
			if err := stTrie.Prove(origin[:], proof); err != nil {
				log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err)
				return nil, nil
			}
			if last != (common.Hash{}) {
				if err := stTrie.Prove(last[:], proof); err != nil {
					log.Warn("Failed to prove storage range", "last", last, "err", err)
					return nil, nil
				}
			}
			proofs = append(proofs, proof.List()...)
			// Proof terminates the reply as proofs are only added if a node
			// refuses to serve more data (exception when a contract fetch is
			// finishing, but that's that).
			break
		}
	}
	return slots, proofs
}
   461  
   462  // ServiceGetByteCodesQuery assembles the response to a byte codes query.
   463  // It is exposed to allow external packages to test protocol behavior.
   464  func ServiceGetByteCodesQuery(chain *core.BlockChain, req *GetByteCodesPacket) [][]byte {
   465  	if req.Bytes > softResponseLimit {
   466  		req.Bytes = softResponseLimit
   467  	}
   468  	if len(req.Hashes) > maxCodeLookups {
   469  		req.Hashes = req.Hashes[:maxCodeLookups]
   470  	}
   471  	// Retrieve bytecodes until the packet size limit is reached
   472  	var (
   473  		codes [][]byte
   474  		bytes uint64
   475  	)
   476  	for _, hash := range req.Hashes {
   477  		if hash == types.EmptyCodeHash {
   478  			// Peers should not request the empty code, but if they do, at
   479  			// least sent them back a correct response without db lookups
   480  			codes = append(codes, []byte{})
   481  		} else if blob := chain.ContractCodeWithPrefix(hash); len(blob) > 0 {
   482  			codes = append(codes, blob)
   483  			bytes += uint64(len(blob))
   484  		}
   485  		if bytes > req.Bytes {
   486  			break
   487  		}
   488  	}
   489  	return codes
   490  }
   491  
// ServiceGetTrieNodesQuery assembles the response to a trie nodes query.
// It is exposed to allow external packages to test protocol behavior.
//
// The `start` time is when the request was received; lookups abort once
// maxTrieNodeTimeSpent has elapsed to avoid doing work the remote will
// have timed out on anyway.
func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, start time.Time) ([][]byte, error) {
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	// Make sure we have the state associated with the request
	triedb := chain.TrieDB()

	accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), triedb)
	if err != nil {
		// We don't have the requested state available, bail out
		return nil, nil
	}
	// The 'reader' might be nil, in which case we cannot serve storage slots
	// via snapshot.
	var reader database.StateReader
	if chain.Snapshots() != nil {
		reader = chain.Snapshots().Snapshot(req.Root)
	}
	if reader == nil {
		reader, _ = triedb.StateReader(req.Root)
	}
	// Retrieve trie nodes until the packet size limit is reached
	var (
		nodes [][]byte
		bytes uint64
		loads int // Trie hash expansions to count database reads
	)
	for _, pathset := range req.Paths {
		switch len(pathset) {
		case 0:
			// Ensure we penalize invalid requests: a hard error here drops
			// the peer instead of returning an empty reply.
			return nil, fmt.Errorf("%w: zero-item pathset requested", errBadRequest)

		case 1:
			// If we're only retrieving an account trie node, fetch it directly.
			// A failed lookup only skips this pathset (break exits the switch).
			blob, resolved, err := accTrie.GetNode(pathset[0])
			loads += resolved // always account database reads, even for failures
			if err != nil {
				break
			}
			nodes = append(nodes, blob)
			bytes += uint64(len(blob))

		default:
			var stRoot common.Hash

			// Storage slots requested, open the storage trie and retrieve from there
			if reader == nil {
				// We don't have the requested state snapshotted yet (or it is stale),
				// but can look up the account via the trie instead.
				account, err := accTrie.GetAccountByHash(common.BytesToHash(pathset[0]))
				loads += 8 // We don't know the exact cost of lookup, this is an estimate
				if err != nil || account == nil {
					break
				}
				stRoot = account.Root
			} else {
				account, err := reader.Account(common.BytesToHash(pathset[0]))
				loads++ // always account database reads, even for failures
				if err != nil || account == nil {
					break
				}
				stRoot = common.BytesToHash(account.Root)
			}
			id := trie.StorageTrieID(req.Root, common.BytesToHash(pathset[0]), stRoot)
			stTrie, err := trie.NewStateTrie(id, triedb)
			loads++ // always account database reads, even for failures
			if err != nil {
				break
			}
			for _, path := range pathset[1:] {
				blob, resolved, err := stTrie.GetNode(path)
				loads += resolved // always account database reads, even for failures
				if err != nil {
					break
				}
				nodes = append(nodes, blob)
				bytes += uint64(len(blob))

				// Sanity check limits to avoid DoS on the store trie loads.
				// This only exits the path loop; the outer check below then
				// terminates the whole request.
				if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
					break
				}
			}
		}
		// Abort request processing if we've exceeded our limits
		if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
			break
		}
	}
	return nodes, nil
}
   586  
// NodeInfo represents a short summary of the `snap` sub-protocol metadata
// known about the host peer. It is currently empty: `snap` exposes no
// protocol-specific metadata.
type NodeInfo struct{}
   590  
   591  // nodeInfo retrieves some `snap` protocol metadata about the running host node.
   592  func nodeInfo(chain *core.BlockChain) *NodeInfo {
   593  	return &NodeInfo{}
   594  }