github.com/theQRL/go-zond@v0.1.1/zond/protocols/snap/handler.go

// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snap

import (
	"bytes"
	"fmt"
	"time"

	"github.com/theQRL/go-zond/common"
	"github.com/theQRL/go-zond/core"
	"github.com/theQRL/go-zond/core/types"
	"github.com/theQRL/go-zond/light"
	"github.com/theQRL/go-zond/log"
	"github.com/theQRL/go-zond/metrics"
	"github.com/theQRL/go-zond/p2p"
	"github.com/theQRL/go-zond/p2p/enode"
	"github.com/theQRL/go-zond/p2p/enr"
	"github.com/theQRL/go-zond/trie"
)

const (
	// softResponseLimit is the target maximum size of replies to data retrievals.
	softResponseLimit = 2 * 1024 * 1024

	// maxCodeLookups is the maximum number of bytecodes to serve. This number is
	// there to limit the number of disk lookups.
	maxCodeLookups = 1024

	// stateLookupSlack defines the ratio by how much a state response can exceed
	// the requested limit in order to try and avoid breaking up contracts into
	// multiple packets and proving them.
	stateLookupSlack = 0.1

	// maxTrieNodeLookups is the maximum number of state trie nodes to serve. This
	// number is there to limit the number of disk lookups.
	maxTrieNodeLookups = 1024

	// maxTrieNodeTimeSpent is the maximum time we should spend on looking up trie nodes.
	// If we spend too much time, then there's a fairly high chance of timing out
	// at the remote side, which means all the work is in vain.
	maxTrieNodeTimeSpent = 5 * time.Second
)
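
// Note that softResponseLimit is deliberately soft: the serving loops below
// append a reply item before checking the accumulated size, so responses may
// overshoot the limit by one entry (and storage ranges by up to
// stateLookupSlack on top of the requested bytes).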

// Handler is a callback to invoke from an outside runner after the boilerplate
// exchanges have passed.
type Handler func(peer *Peer) error

// Backend defines the data retrieval methods to serve remote requests and the
// callback methods to invoke on remote deliveries.
type Backend interface {
	// Chain retrieves the blockchain object to serve data.
	Chain() *core.BlockChain

	// RunPeer is invoked when a peer joins on the `zond` protocol. The handler
	// should do any peer maintenance work, handshakes and validations. If all
	// checks pass, control should be given back to the `handler` to process the
	// inbound messages going forward.
	RunPeer(peer *Peer, handler Handler) error

	// PeerInfo retrieves all known `snap` information about a peer.
	PeerInfo(id enode.ID) interface{}

	// Handle is a callback to be invoked when a data packet is received from
	// the remote peer. Only packets not consumed by the protocol handler will
	// be forwarded to the backend.
	Handle(peer *Peer, packet Packet) error
}

// MakeProtocols constructs the P2P protocol definitions for `snap`.
func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {
	// Filter the discovery iterator for nodes advertising snap support.
	dnsdisc = enode.Filter(dnsdisc, func(n *enode.Node) bool {
		var snap enrEntry
		return n.Load(&snap) == nil
	})

	protocols := make([]p2p.Protocol, len(ProtocolVersions))
	for i, version := range ProtocolVersions {
		version := version // Capture the loop variable for the closures below

		protocols[i] = p2p.Protocol{
			Name:    ProtocolName,
			Version: version,
			Length:  protocolLengths[version],
			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
				return backend.RunPeer(NewPeer(version, p, rw), func(peer *Peer) error {
					return Handle(backend, peer)
				})
			},
			NodeInfo: func() interface{} {
				return nodeInfo(backend.Chain())
			},
			PeerInfo: func(id enode.ID) interface{} {
				return backend.PeerInfo(id)
			},
			Attributes:     []enr.Entry{&enrEntry{}},
			DialCandidates: dnsdisc,
		}
	}
	return protocols
}
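
// A typical wiring of these protocols (a sketch; the call site is assumed,
// not taken from this file): the node backend appends them to its base
// protocols when assembling the p2p server, e.g.
//
//	protos := append(protos, MakeProtocols(backend, dnsdisc)...)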

// Handle is the callback invoked to manage the life cycle of a `snap` peer.
// When this function terminates, the peer is disconnected.
func Handle(backend Backend, peer *Peer) error {
	for {
		if err := HandleMessage(backend, peer); err != nil {
			peer.Log().Debug("Message handling failed in `snap`", "err", err)
			return err
		}
	}
}

// HandleMessage is invoked whenever an inbound message is received from a
// remote peer on the `snap` protocol. The remote connection is torn down upon
// returning any error.
func HandleMessage(backend Backend, peer *Peer) error {
	// Read the next message from the remote peer, and ensure it's fully consumed
	msg, err := peer.rw.ReadMsg()
	if err != nil {
		return err
	}
	if msg.Size > maxMessageSize {
		return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
	}
	defer msg.Discard()
	start := time.Now()
	// Track the amount of time it takes to serve the request and run the handler
	if metrics.Enabled {
		h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
		defer func(start time.Time) {
			sampler := func() metrics.Sample {
				return metrics.ResettingSample(
					metrics.NewExpDecaySample(1028, 0.015),
				)
			}
			metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds())
		}(start)
	}
	// Handle the message depending on its contents
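	// Requests (Get*Msg) are served straight from local state and answered
	// inline; deliveries (*Msg) are responses to our own earlier requests and
	// are sanity-checked here before being handed to the backend.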
	switch {
	case msg.Code == GetAccountRangeMsg:
		// Decode the account retrieval request
		var req GetAccountRangePacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		accounts, proofs := ServiceGetAccountRangeQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{
			ID:       req.ID,
			Accounts: accounts,
			Proof:    proofs,
		})

	case msg.Code == AccountRangeMsg:
		// A range of accounts arrived in response to one of our previous requests
		res := new(AccountRangePacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the range is monotonically increasing
		for i := 1; i < len(res.Accounts); i++ {
			if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
				return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
			}
		}
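		// Mark the matching in-flight request as answered so the tracker does
		// not flag it as timed out or the response as unsolicited.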
		requestTracker.Fulfil(peer.id, peer.version, AccountRangeMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetStorageRangesMsg:
		// Decode the storage retrieval request
		var req GetStorageRangesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		slots, proofs := ServiceGetStorageRangesQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{
			ID:    req.ID,
			Slots: slots,
			Proof: proofs,
		})

	case msg.Code == StorageRangesMsg:
		// A range of storage slots arrived in response to one of our previous requests
		res := new(StorageRangesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the ranges are monotonically increasing
		for i, slots := range res.Slots {
			for j := 1; j < len(slots); j++ {
				if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
					return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
				}
			}
		}
		requestTracker.Fulfil(peer.id, peer.version, StorageRangesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetByteCodesMsg:
		// Decode bytecode retrieval request
		var req GetByteCodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		codes := ServiceGetByteCodesQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{
			ID:    req.ID,
			Codes: codes,
		})

	case msg.Code == ByteCodesMsg:
		// A batch of bytecodes arrived in response to one of our previous requests
		res := new(ByteCodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		requestTracker.Fulfil(peer.id, peer.version, ByteCodesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetTrieNodesMsg:
		// Decode trie node retrieval request
		var req GetTrieNodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		nodes, err := ServiceGetTrieNodesQuery(backend.Chain(), &req, start)
		if err != nil {
			return err
		}
		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{
			ID:    req.ID,
			Nodes: nodes,
		})

	case msg.Code == TrieNodesMsg:
		// A batch of trie nodes arrived in response to one of our previous requests
		res := new(TrieNodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		requestTracker.Fulfil(peer.id, peer.version, TrieNodesMsg, res.ID)

		return backend.Handle(peer, res)

	default:
		return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code)
	}
}

// ServiceGetAccountRangeQuery assembles the response to an account range query.
// It is exposed to allow external packages to test protocol behavior.
func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePacket) ([]*AccountData, [][]byte) {
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	// Retrieve the requested state and bail out if non-existent
	tr, err := trie.New(trie.StateTrieID(req.Root), chain.TrieDB())
	if err != nil {
		return nil, nil
	}
	it, err := chain.Snapshots().AccountIterator(req.Root, req.Origin)
	if err != nil {
		return nil, nil
	}
	// Iterate over the requested range and pile accounts up
	var (
		accounts []*AccountData
		size     uint64
		last     common.Hash
	)
	for it.Next() {
		hash, account := it.Hash(), common.CopyBytes(it.Account())

		// Track the returned interval for the Merkle proofs
		last = hash

		// Assemble the reply item
		size += uint64(common.HashLength + len(account))
		accounts = append(accounts, &AccountData{
			Hash: hash,
			Body: account,
		})
		// If we've exceeded the request threshold, abort
		if bytes.Compare(hash[:], req.Limit[:]) >= 0 {
			break
		}
		if size > req.Bytes {
			break
		}
	}
	it.Release()

	// Generate the Merkle proofs for the first and last account
	proof := light.NewNodeSet()
	if err := tr.Prove(req.Origin[:], proof); err != nil {
		log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
		return nil, nil
	}
	if last != (common.Hash{}) {
		if err := tr.Prove(last[:], proof); err != nil {
			log.Warn("Failed to prove account range", "last", last, "err", err)
			return nil, nil
		}
	}
	var proofs [][]byte
	for _, blob := range proof.NodeList() {
		proofs = append(proofs, blob)
	}
	return accounts, proofs
}
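
// Note for consumers: the two edge proofs above (origin and last) are what let
// the requester check that the returned account range is complete and sits
// where claimed under req.Root; upstream snap sync performs this check with
// the trie package's range-proof verification.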

// ServiceGetStorageRangesQuery assembles the response to a storage ranges query.
// It is exposed to allow external packages to test protocol behavior.
func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesPacket) ([][]*StorageData, [][]byte) {
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	// TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set?
	// TODO(karalabe):   - Logging locally is not ideal as remote faults annoy the local user
	// TODO(karalabe):   - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional)

	// Calculate the hard limit at which to abort, even if mid storage trie
	hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack))
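	// (With the 2MB softResponseLimit and 0.1 slack, hardLimit comes out at
	// roughly 2.2MB: a contract whose slots push the reply slightly past the
	// soft limit can still be sent whole, avoiding a split that would require
	// proofs and a follow-up request.)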

	// Retrieve storage ranges until the packet limit is reached
	var (
		slots  [][]*StorageData
		proofs [][]byte
		size   uint64
	)
	for _, account := range req.Accounts {
		// If we've exceeded the requested data limit, abort without opening
		// a new storage range (that we'd need to prove due to exceeded size)
		if size >= req.Bytes {
			break
		}
		// The first account might start from a different origin and end sooner
		var origin common.Hash
		if len(req.Origin) > 0 {
			origin, req.Origin = common.BytesToHash(req.Origin), nil
		}
		var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		if len(req.Limit) > 0 {
			limit, req.Limit = common.BytesToHash(req.Limit), nil
		}
		// Retrieve the requested state and bail out if non-existent
		it, err := chain.Snapshots().StorageIterator(req.Root, account, origin)
		if err != nil {
			return nil, nil
		}
		// Iterate over the requested range and pile slots up
		var (
			storage []*StorageData
			last    common.Hash
			abort   bool
		)
		for it.Next() {
			if size >= hardLimit {
				abort = true
				break
			}
			hash, slot := it.Hash(), common.CopyBytes(it.Slot())

			// Track the returned interval for the Merkle proofs
			last = hash

			// Assemble the reply item
			size += uint64(common.HashLength + len(slot))
			storage = append(storage, &StorageData{
				Hash: hash,
				Body: slot,
			})
			// If we've exceeded the request threshold, abort
			if bytes.Compare(hash[:], limit[:]) >= 0 {
				break
			}
		}
		if len(storage) > 0 {
			slots = append(slots, storage)
		}
		it.Release()

		// Generate the Merkle proofs for the first and last storage slot, but
		// only if the response was capped. If the entire storage trie was
		// included in the response, no proofs are needed.
		if origin != (common.Hash{}) || (abort && len(storage) > 0) {
			// Request started at a non-zero hash or was capped prematurely, add
			// the endpoint Merkle proofs
			accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), chain.TrieDB())
			if err != nil {
				return nil, nil
			}
			acc, err := accTrie.GetAccountByHash(account)
			if err != nil || acc == nil {
				return nil, nil
			}
			id := trie.StorageTrieID(req.Root, account, acc.Root)
			stTrie, err := trie.NewStateTrie(id, chain.TrieDB())
			if err != nil {
				return nil, nil
			}
			proof := light.NewNodeSet()
			if err := stTrie.Prove(origin[:], proof); err != nil {
				log.Warn("Failed to prove storage range", "origin", origin, "err", err)
				return nil, nil
			}
			if last != (common.Hash{}) {
				if err := stTrie.Prove(last[:], proof); err != nil {
					log.Warn("Failed to prove storage range", "last", last, "err", err)
					return nil, nil
				}
			}
			for _, blob := range proof.NodeList() {
				proofs = append(proofs, blob)
			}
			// A proof terminates the reply, as proofs are only added when a
			// node refuses to serve more data (the exception being a contract
			// fetch that is just finishing, which is fine).
			break
		}
	}
	return slots, proofs
}

// ServiceGetByteCodesQuery assembles the response to a byte codes query.
// It is exposed to allow external packages to test protocol behavior.
func ServiceGetByteCodesQuery(chain *core.BlockChain, req *GetByteCodesPacket) [][]byte {
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	if len(req.Hashes) > maxCodeLookups {
		req.Hashes = req.Hashes[:maxCodeLookups]
	}
	// Retrieve bytecodes until the packet size limit is reached
	var (
		codes [][]byte
		bytes uint64
	)
	for _, hash := range req.Hashes {
		if hash == types.EmptyCodeHash {
			// Peers should not request the empty code, but if they do, at
			// least send them back a correct response without db lookups
			codes = append(codes, []byte{})
		} else if blob, err := chain.ContractCodeWithPrefix(hash); err == nil {
			codes = append(codes, blob)
			bytes += uint64(len(blob))
		}
		if bytes > req.Bytes {
			break
		}
	}
	return codes
}
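
// Hashes with no locally known code are silently skipped rather than answered
// with empty blobs, so a reply may hold fewer codes than were requested; the
// requester matches the returned blobs back to its hashes by re-hashing them.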

// ServiceGetTrieNodesQuery assembles the response to a trie nodes query.
// It is exposed to allow external packages to test protocol behavior.
func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, start time.Time) ([][]byte, error) {
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	// Make sure we have the state associated with the request
	triedb := chain.TrieDB()

	accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), triedb)
	if err != nil {
		// We don't have the requested state available, bail out
		return nil, nil
	}
	// The 'snap' might be nil, in which case we cannot serve storage slots.
	snap := chain.Snapshots().Snapshot(req.Root)
	// Retrieve trie nodes until the packet size limit is reached
	var (
		nodes [][]byte
		bytes uint64
		loads int // Trie hash expansions to count database reads
	)
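	// Each pathset addresses a single trie: a one-element set is a node path
	// in the account trie, while a multi-element set names an account (by its
	// hashed address) followed by node paths inside that account's storage trie.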
	for _, pathset := range req.Paths {
		switch len(pathset) {
		case 0:
			// Ensure we penalize invalid requests
			return nil, fmt.Errorf("%w: zero-item pathset requested", errBadRequest)

		case 1:
			// If we're only retrieving an account trie node, fetch it directly
			blob, resolved, err := accTrie.GetNode(pathset[0])
			loads += resolved // always account for database reads, even on failures
			if err != nil {
				break
			}
			nodes = append(nodes, blob)
			bytes += uint64(len(blob))

		default:
			var stRoot common.Hash
			// Storage slots requested, open the storage trie and retrieve from there
			if snap == nil {
				// We don't have the requested state snapshotted yet (or it is stale),
				// but can look up the account via the trie instead.
				account, err := accTrie.GetAccountByHash(common.BytesToHash(pathset[0]))
				loads += 8 // We don't know the exact cost of the lookup, this is an estimate
				if err != nil || account == nil {
					break
				}
				stRoot = account.Root
			} else {
				account, err := snap.Account(common.BytesToHash(pathset[0]))
				loads++ // always account for database reads, even on failures
				if err != nil || account == nil {
					break
				}
				stRoot = common.BytesToHash(account.Root)
			}
			id := trie.StorageTrieID(req.Root, common.BytesToHash(pathset[0]), stRoot)
			stTrie, err := trie.NewStateTrie(id, triedb)
			loads++ // always account for database reads, even on failures
			if err != nil {
				break
			}
			for _, path := range pathset[1:] {
				blob, resolved, err := stTrie.GetNode(path)
				loads += resolved // always account for database reads, even on failures
				if err != nil {
					break
				}
				nodes = append(nodes, blob)
				bytes += uint64(len(blob))

				// Sanity check limits to avoid DoS on the store trie loads
				if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
					break
				}
			}
		}
		// Abort request processing if we've exceeded our limits
		if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
			break
		}
	}
	return nodes, nil
}

// NodeInfo represents a short summary of the `snap` sub-protocol metadata
// known about the host peer.
type NodeInfo struct{}

// nodeInfo retrieves some `snap` protocol metadata about the running host node.
func nodeInfo(chain *core.BlockChain) *NodeInfo {
	return &NodeInfo{}
}