github.com/MikyChow/arbitrum-go-ethereum@v0.0.0-20230306102812-078da49636de/eth/protocols/snap/handler.go (about)

     1  // Copyright 2020 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snap
    18  
    19  import (
    20  	"bytes"
    21  	"fmt"
    22  	"time"
    23  
    24  	"github.com/MikyChow/arbitrum-go-ethereum/common"
    25  	"github.com/MikyChow/arbitrum-go-ethereum/core"
    26  	"github.com/MikyChow/arbitrum-go-ethereum/light"
    27  	"github.com/MikyChow/arbitrum-go-ethereum/log"
    28  	"github.com/MikyChow/arbitrum-go-ethereum/metrics"
    29  	"github.com/MikyChow/arbitrum-go-ethereum/p2p"
    30  	"github.com/MikyChow/arbitrum-go-ethereum/p2p/enode"
    31  	"github.com/MikyChow/arbitrum-go-ethereum/p2p/enr"
    32  	"github.com/MikyChow/arbitrum-go-ethereum/trie"
    33  )
    34  
const (
	// softResponseLimit is the target maximum size of replies to data retrievals.
	// Responses may slightly exceed it (see stateLookupSlack) to avoid splitting
	// entries mid-item.
	softResponseLimit = 2 * 1024 * 1024

	// maxCodeLookups is the maximum number of bytecodes to serve. This number is
	// there to limit the number of disk lookups.
	maxCodeLookups = 1024

	// stateLookupSlack defines the ratio by how much a state response can exceed
	// the requested limit in order to try and avoid breaking up contracts into
	// multiple packages and proving them.
	stateLookupSlack = 0.1

	// maxTrieNodeLookups is the maximum number of state trie nodes to serve. This
	// number is there to limit the number of disk lookups.
	maxTrieNodeLookups = 1024

	// maxTrieNodeTimeSpent is the maximum time we should spend on looking up trie nodes.
	// If we spend too much time, then it's a fairly high chance of timing out
	// at the remote side, which means all the work is in vain.
	maxTrieNodeTimeSpent = 5 * time.Second
)
    57  
// Handler is a callback to invoke from an outside runner after the boilerplate
// exchanges have passed. It should block for the lifetime of the peer; its
// returned error tears the connection down.
type Handler func(peer *Peer) error
    61  
// Backend defines the data retrieval methods to serve remote requests and the
// callback methods to invoke on remote deliveries.
type Backend interface {
	// Chain retrieves the blockchain object to serve data.
	Chain() *core.BlockChain

	// RunPeer is invoked when a peer joins on the `eth` protocol. The handler
	// should do any peer maintenance work, handshakes and validations. If all
	// is passed, control should be given back to the `handler` to process the
	// inbound messages going forward.
	RunPeer(peer *Peer, handler Handler) error

	// PeerInfo retrieves all known `snap` information about a peer.
	PeerInfo(id enode.ID) interface{}

	// Handle is a callback to be invoked when a data packet is received from
	// the remote peer. Only packets not consumed by the protocol handler will
	// be forwarded to the backend.
	Handle(peer *Peer, packet Packet) error
}
    82  
    83  // MakeProtocols constructs the P2P protocol definitions for `snap`.
    84  func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {
    85  	// Filter the discovery iterator for nodes advertising snap support.
    86  	dnsdisc = enode.Filter(dnsdisc, func(n *enode.Node) bool {
    87  		var snap enrEntry
    88  		return n.Load(&snap) == nil
    89  	})
    90  
    91  	protocols := make([]p2p.Protocol, len(ProtocolVersions))
    92  	for i, version := range ProtocolVersions {
    93  		version := version // Closure
    94  
    95  		protocols[i] = p2p.Protocol{
    96  			Name:    ProtocolName,
    97  			Version: version,
    98  			Length:  protocolLengths[version],
    99  			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
   100  				return backend.RunPeer(NewPeer(version, p, rw), func(peer *Peer) error {
   101  					return Handle(backend, peer)
   102  				})
   103  			},
   104  			NodeInfo: func() interface{} {
   105  				return nodeInfo(backend.Chain())
   106  			},
   107  			PeerInfo: func(id enode.ID) interface{} {
   108  				return backend.PeerInfo(id)
   109  			},
   110  			Attributes:     []enr.Entry{&enrEntry{}},
   111  			DialCandidates: dnsdisc,
   112  		}
   113  	}
   114  	return protocols
   115  }
   116  
   117  // Handle is the callback invoked to manage the life cycle of a `snap` peer.
   118  // When this function terminates, the peer is disconnected.
   119  func Handle(backend Backend, peer *Peer) error {
   120  	for {
   121  		if err := HandleMessage(backend, peer); err != nil {
   122  			peer.Log().Debug("Message handling failed in `snap`", "err", err)
   123  			return err
   124  		}
   125  	}
   126  }
   127  
// HandleMessage is invoked whenever an inbound message is received from a
// remote peer on the `snap` protocol. The remote connection is torn down upon
// returning any error.
func HandleMessage(backend Backend, peer *Peer) error {
	// Read the next message from the remote peer, and ensure it's fully consumed
	msg, err := peer.rw.ReadMsg()
	if err != nil {
		return err
	}
	if msg.Size > maxMessageSize {
		return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
	}
	defer msg.Discard()
	start := time.Now()
	// Track the amount of time it takes to serve the request and run the handler
	if metrics.Enabled {
		h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
		defer func(start time.Time) {
			sampler := func() metrics.Sample {
				return metrics.NewBoundedHistogramSample()
			}
			metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds())
		}(start)
	}
	// Handle the message depending on its contents. Get* codes are served
	// locally; the non-Get codes are replies to our own requests, which are
	// sanity-checked here and then forwarded to the backend for consumption.
	switch {
	case msg.Code == GetAccountRangeMsg:
		// Decode the account retrieval request
		var req GetAccountRangePacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		accounts, proofs := ServiceGetAccountRangeQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{
			ID:       req.ID,
			Accounts: accounts,
			Proof:    proofs,
		})

	case msg.Code == AccountRangeMsg:
		// A range of accounts arrived to one of our previous requests
		res := new(AccountRangePacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the range is monotonically increasing
		for i := 1; i < len(res.Accounts); i++ {
			if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
				return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
			}
		}
		requestTracker.Fulfil(peer.id, peer.version, AccountRangeMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetStorageRangesMsg:
		// Decode the storage retrieval request
		var req GetStorageRangesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		slots, proofs := ServiceGetStorageRangesQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{
			ID:    req.ID,
			Slots: slots,
			Proof: proofs,
		})

	case msg.Code == StorageRangesMsg:
		// A range of storage slots arrived to one of our previous requests
		res := new(StorageRangesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the ranges are monotonically increasing (per account)
		for i, slots := range res.Slots {
			for j := 1; j < len(slots); j++ {
				if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
					return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
				}
			}
		}
		requestTracker.Fulfil(peer.id, peer.version, StorageRangesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetByteCodesMsg:
		// Decode bytecode retrieval request
		var req GetByteCodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		codes := ServiceGetByteCodesQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{
			ID:    req.ID,
			Codes: codes,
		})

	case msg.Code == ByteCodesMsg:
		// A batch of byte codes arrived to one of our previous requests
		res := new(ByteCodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		requestTracker.Fulfil(peer.id, peer.version, ByteCodesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetTrieNodesMsg:
		// Decode trie node retrieval request
		var req GetTrieNodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors.
		// `start` is threaded through so the service loop can honor
		// maxTrieNodeTimeSpent relative to when the message arrived.
		nodes, err := ServiceGetTrieNodesQuery(backend.Chain(), &req, start)
		if err != nil {
			return err
		}
		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{
			ID:    req.ID,
			Nodes: nodes,
		})

	case msg.Code == TrieNodesMsg:
		// A batch of trie nodes arrived to one of our previous requests
		res := new(TrieNodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		requestTracker.Fulfil(peer.id, peer.version, TrieNodesMsg, res.ID)

		return backend.Handle(peer, res)

	default:
		return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code)
	}
}
   276  
// ServiceGetAccountRangeQuery assembles the response to an account range query.
// It is exposed to allow external packages to test protocol behavior.
//
// On any failure (unknown root, missing snapshot, proof errors) it returns
// (nil, nil), which the caller sends back as an empty response.
func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePacket) ([]*AccountData, [][]byte) {
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	// Retrieve the requested state and bail out if non existent
	tr, err := trie.New(common.Hash{}, req.Root, chain.StateCache().TrieDB())
	if err != nil {
		return nil, nil
	}
	it, err := chain.Snapshots().AccountIterator(req.Root, req.Origin)
	if err != nil {
		return nil, nil
	}
	// Iterate over the requested range and pile accounts up
	var (
		accounts []*AccountData
		size     uint64
		last     common.Hash
	)
	for it.Next() {
		hash, account := it.Hash(), common.CopyBytes(it.Account())

		// Track the returned interval for the Merkle proofs
		last = hash

		// Assemble the reply item
		size += uint64(common.HashLength + len(account))
		accounts = append(accounts, &AccountData{
			Hash: hash,
			Body: account,
		})
		// If we've exceeded the request threshold, abort. Note the checks run
		// after appending, so the first account at/past req.Limit is still
		// included — it anchors the boundary proof below.
		if bytes.Compare(hash[:], req.Limit[:]) >= 0 {
			break
		}
		if size > req.Bytes {
			break
		}
	}
	it.Release()

	// Generate the Merkle proofs for the first and last account, so the
	// requester can verify the (possibly truncated) range against req.Root.
	proof := light.NewNodeSet()
	if err := tr.Prove(req.Origin[:], 0, proof); err != nil {
		log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
		return nil, nil
	}
	if last != (common.Hash{}) {
		if err := tr.Prove(last[:], 0, proof); err != nil {
			log.Warn("Failed to prove account range", "last", last, "err", err)
			return nil, nil
		}
	}
	var proofs [][]byte
	for _, blob := range proof.NodeList() {
		proofs = append(proofs, blob)
	}
	return accounts, proofs
}
   338  
   339  func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesPacket) ([][]*StorageData, [][]byte) {
   340  	if req.Bytes > softResponseLimit {
   341  		req.Bytes = softResponseLimit
   342  	}
   343  	// TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set?
   344  	// TODO(karalabe):   - Logging locally is not ideal as remote faulst annoy the local user
   345  	// TODO(karalabe):   - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional)
   346  
   347  	// Calculate the hard limit at which to abort, even if mid storage trie
   348  	hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack))
   349  
   350  	// Retrieve storage ranges until the packet limit is reached
   351  	var (
   352  		slots  [][]*StorageData
   353  		proofs [][]byte
   354  		size   uint64
   355  	)
   356  	for _, account := range req.Accounts {
   357  		// If we've exceeded the requested data limit, abort without opening
   358  		// a new storage range (that we'd need to prove due to exceeded size)
   359  		if size >= req.Bytes {
   360  			break
   361  		}
   362  		// The first account might start from a different origin and end sooner
   363  		var origin common.Hash
   364  		if len(req.Origin) > 0 {
   365  			origin, req.Origin = common.BytesToHash(req.Origin), nil
   366  		}
   367  		var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
   368  		if len(req.Limit) > 0 {
   369  			limit, req.Limit = common.BytesToHash(req.Limit), nil
   370  		}
   371  		// Retrieve the requested state and bail out if non existent
   372  		it, err := chain.Snapshots().StorageIterator(req.Root, account, origin)
   373  		if err != nil {
   374  			return nil, nil
   375  		}
   376  		// Iterate over the requested range and pile slots up
   377  		var (
   378  			storage []*StorageData
   379  			last    common.Hash
   380  			abort   bool
   381  		)
   382  		for it.Next() {
   383  			if size >= hardLimit {
   384  				abort = true
   385  				break
   386  			}
   387  			hash, slot := it.Hash(), common.CopyBytes(it.Slot())
   388  
   389  			// Track the returned interval for the Merkle proofs
   390  			last = hash
   391  
   392  			// Assemble the reply item
   393  			size += uint64(common.HashLength + len(slot))
   394  			storage = append(storage, &StorageData{
   395  				Hash: hash,
   396  				Body: slot,
   397  			})
   398  			// If we've exceeded the request threshold, abort
   399  			if bytes.Compare(hash[:], limit[:]) >= 0 {
   400  				break
   401  			}
   402  		}
   403  		if len(storage) > 0 {
   404  			slots = append(slots, storage)
   405  		}
   406  		it.Release()
   407  
   408  		// Generate the Merkle proofs for the first and last storage slot, but
   409  		// only if the response was capped. If the entire storage trie included
   410  		// in the response, no need for any proofs.
   411  		if origin != (common.Hash{}) || (abort && len(storage) > 0) {
   412  			// Request started at a non-zero hash or was capped prematurely, add
   413  			// the endpoint Merkle proofs
   414  			accTrie, err := trie.NewStateTrie(common.Hash{}, req.Root, chain.StateCache().TrieDB())
   415  			if err != nil {
   416  				return nil, nil
   417  			}
   418  			acc, err := accTrie.TryGetAccountWithPreHashedKey(account[:])
   419  			if err != nil || acc == nil {
   420  				return nil, nil
   421  			}
   422  			stTrie, err := trie.NewStateTrie(account, acc.Root, chain.StateCache().TrieDB())
   423  			if err != nil {
   424  				return nil, nil
   425  			}
   426  			proof := light.NewNodeSet()
   427  			if err := stTrie.Prove(origin[:], 0, proof); err != nil {
   428  				log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err)
   429  				return nil, nil
   430  			}
   431  			if last != (common.Hash{}) {
   432  				if err := stTrie.Prove(last[:], 0, proof); err != nil {
   433  					log.Warn("Failed to prove storage range", "last", last, "err", err)
   434  					return nil, nil
   435  				}
   436  			}
   437  			for _, blob := range proof.NodeList() {
   438  				proofs = append(proofs, blob)
   439  			}
   440  			// Proof terminates the reply as proofs are only added if a node
   441  			// refuses to serve more data (exception when a contract fetch is
   442  			// finishing, but that's that).
   443  			break
   444  		}
   445  	}
   446  	return slots, proofs
   447  }
   448  
   449  // ServiceGetByteCodesQuery assembles the response to a byte codes query.
   450  // It is exposed to allow external packages to test protocol behavior.
   451  func ServiceGetByteCodesQuery(chain *core.BlockChain, req *GetByteCodesPacket) [][]byte {
   452  	if req.Bytes > softResponseLimit {
   453  		req.Bytes = softResponseLimit
   454  	}
   455  	if len(req.Hashes) > maxCodeLookups {
   456  		req.Hashes = req.Hashes[:maxCodeLookups]
   457  	}
   458  	// Retrieve bytecodes until the packet size limit is reached
   459  	var (
   460  		codes [][]byte
   461  		bytes uint64
   462  	)
   463  	for _, hash := range req.Hashes {
   464  		if hash == emptyCode {
   465  			// Peers should not request the empty code, but if they do, at
   466  			// least sent them back a correct response without db lookups
   467  			codes = append(codes, []byte{})
   468  		} else if blob, err := chain.ContractCodeWithPrefix(hash); err == nil {
   469  			codes = append(codes, blob)
   470  			bytes += uint64(len(blob))
   471  		}
   472  		if bytes > req.Bytes {
   473  			break
   474  		}
   475  	}
   476  	return codes
   477  }
   478  
// ServiceGetTrieNodesQuery assembles the response to a trie nodes query.
// It is exposed to allow external packages to test protocol behavior.
//
// `start` is the time the request was received; lookups are cut off once
// maxTrieNodeTimeSpent has elapsed since then. A malformed request returns a
// non-nil error (which drops the peer); missing state just returns (nil, nil).
func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, start time.Time) ([][]byte, error) {
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	// Make sure we have the state associated with the request
	triedb := chain.StateCache().TrieDB()

	accTrie, err := trie.NewStateTrie(common.Hash{}, req.Root, triedb)
	if err != nil {
		// We don't have the requested state available, bail out
		return nil, nil
	}
	snap := chain.Snapshots().Snapshot(req.Root)
	if snap == nil {
		// We don't have the requested state snapshotted yet, bail out.
		// In reality we could still serve using the account and storage
		// tries only, but let's protect the node a bit while it's doing
		// snapshot generation.
		return nil, nil
	}
	// Retrieve trie nodes until the packet size limit is reached
	var (
		nodes [][]byte
		bytes uint64
		loads int // Trie hash expansions to count database reads
	)
	for _, pathset := range req.Paths {
		switch len(pathset) {
		case 0:
			// Ensure we penalize invalid requests
			return nil, fmt.Errorf("%w: zero-item pathset requested", errBadRequest)

		case 1:
			// If we're only retrieving an account trie node, fetch it directly
			blob, resolved, err := accTrie.TryGetNode(pathset[0])
			loads += resolved // always account database reads, even for failures
			if err != nil {
				break
			}
			nodes = append(nodes, blob)
			bytes += uint64(len(blob))

		default:
			// Storage slots requested, open the storage trie and retrieve from
			// there. pathset[0] is the account hash, the rest are node paths
			// within that account's storage trie.
			account, err := snap.Account(common.BytesToHash(pathset[0]))
			loads++ // always account database reads, even for failures
			if err != nil || account == nil {
				break
			}
			stTrie, err := trie.NewStateTrie(common.BytesToHash(pathset[0]), common.BytesToHash(account.Root), triedb)
			loads++ // always account database reads, even for failures
			if err != nil {
				break
			}
			for _, path := range pathset[1:] {
				blob, resolved, err := stTrie.TryGetNode(path)
				loads += resolved // always account database reads, even for failures
				if err != nil {
					break
				}
				nodes = append(nodes, blob)
				bytes += uint64(len(blob))

				// Sanity check limits to avoid DoS on the store trie loads.
				// This break only exits the per-path loop; the outer check
				// below terminates the whole request.
				if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
					break
				}
			}
		}
		// Abort request processing if we've exceeded our limits
		if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
			break
		}
	}
	return nodes, nil
}
   557  
// NodeInfo represents a short summary of the `snap` sub-protocol metadata
// known about the host peer. It is currently empty; the type exists so the
// admin API has a stable shape to extend later.
type NodeInfo struct{}
   561  
   562  // nodeInfo retrieves some `snap` protocol metadata about the running host node.
   563  func nodeInfo(chain *core.BlockChain) *NodeInfo {
   564  	return &NodeInfo{}
   565  }