// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snap

import (
	"bytes"
	"fmt"
	"time"

	"github.com/tacshi/go-ethereum/common"
	"github.com/tacshi/go-ethereum/core"
	"github.com/tacshi/go-ethereum/core/types"
	"github.com/tacshi/go-ethereum/light"
	"github.com/tacshi/go-ethereum/log"
	"github.com/tacshi/go-ethereum/metrics"
	"github.com/tacshi/go-ethereum/p2p"
	"github.com/tacshi/go-ethereum/p2p/enode"
	"github.com/tacshi/go-ethereum/p2p/enr"
	"github.com/tacshi/go-ethereum/trie"
)

const (
	// softResponseLimit is the target maximum size of replies to data retrievals.
	softResponseLimit = 2 * 1024 * 1024

	// maxCodeLookups is the maximum number of bytecodes to serve. This number is
	// there to limit the number of disk lookups.
	maxCodeLookups = 1024

	// stateLookupSlack defines the ratio by how much a state response can exceed
	// the requested limit in order to try and avoid breaking up contracts into
	// multiple packages and proving them.
	stateLookupSlack = 0.1

	// maxTrieNodeLookups is the maximum number of state trie nodes to serve. This
	// number is there to limit the number of disk lookups.
	maxTrieNodeLookups = 1024

	// maxTrieNodeTimeSpent is the maximum time we should spend on looking up trie nodes.
	// If we spend too much time, then it's a fairly high chance of timing out
	// at the remote side, which means all the work is in vain.
	maxTrieNodeTimeSpent = 5 * time.Second
)

// Handler is a callback to invoke from an outside runner after the boilerplate
// exchanges have passed.
type Handler func(peer *Peer) error

// Backend defines the data retrieval methods to serve remote requests and the
// callback methods to invoke on remote deliveries.
type Backend interface {
	// Chain retrieves the blockchain object to serve data.
	Chain() *core.BlockChain

	// RunPeer is invoked when a peer joins on the `eth` protocol. The handler
	// should do any peer maintenance work, handshakes and validations. If all
	// is passed, control should be given back to the `handler` to process the
	// inbound messages going forward.
	RunPeer(peer *Peer, handler Handler) error

	// PeerInfo retrieves all known `snap` information about a peer.
	PeerInfo(id enode.ID) interface{}

	// Handle is a callback to be invoked when a data packet is received from
	// the remote peer. Only packets not consumed by the protocol handler will
	// be forwarded to the backend.
	Handle(peer *Peer, packet Packet) error
}

// MakeProtocols constructs the P2P protocol definitions for `snap`.
func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {
	// Filter the discovery iterator for nodes advertising snap support. Only
	// nodes whose ENR carries a `snap` entry are viable dial candidates.
	dnsdisc = enode.Filter(dnsdisc, func(n *enode.Node) bool {
		var snap enrEntry
		return n.Load(&snap) == nil
	})

	protocols := make([]p2p.Protocol, len(ProtocolVersions))
	for i, version := range ProtocolVersions {
		version := version // Closure: capture a per-iteration copy for the Run callback below

		protocols[i] = p2p.Protocol{
			Name:    ProtocolName,
			Version: version,
			Length:  protocolLengths[version],
			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
				return backend.RunPeer(NewPeer(version, p, rw), func(peer *Peer) error {
					return Handle(backend, peer)
				})
			},
			NodeInfo: func() interface{} {
				return nodeInfo(backend.Chain())
			},
			PeerInfo: func(id enode.ID) interface{} {
				return backend.PeerInfo(id)
			},
			Attributes:     []enr.Entry{&enrEntry{}},
			DialCandidates: dnsdisc,
		}
	}
	return protocols
}

// Handle is the callback invoked to manage the life cycle of a `snap` peer.
// When this function terminates, the peer is disconnected.
func Handle(backend Backend, peer *Peer) error {
	// Loop until the first message-handling error; any error tears down the
	// connection, so it is logged at debug level and propagated.
	for {
		if err := HandleMessage(backend, peer); err != nil {
			peer.Log().Debug("Message handling failed in `snap`", "err", err)
			return err
		}
	}
}

// HandleMessage is invoked whenever an inbound message is received from a
// remote peer on the `snap` protocol. The remote connection is torn down upon
// returning any error.
func HandleMessage(backend Backend, peer *Peer) error {
	// Read the next message from the remote peer, and ensure it's fully consumed
	msg, err := peer.rw.ReadMsg()
	if err != nil {
		return err
	}
	if msg.Size > maxMessageSize {
		return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
	}
	defer msg.Discard()
	start := time.Now()
	// Track the amount of time it takes to serve the request and run the handler
	if metrics.Enabled {
		h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
		defer func(start time.Time) {
			// Bounded histogram sample keeps the metric memory footprint capped.
			sampler := func() metrics.Sample {
				return metrics.NewBoundedHistogramSample()
			}
			metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds())
		}(start)
	}
	// Handle the message depending on its contents
	switch {
	case msg.Code == GetAccountRangeMsg:
		// Decode the account retrieval request
		var req GetAccountRangePacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		accounts, proofs := ServiceGetAccountRangeQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{
			ID:       req.ID,
			Accounts: accounts,
			Proof:    proofs,
		})

	case msg.Code == AccountRangeMsg:
		// A range of accounts arrived to one of our previous requests
		res := new(AccountRangePacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the range is monotonically increasing; out-of-order entries
		// are a protocol violation and tear down the connection.
		for i := 1; i < len(res.Accounts); i++ {
			if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
				return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
			}
		}
		requestTracker.Fulfil(peer.id, peer.version, AccountRangeMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetStorageRangesMsg:
		// Decode the storage retrieval request
		var req GetStorageRangesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		slots, proofs := ServiceGetStorageRangesQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{
			ID:    req.ID,
			Slots: slots,
			Proof: proofs,
		})

	case msg.Code == StorageRangesMsg:
		// A range of storage slots arrived to one of our previous requests
		res := new(StorageRangesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the ranges are monotonically increasing per account
		for i, slots := range res.Slots {
			for j := 1; j < len(slots); j++ {
				if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
					return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
				}
			}
		}
		requestTracker.Fulfil(peer.id, peer.version, StorageRangesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetByteCodesMsg:
		// Decode bytecode retrieval request
		var req GetByteCodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		codes := ServiceGetByteCodesQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{
			ID:    req.ID,
			Codes: codes,
		})

	case msg.Code == ByteCodesMsg:
		// A batch of byte codes arrived to one of our previous requests
		res := new(ByteCodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		requestTracker.Fulfil(peer.id, peer.version, ByteCodesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetTrieNodesMsg:
		// Decode trie node retrieval request
		var req GetTrieNodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors.
		// `start` is threaded through so the query can respect the global time
		// budget (maxTrieNodeTimeSpent) measured from message arrival.
		nodes, err := ServiceGetTrieNodesQuery(backend.Chain(), &req, start)
		if err != nil {
			return err
		}
		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{
			ID:    req.ID,
			Nodes: nodes,
		})

	case msg.Code == TrieNodesMsg:
		// A batch of trie nodes arrived to one of our previous requests
		res := new(TrieNodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		requestTracker.Fulfil(peer.id, peer.version, TrieNodesMsg, res.ID)

		return backend.Handle(peer, res)

	default:
		return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code)
	}
}

// ServiceGetAccountRangeQuery assembles the response to an account range query.
// It is exposed to allow external packages to test protocol behavior.
280 func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePacket) ([]*AccountData, [][]byte) { 281 if req.Bytes > softResponseLimit { 282 req.Bytes = softResponseLimit 283 } 284 // Retrieve the requested state and bail out if non existent 285 tr, err := trie.New(trie.StateTrieID(req.Root), chain.StateCache().TrieDB()) 286 if err != nil { 287 return nil, nil 288 } 289 it, err := chain.Snapshots().AccountIterator(req.Root, req.Origin) 290 if err != nil { 291 return nil, nil 292 } 293 // Iterate over the requested range and pile accounts up 294 var ( 295 accounts []*AccountData 296 size uint64 297 last common.Hash 298 ) 299 for it.Next() { 300 hash, account := it.Hash(), common.CopyBytes(it.Account()) 301 302 // Track the returned interval for the Merkle proofs 303 last = hash 304 305 // Assemble the reply item 306 size += uint64(common.HashLength + len(account)) 307 accounts = append(accounts, &AccountData{ 308 Hash: hash, 309 Body: account, 310 }) 311 // If we've exceeded the request threshold, abort 312 if bytes.Compare(hash[:], req.Limit[:]) >= 0 { 313 break 314 } 315 if size > req.Bytes { 316 break 317 } 318 } 319 it.Release() 320 321 // Generate the Merkle proofs for the first and last account 322 proof := light.NewNodeSet() 323 if err := tr.Prove(req.Origin[:], 0, proof); err != nil { 324 log.Warn("Failed to prove account range", "origin", req.Origin, "err", err) 325 return nil, nil 326 } 327 if last != (common.Hash{}) { 328 if err := tr.Prove(last[:], 0, proof); err != nil { 329 log.Warn("Failed to prove account range", "last", last, "err", err) 330 return nil, nil 331 } 332 } 333 var proofs [][]byte 334 for _, blob := range proof.NodeList() { 335 proofs = append(proofs, blob) 336 } 337 return accounts, proofs 338 } 339 340 func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesPacket) ([][]*StorageData, [][]byte) { 341 if req.Bytes > softResponseLimit { 342 req.Bytes = softResponseLimit 343 } 344 // 
TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set? 345 // TODO(karalabe): - Logging locally is not ideal as remote faults annoy the local user 346 // TODO(karalabe): - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional) 347 348 // Calculate the hard limit at which to abort, even if mid storage trie 349 hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack)) 350 351 // Retrieve storage ranges until the packet limit is reached 352 var ( 353 slots [][]*StorageData 354 proofs [][]byte 355 size uint64 356 ) 357 for _, account := range req.Accounts { 358 // If we've exceeded the requested data limit, abort without opening 359 // a new storage range (that we'd need to prove due to exceeded size) 360 if size >= req.Bytes { 361 break 362 } 363 // The first account might start from a different origin and end sooner 364 var origin common.Hash 365 if len(req.Origin) > 0 { 366 origin, req.Origin = common.BytesToHash(req.Origin), nil 367 } 368 var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") 369 if len(req.Limit) > 0 { 370 limit, req.Limit = common.BytesToHash(req.Limit), nil 371 } 372 // Retrieve the requested state and bail out if non existent 373 it, err := chain.Snapshots().StorageIterator(req.Root, account, origin) 374 if err != nil { 375 return nil, nil 376 } 377 // Iterate over the requested range and pile slots up 378 var ( 379 storage []*StorageData 380 last common.Hash 381 abort bool 382 ) 383 for it.Next() { 384 if size >= hardLimit { 385 abort = true 386 break 387 } 388 hash, slot := it.Hash(), common.CopyBytes(it.Slot()) 389 390 // Track the returned interval for the Merkle proofs 391 last = hash 392 393 // Assemble the reply item 394 size += uint64(common.HashLength + len(slot)) 395 storage = append(storage, &StorageData{ 396 Hash: hash, 397 Body: slot, 398 }) 399 // If we've exceeded the request threshold, abort 400 if 
bytes.Compare(hash[:], limit[:]) >= 0 { 401 break 402 } 403 } 404 if len(storage) > 0 { 405 slots = append(slots, storage) 406 } 407 it.Release() 408 409 // Generate the Merkle proofs for the first and last storage slot, but 410 // only if the response was capped. If the entire storage trie included 411 // in the response, no need for any proofs. 412 if origin != (common.Hash{}) || (abort && len(storage) > 0) { 413 // Request started at a non-zero hash or was capped prematurely, add 414 // the endpoint Merkle proofs 415 accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), chain.StateCache().TrieDB()) 416 if err != nil { 417 return nil, nil 418 } 419 acc, err := accTrie.TryGetAccountByHash(account) 420 if err != nil || acc == nil { 421 return nil, nil 422 } 423 id := trie.StorageTrieID(req.Root, account, acc.Root) 424 stTrie, err := trie.NewStateTrie(id, chain.StateCache().TrieDB()) 425 if err != nil { 426 return nil, nil 427 } 428 proof := light.NewNodeSet() 429 if err := stTrie.Prove(origin[:], 0, proof); err != nil { 430 log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err) 431 return nil, nil 432 } 433 if last != (common.Hash{}) { 434 if err := stTrie.Prove(last[:], 0, proof); err != nil { 435 log.Warn("Failed to prove storage range", "last", last, "err", err) 436 return nil, nil 437 } 438 } 439 for _, blob := range proof.NodeList() { 440 proofs = append(proofs, blob) 441 } 442 // Proof terminates the reply as proofs are only added if a node 443 // refuses to serve more data (exception when a contract fetch is 444 // finishing, but that's that). 445 break 446 } 447 } 448 return slots, proofs 449 } 450 451 // ServiceGetByteCodesQuery assembles the response to a byte codes query. 452 // It is exposed to allow external packages to test protocol behavior. 
func ServiceGetByteCodesQuery(chain *core.BlockChain, req *GetByteCodesPacket) [][]byte {
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	if len(req.Hashes) > maxCodeLookups {
		req.Hashes = req.Hashes[:maxCodeLookups]
	}
	// Retrieve bytecodes until the packet size limit is reached
	var (
		codes [][]byte
		bytes uint64 // NOTE: shadows the imported "bytes" package within this function
	)
	for _, hash := range req.Hashes {
		if hash == types.EmptyCodeHash {
			// Peers should not request the empty code, but if they do, at
			// least sent them back a correct response without db lookups
			codes = append(codes, []byte{})
		} else if blob, err := chain.ContractCodeWithPrefix(hash); err == nil {
			codes = append(codes, blob)
			bytes += uint64(len(blob))
		}
		// Lookup failures are skipped silently: the response simply contains
		// fewer codes than requested.
		if bytes > req.Bytes {
			break
		}
	}
	return codes
}

// ServiceGetTrieNodesQuery assembles the response to a trie nodes query.
// It is exposed to allow external packages to test protocol behavior.
func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, start time.Time) ([][]byte, error) {
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	// Make sure we have the state associated with the request
	triedb := chain.StateCache().TrieDB()

	accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), triedb)
	if err != nil {
		// We don't have the requested state available, bail out
		return nil, nil
	}
	// The 'snap' might be nil, in which case we cannot serve storage slots.
	snap := chain.Snapshots().Snapshot(req.Root)
	// Retrieve trie nodes until the packet size limit is reached
	var (
		nodes [][]byte
		bytes uint64 // accumulated response size (shadows the "bytes" package here)
		loads int    // Trie hash expansions to count database reads
	)
	for _, pathset := range req.Paths {
		switch len(pathset) {
		case 0:
			// Ensure we penalize invalid requests
			return nil, fmt.Errorf("%w: zero-item pathset requested", errBadRequest)

		case 1:
			// If we're only retrieving an account trie node, fetch it directly
			blob, resolved, err := accTrie.TryGetNode(pathset[0])
			loads += resolved // always account database reads, even for failures
			if err != nil {
				break
			}
			nodes = append(nodes, blob)
			bytes += uint64(len(blob))

		default:
			var stRoot common.Hash
			// Storage slots requested, open the storage trie and retrieve from there
			if snap == nil {
				// We don't have the requested state snapshotted yet (or it is stale),
				// but can look up the account via the trie instead.
				account, err := accTrie.TryGetAccountByHash(common.BytesToHash(pathset[0]))
				loads += 8 // We don't know the exact cost of lookup, this is an estimate
				if err != nil || account == nil {
					break
				}
				stRoot = account.Root
			} else {
				account, err := snap.Account(common.BytesToHash(pathset[0]))
				loads++ // always account database reads, even for failures
				if err != nil || account == nil {
					break
				}
				stRoot = common.BytesToHash(account.Root)
			}
			id := trie.StorageTrieID(req.Root, common.BytesToHash(pathset[0]), stRoot)
			stTrie, err := trie.NewStateTrie(id, triedb)
			loads++ // always account database reads, even for failures
			if err != nil {
				break
			}
			for _, path := range pathset[1:] {
				blob, resolved, err := stTrie.TryGetNode(path)
				loads += resolved // always account database reads, even for failures
				if err != nil {
					break
				}
				nodes = append(nodes, blob)
				bytes += uint64(len(blob))

				// Sanity check limits to avoid DoS on the store trie loads.
				// This break only exits the path loop; the outer check below
				// terminates request processing entirely.
				if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
					break
				}
			}
		}
		// Abort request processing if we've exceeded our limits
		if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
			break
		}
	}
	return nodes, nil
}

// NodeInfo represents a short summary of the `snap` sub-protocol metadata
// known about the host peer.
type NodeInfo struct{}

// nodeInfo retrieves some `snap` protocol metadata about the running host node.
// The chain parameter is currently unused: the protocol exposes no metadata yet,
// so an empty struct is returned.
func nodeInfo(chain *core.BlockChain) *NodeInfo {
	return &NodeInfo{}
}