github.com/palisadeinc/bor@v0.0.0-20230615125219-ab7196213d15/eth/protocols/snap/handler.go (about) 1 // Copyright 2020 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package snap 18 19 import ( 20 "bytes" 21 "fmt" 22 "time" 23 24 "github.com/ethereum/go-ethereum/common" 25 "github.com/ethereum/go-ethereum/core" 26 "github.com/ethereum/go-ethereum/core/types" 27 "github.com/ethereum/go-ethereum/light" 28 "github.com/ethereum/go-ethereum/log" 29 "github.com/ethereum/go-ethereum/metrics" 30 "github.com/ethereum/go-ethereum/p2p" 31 "github.com/ethereum/go-ethereum/p2p/enode" 32 "github.com/ethereum/go-ethereum/p2p/enr" 33 "github.com/ethereum/go-ethereum/rlp" 34 "github.com/ethereum/go-ethereum/trie" 35 ) 36 37 const ( 38 // softResponseLimit is the target maximum size of replies to data retrievals. 39 softResponseLimit = 2 * 1024 * 1024 40 41 // maxCodeLookups is the maximum number of bytecodes to serve. This number is 42 // there to limit the number of disk lookups. 43 maxCodeLookups = 1024 44 45 // stateLookupSlack defines the ratio by how much a state response can exceed 46 // the requested limit in order to try and avoid breaking up contracts into 47 // multiple packages and proving them. 
	stateLookupSlack = 0.1

	// maxTrieNodeLookups is the maximum number of state trie nodes to serve. This
	// number is there to limit the number of disk lookups.
	maxTrieNodeLookups = 1024

	// maxTrieNodeTimeSpent is the maximum time we should spend on looking up trie nodes.
	// If we spend too much time, then it's a fairly high chance of timing out
	// at the remote side, which means all the work is in vain.
	maxTrieNodeTimeSpent = 5 * time.Second
)

// Handler is a callback to invoke from an outside runner after the boilerplate
// exchanges have passed.
type Handler func(peer *Peer) error

// Backend defines the data retrieval methods to serve remote requests and the
// callback methods to invoke on remote deliveries.
type Backend interface {
	// Chain retrieves the blockchain object to serve data.
	Chain() *core.BlockChain

	// RunPeer is invoked when a peer joins on the `eth` protocol. The handler
	// should do any peer maintenance work, handshakes and validations. If all
	// is passed, control should be given back to the `handler` to process the
	// inbound messages going forward.
	RunPeer(peer *Peer, handler Handler) error

	// PeerInfo retrieves all known `snap` information about a peer.
	PeerInfo(id enode.ID) interface{}

	// Handle is a callback to be invoked when a data packet is received from
	// the remote peer. Only packets not consumed by the protocol handler will
	// be forwarded to the backend.
	Handle(peer *Peer, packet Packet) error
}

// MakeProtocols constructs the P2P protocol definitions for `snap`, one entry
// per supported protocol version, all backed by the same Backend.
func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {
	// Filter the discovery iterator for nodes advertising snap support. A node
	// qualifies iff its record carries a decodable `snap` ENR entry.
	dnsdisc = enode.Filter(dnsdisc, func(n *enode.Node) bool {
		var snap enrEntry
		return n.Load(&snap) == nil
	})

	protocols := make([]p2p.Protocol, len(ProtocolVersions))
	for i, version := range ProtocolVersions {
		version := version // Closure: each Run callback must capture its own version

		protocols[i] = p2p.Protocol{
			Name:    ProtocolName,
			Version: version,
			Length:  protocolLengths[version],
			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
				return backend.RunPeer(NewPeer(version, p, rw), func(peer *Peer) error {
					return Handle(backend, peer)
				})
			},
			NodeInfo: func() interface{} {
				return nodeInfo(backend.Chain())
			},
			PeerInfo: func(id enode.ID) interface{} {
				return backend.PeerInfo(id)
			},
			Attributes:     []enr.Entry{&enrEntry{}},
			DialCandidates: dnsdisc,
		}
	}
	return protocols
}

// Handle is the callback invoked to manage the life cycle of a `snap` peer.
// It loops serving/consuming messages until HandleMessage reports an error;
// when this function terminates, the peer is disconnected.
func Handle(backend Backend, peer *Peer) error {
	for {
		if err := HandleMessage(backend, peer); err != nil {
			peer.Log().Debug("Message handling failed in `snap`", "err", err)
			return err
		}
	}
}

// HandleMessage is invoked whenever an inbound message is received from a
// remote peer on the `snap` protocol. The remote connection is torn down upon
// returning any error.
func HandleMessage(backend Backend, peer *Peer) error {
	// Read the next message from the remote peer, and ensure it's fully consumed
	msg, err := peer.rw.ReadMsg()
	if err != nil {
		return err
	}
	if msg.Size > maxMessageSize {
		return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
	}
	defer msg.Discard()
	start := time.Now()
	// Track the amount of time it takes to serve the request and run the handler
	if metrics.Enabled {
		h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
		defer func(start time.Time) {
			sampler := func() metrics.Sample {
				return metrics.ResettingSample(
					metrics.NewExpDecaySample(1028, 0.015),
				)
			}
			metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds())
		}(start)
	}
	// Handle the message depending on its contents. Request messages are
	// serviced locally; response messages are sanity-checked and forwarded
	// to the backend for delivery to whoever requested them.
	switch {
	case msg.Code == GetAccountRangeMsg:
		// Decode the account retrieval request
		var req GetAccountRangePacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		accounts, proofs := ServiceGetAccountRangeQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{
			ID:       req.ID,
			Accounts: accounts,
			Proof:    proofs,
		})

	case msg.Code == AccountRangeMsg:
		// A range of accounts arrived to one of our previous requests
		res := new(AccountRangePacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the range is monotonically increasing
		for i := 1; i < len(res.Accounts); i++ {
			if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
				return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
			}
		}
		requestTracker.Fulfil(peer.id, peer.version, AccountRangeMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetStorageRangesMsg:
		// Decode the storage retrieval request
		var req GetStorageRangesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		slots, proofs := ServiceGetStorageRangesQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{
			ID:    req.ID,
			Slots: slots,
			Proof: proofs,
		})

	case msg.Code == StorageRangesMsg:
		// A range of storage slots arrived to one of our previous requests
		res := new(StorageRangesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the ranges are monotonically increasing
		for i, slots := range res.Slots {
			for j := 1; j < len(slots); j++ {
				if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
					return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
				}
			}
		}
		requestTracker.Fulfil(peer.id, peer.version, StorageRangesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetByteCodesMsg:
		// Decode bytecode retrieval request
		var req GetByteCodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		codes := ServiceGetByteCodesQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{
			ID:    req.ID,
			Codes: codes,
		})

	case msg.Code == ByteCodesMsg:
		// A batch of byte codes arrived to one of our previous requests
		res := new(ByteCodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		requestTracker.Fulfil(peer.id, peer.version, ByteCodesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetTrieNodesMsg:
		// Decode trie node retrieval request
		var req GetTrieNodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors.
		// `start` is passed down so the server can bail before the remote side
		// would time the request out.
		nodes, err := ServiceGetTrieNodesQuery(backend.Chain(), &req, start)
		if err != nil {
			return err
		}
		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{
			ID:    req.ID,
			Nodes: nodes,
		})

	case msg.Code == TrieNodesMsg:
		// A batch of trie nodes arrived to one of our previous requests
		res := new(TrieNodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		requestTracker.Fulfil(peer.id, peer.version, TrieNodesMsg, res.ID)

		return backend.Handle(peer, res)

	default:
		return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code)
	}
}

// ServiceGetAccountRangeQuery assembles the response to an account range query.
// It is exposed to allow external packages to test protocol behavior.
func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePacket) ([]*AccountData, [][]byte) {
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	// Retrieve the requested state and bail out if non existent
	tr, err := trie.New(req.Root, chain.StateCache().TrieDB())
	if err != nil {
		return nil, nil
	}
	it, err := chain.Snapshots().AccountIterator(req.Root, req.Origin)
	if err != nil {
		return nil, nil
	}
	// Iterate over the requested range and pile accounts up
	var (
		accounts []*AccountData
		size     uint64
		last     common.Hash
	)
	for it.Next() {
		hash, account := it.Hash(), common.CopyBytes(it.Account())

		// Track the returned interval for the Merkle proofs
		last = hash

		// Assemble the reply item
		size += uint64(common.HashLength + len(account))
		accounts = append(accounts, &AccountData{
			Hash: hash,
			Body: account,
		})
		// If we've exceeded the request threshold, abort. Checked after the
		// append so the limit account itself is still part of the reply.
		if bytes.Compare(hash[:], req.Limit[:]) >= 0 {
			break
		}
		if size > req.Bytes {
			break
		}
	}
	it.Release()

	// Generate the Merkle proofs for the first and last account
	proof := light.NewNodeSet()
	if err := tr.Prove(req.Origin[:], 0, proof); err != nil {
		log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
		return nil, nil
	}
	// `last` stays zero when the iterator yielded nothing; skip the end proof then
	if last != (common.Hash{}) {
		if err := tr.Prove(last[:], 0, proof); err != nil {
			log.Warn("Failed to prove account range", "last", last, "err", err)
			return nil, nil
		}
	}
	var proofs [][]byte
	for _, blob := range proof.NodeList() {
		proofs = append(proofs, blob)
	}
	return accounts, proofs
}

// ServiceGetStorageRangesQuery assembles the response to a storage ranges query.
// It is exposed to allow external packages to test protocol behavior.
func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesPacket) ([][]*StorageData, [][]byte) {
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	// TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set?
	// TODO(karalabe):   - Logging locally is not ideal as remote faults annoy the local user
	// TODO(karalabe):   - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional)

	// Calculate the hard limit at which to abort, even if mid storage trie
	hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack))

	// Retrieve storage ranges until the packet limit is reached
	var (
		slots  [][]*StorageData
		proofs [][]byte
		size   uint64
	)
	for _, account := range req.Accounts {
		// If we've exceeded the requested data limit, abort without opening
		// a new storage range (that we'd need to prove due to exceeded size)
		if size >= req.Bytes {
			break
		}
		// The first account might start from a different origin and end sooner.
		// Origin/Limit are consumed (nil-ed) here so only the first iteration
		// uses them; subsequent accounts cover their full storage range.
		var origin common.Hash
		if len(req.Origin) > 0 {
			origin, req.Origin = common.BytesToHash(req.Origin), nil
		}
		var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		if len(req.Limit) > 0 {
			limit, req.Limit = common.BytesToHash(req.Limit), nil
		}
		// Retrieve the requested state and bail out if non existent
		it, err := chain.Snapshots().StorageIterator(req.Root, account, origin)
		if err != nil {
			return nil, nil
		}
		// Iterate over the requested range and pile slots up
		var (
			storage []*StorageData
			last    common.Hash
			abort   bool
		)
		for it.Next() {
			if size >= hardLimit {
				abort = true
				break
			}
			hash, slot := it.Hash(), common.CopyBytes(it.Slot())

			// Track the returned interval for the Merkle proofs
			last = hash

			// Assemble the reply item
			size += uint64(common.HashLength + len(slot))
			storage = append(storage, &StorageData{
				Hash: hash,
				Body: slot,
			})
			// If we've exceeded the request threshold, abort
			if bytes.Compare(hash[:], limit[:]) >= 0 {
				break
			}
		}
		slots = append(slots, storage)
		it.Release()

		// Generate the Merkle proofs for the first and last storage slot, but
		// only if the response was capped. If the entire storage trie is included
		// in the response, no need for any proofs.
		if origin != (common.Hash{}) || abort {
			// Request started at a non-zero hash or was capped prematurely, add
			// the endpoint Merkle proofs
			accTrie, err := trie.New(req.Root, chain.StateCache().TrieDB())
			if err != nil {
				return nil, nil
			}
			var acc types.StateAccount
			if err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil {
				return nil, nil
			}
			stTrie, err := trie.New(acc.Root, chain.StateCache().TrieDB())
			if err != nil {
				return nil, nil
			}
			proof := light.NewNodeSet()
			if err := stTrie.Prove(origin[:], 0, proof); err != nil {
				// NOTE(review): req.Origin was already consumed (set to nil)
				// above, so this logs an empty origin; the local `origin` hash
				// looks like the intended value — confirm against upstream.
				log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err)
				return nil, nil
			}
			if last != (common.Hash{}) {
				if err := stTrie.Prove(last[:], 0, proof); err != nil {
					log.Warn("Failed to prove storage range", "last", last, "err", err)
					return nil, nil
				}
			}
			for _, blob := range proof.NodeList() {
				proofs = append(proofs, blob)
			}
			// Proof terminates the reply as proofs are only added if a node
			// refuses to serve more data (exception when a contract fetch is
			// finishing, but that's that).
			break
		}
	}
	return slots, proofs
}

// ServiceGetByteCodesQuery assembles the response to a byte codes query.
// It is exposed to allow external packages to test protocol behavior.
func ServiceGetByteCodesQuery(chain *core.BlockChain, req *GetByteCodesPacket) [][]byte {
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	if len(req.Hashes) > maxCodeLookups {
		req.Hashes = req.Hashes[:maxCodeLookups]
	}
	// Retrieve bytecodes until the packet size limit is reached
	var (
		codes [][]byte
		bytes uint64
	)
	for _, hash := range req.Hashes {
		if hash == emptyCode {
			// Peers should not request the empty code, but if they do, at
			// least send them back a correct response without db lookups
			codes = append(codes, []byte{})
		} else if blob, err := chain.ContractCodeWithPrefix(hash); err == nil {
			// Unknown hashes are silently skipped rather than reported
			codes = append(codes, blob)
			bytes += uint64(len(blob))
		}
		if bytes > req.Bytes {
			break
		}
	}
	return codes
}

// ServiceGetTrieNodesQuery assembles the response to a trie nodes query.
// It is exposed to allow external packages to test protocol behavior.
func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, start time.Time) ([][]byte, error) {
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	// Make sure we have the state associated with the request
	triedb := chain.StateCache().TrieDB()

	accTrie, err := trie.NewSecure(req.Root, triedb)
	if err != nil {
		// We don't have the requested state available, bail out
		return nil, nil
	}
	snap := chain.Snapshots().Snapshot(req.Root)
	if snap == nil {
		// We don't have the requested state snapshotted yet, bail out.
		// In reality we could still serve using the account and storage
		// tries only, but let's protect the node a bit while it's doing
		// snapshot generation.
		return nil, nil
	}
	// Retrieve trie nodes until the packet size limit is reached
	var (
		nodes [][]byte
		bytes uint64
		loads int // Trie hash expansions to count database reads
	)
	for _, pathset := range req.Paths {
		switch len(pathset) {
		case 0:
			// Ensure we penalize invalid requests
			return nil, fmt.Errorf("%w: zero-item pathset requested", errBadRequest)

		case 1:
			// If we're only retrieving an account trie node, fetch it directly
			blob, resolved, err := accTrie.TryGetNode(pathset[0])
			loads += resolved // always account database reads, even for failures
			if err != nil {
				break
			}
			nodes = append(nodes, blob)
			bytes += uint64(len(blob))

		default:
			// Storage slots requested, open the storage trie and retrieve from there
			account, err := snap.Account(common.BytesToHash(pathset[0]))
			loads++ // always account database reads, even for failures
			if err != nil || account == nil {
				break
			}
			stTrie, err := trie.NewSecure(common.BytesToHash(account.Root), triedb)
			loads++ // always account database reads, even for failures
			if err != nil {
				break
			}
			for _, path := range pathset[1:] {
				blob, resolved, err := stTrie.TryGetNode(path)
				loads += resolved // always account database reads, even for failures
				if err != nil {
					break
				}
				nodes = append(nodes, blob)
				bytes += uint64(len(blob))

				// Sanity check limits to avoid DoS on the store trie loads.
				// This only breaks the inner loop; the outer check below
				// terminates the whole request.
				if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
					break
				}
			}
		}
		// Abort request processing if we've exceeded our limits
		if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
			break
		}
	}
	return nodes, nil
}

// NodeInfo represents a short summary of the `snap` sub-protocol metadata
// known about the host peer.
562 type NodeInfo struct{} 563 564 // nodeInfo retrieves some `snap` protocol metadata about the running host node. 565 func nodeInfo(chain *core.BlockChain) *NodeInfo { 566 return &NodeInfo{} 567 }