github.com/unicornultrafoundation/go-u2u@v1.0.0-rc1.0.20240205080301-e74a83d3fadc/eth/protocols/snap/handler.go

// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snap

import (
	"bytes"
	"fmt"
	"time"

	"github.com/unicornultrafoundation/go-u2u/common"
	"github.com/unicornultrafoundation/go-u2u/core/state"
	"github.com/unicornultrafoundation/go-u2u/core/state/snapshot"
	"github.com/unicornultrafoundation/go-u2u/light"
	"github.com/unicornultrafoundation/go-u2u/log"
	"github.com/unicornultrafoundation/go-u2u/metrics"
	"github.com/unicornultrafoundation/go-u2u/p2p"
	"github.com/unicornultrafoundation/go-u2u/p2p/enode"
	"github.com/unicornultrafoundation/go-u2u/p2p/enr"
	"github.com/unicornultrafoundation/go-u2u/rlp"
	"github.com/unicornultrafoundation/go-u2u/trie"
)

const (
	// softResponseLimit is the target maximum size of replies to data retrievals.
	softResponseLimit = 2 * 1024 * 1024

	// maxCodeLookups is the maximum number of bytecodes to serve. This number is
	// there to limit the number of disk lookups.
	maxCodeLookups = 1024

	// stateLookupSlack defines the ratio by how much a state response can exceed
	// the requested limit in order to try and avoid breaking up contracts into
	// multiple packages and proving them.
	stateLookupSlack = 0.1

	// maxTrieNodeLookups is the maximum number of state trie nodes to serve. This
	// number is there to limit the number of disk lookups.
	maxTrieNodeLookups = 1024

	// maxTrieNodeTimeSpent is the maximum time we should spend on looking up trie nodes.
	// If we spend too much time, there is a fairly high chance of timing out
	// at the remote side, which means all the work is in vain.
	maxTrieNodeTimeSpent = 5 * time.Second
)

// Handler is a callback to invoke from an outside runner after the boilerplate
// exchanges have passed.
type Handler func(peer *Peer) error

type BlockChain interface {
	// StateCache returns the caching database underpinning the blockchain instance.
	StateCache() state.Database
	// ContractCode retrieves a blob of data associated with a contract hash
	// either from ephemeral in-memory cache, or from persistent storage.
	ContractCode(hash common.Hash) ([]byte, error)
	// Snapshots returns the blockchain snapshot tree.
	Snapshots() *snapshot.Tree
}

// Backend defines the data retrieval methods to serve remote requests and the
// callback methods to invoke on remote deliveries.
type Backend interface {
	// Chain retrieves the blockchain object to serve data.
	Chain() BlockChain

	// RunPeer is invoked when a peer joins on the `snap` protocol. The handler
	// should do any peer maintenance work, handshakes and validations. If all
	// is passed, control should be given back to the `handler` to process the
	// inbound messages going forward.
	RunPeer(peer *Peer, handler Handler) error

	// PeerInfo retrieves all known `snap` information about a peer.
	PeerInfo(id enode.ID) interface{}

	// Handle is a callback to be invoked when a data packet is received from
	// the remote peer. Only packets not consumed by the protocol handler will
	// be forwarded to the backend.
	Handle(peer *Peer, packet Packet) error
}

// MakeProtocols constructs the P2P protocol definitions for `snap`.
func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {
	// Filter the discovery iterator for nodes advertising snap support.
	dnsdisc = enode.Filter(dnsdisc, func(n *enode.Node) bool {
		var snap enrEntry
		return n.Load(&snap) == nil
	})

	protocols := make([]p2p.Protocol, len(ProtocolVersions))
	for i, version := range ProtocolVersions {
		version := version // Closure

		protocols[i] = p2p.Protocol{
			Name:    ProtocolName,
			Version: version,
			Length:  protocolLengths[version],
			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
				return backend.RunPeer(newPeer(version, p, rw), func(peer *Peer) error {
					return handle(backend, peer)
				})
			},
			NodeInfo: func() interface{} {
				return nodeInfo(backend.Chain())
			},
			PeerInfo: func(id enode.ID) interface{} {
				return backend.PeerInfo(id)
			},
			Attributes:     []enr.Entry{&enrEntry{}},
			DialCandidates: dnsdisc,
		}
	}
	return protocols
}

// handle is the callback invoked to manage the life cycle of a `snap` peer.
// When this function terminates, the peer is disconnected.
func handle(backend Backend, peer *Peer) error {
	for {
		if err := handleMessage(backend, peer); err != nil {
			peer.Log().Debug("Message handling failed in `snap`", "err", err)
			return err
		}
	}
}
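// The sketch below is illustrative only and is not part of the original file: it
// shows one way a hypothetical backend could satisfy the Backend interface above
// and hand the protocols returned by MakeProtocols to a p2p server. The
// snapBackend type and its chain field are assumptions for the example.
//
//	type snapBackend struct {
//		chain BlockChain // concrete chain implementation provided by the node
//	}
//
//	func (b *snapBackend) Chain() BlockChain                { return b.chain }
//	func (b *snapBackend) PeerInfo(id enode.ID) interface{} { return nil }
//
//	func (b *snapBackend) RunPeer(peer *Peer, handler Handler) error {
//		// Register the peer locally, then hand control back to the protocol
//		// handler loop for the lifetime of the connection.
//		return handler(peer)
//	}
//
//	func (b *snapBackend) Handle(peer *Peer, packet Packet) error {
//		// Route response packets (AccountRangePacket, StorageRangesPacket,
//		// ByteCodesPacket, TrieNodesPacket) to whichever component asked for them.
//		return nil
//	}
//
//	// The resulting protocols can then be appended to a p2p server configuration,
//	// e.g. cfg.Protocols = append(cfg.Protocols, MakeProtocols(backend, dnsdisc)...)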
// handleMessage is invoked whenever an inbound message is received from a
// remote peer on the `snap` protocol. The remote connection is torn down upon
// returning any error.
func handleMessage(backend Backend, peer *Peer) error {
	// Read the next message from the remote peer, and ensure it's fully consumed
	msg, err := peer.rw.ReadMsg()
	if err != nil {
		return err
	}
	if msg.Size > maxMessageSize {
		return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
	}
	defer msg.Discard()
	start := time.Now()
	// Track the amount of time it takes to serve the request and run the handler
	if metrics.Enabled {
		h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
		defer func(start time.Time) {
			sampler := func() metrics.Sample {
				return metrics.ResettingSample(
					metrics.NewExpDecaySample(1028, 0.015),
				)
			}
			metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds())
		}(start)
	}
	// Handle the message depending on its contents
	switch {
	case msg.Code == GetAccountRangeMsg:
		// Decode the account retrieval request
		var req GetAccountRangePacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		if req.Bytes > softResponseLimit {
			req.Bytes = softResponseLimit
		}
		// Retrieve the requested state and bail out if non-existent
		tr, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
		if err != nil {
			return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
		}
		it, err := backend.Chain().Snapshots().AccountIterator(req.Root, req.Origin)
		if err != nil {
			return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
		}
		// Iterate over the requested range and pile accounts up
		var (
			accounts []*AccountData
			size     uint64
			last     common.Hash
		)
		for it.Next() && size < req.Bytes {
			hash, account := it.Hash(), common.CopyBytes(it.Account())

			// Track the returned interval for the Merkle proofs
			last = hash

			// Assemble the reply item
			size += uint64(common.HashLength + len(account))
			accounts = append(accounts, &AccountData{
				Hash: hash,
				Body: account,
			})
			// If we've exceeded the request threshold, abort
			if bytes.Compare(hash[:], req.Limit[:]) >= 0 {
				break
			}
		}
		it.Release()

		// Generate the Merkle proofs for the first and last account
		proof := light.NewNodeSet()
		if err := tr.Prove(req.Origin[:], 0, proof); err != nil {
			log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
			return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
		}
		if last != (common.Hash{}) {
			if err := tr.Prove(last[:], 0, proof); err != nil {
				log.Warn("Failed to prove account range", "last", last, "err", err)
				return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
			}
		}
		var proofs [][]byte
		for _, blob := range proof.NodeList() {
			proofs = append(proofs, blob)
		}
		// Send back anything accumulated
		return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{
			ID:       req.ID,
			Accounts: accounts,
			Proof:    proofs,
		})
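	// Illustrative sketch, not part of the original file: this is roughly how a
	// requesting peer could ask for the first chunk of the account range under a
	// given state root. The stateRoot variable and the raw p2p.Send call are
	// assumptions for the example; the field names match the request decoded above.
	//
	//	req := &GetAccountRangePacket{
	//		ID:     1,                 // request id echoed back in AccountRangePacket.ID
	//		Root:   stateRoot,         // state root the range should be served from
	//		Origin: common.Hash{},     // first account hash to include
	//		Limit:  common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
	//		Bytes:  softResponseLimit, // soft cap on the response size
	//	}
	//	_ = p2p.Send(peer.rw, GetAccountRangeMsg, req)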
	case msg.Code == AccountRangeMsg:
		// A range of accounts arrived in response to one of our previous requests
		res := new(AccountRangePacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the range is monotonically increasing
		for i := 1; i < len(res.Accounts); i++ {
			if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
				return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
			}
		}
		requestTracker.Fulfil(peer.id, peer.version, AccountRangeMsg, res.ID)

		return backend.Handle(peer, res)
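	// Illustrative sketch, not part of the original file: a storage-range request
	// names the state root, the accounts whose storage tries are wanted, and an
	// optional Origin/Limit pair that, as the handler below shows, only applies to
	// the first account. The stateRoot and accountHash values are assumptions.
	//
	//	req := &GetStorageRangesPacket{
	//		ID:       2,
	//		Root:     stateRoot,
	//		Accounts: []common.Hash{accountHash},
	//		Origin:   nil,               // empty means: start of the first account's storage trie
	//		Limit:    nil,               // empty means: up to the end of the trie
	//		Bytes:    softResponseLimit, // soft cap; the server may overshoot by stateLookupSlack
	//	}
	//	_ = p2p.Send(peer.rw, GetStorageRangesMsg, req)
	//
	// With Bytes = softResponseLimit (2 MiB), the serving side computes a hard
	// limit of 2 MiB * (1 + 0.1) ≈ 2.2 MiB before it aborts mid-trie.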
	case msg.Code == GetStorageRangesMsg:
		// Decode the storage retrieval request
		var req GetStorageRangesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		if req.Bytes > softResponseLimit {
			req.Bytes = softResponseLimit
		}
		// TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set?
		// TODO(karalabe): - Logging locally is not ideal as remote faults annoy the local user
		// TODO(karalabe): - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional)

		// Calculate the hard limit at which to abort, even if mid storage trie
		hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack))

		// Retrieve storage ranges until the packet limit is reached
		var (
			slots  [][]*StorageData
			proofs [][]byte
			size   uint64
		)
		for _, account := range req.Accounts {
			// If we've exceeded the requested data limit, abort without opening
			// a new storage range (that we'd need to prove due to exceeded size)
			if size >= req.Bytes {
				break
			}
			// The first account might start from a different origin and end sooner
			var origin common.Hash
			if len(req.Origin) > 0 {
				origin, req.Origin = common.BytesToHash(req.Origin), nil
			}
			var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
			if len(req.Limit) > 0 {
				limit, req.Limit = common.BytesToHash(req.Limit), nil
			}
			// Retrieve the requested state and bail out if non-existent
			it, err := backend.Chain().Snapshots().StorageIterator(req.Root, account, origin)
			if err != nil {
				return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
			}
			// Iterate over the requested range and pile slots up
			var (
				storage []*StorageData
				last    common.Hash
				abort   bool
			)
			for it.Next() {
				if size >= hardLimit {
					abort = true
					break
				}
				hash, slot := it.Hash(), common.CopyBytes(it.Slot())

				// Track the returned interval for the Merkle proofs
				last = hash

				// Assemble the reply item
				size += uint64(common.HashLength + len(slot))
				storage = append(storage, &StorageData{
					Hash: hash,
					Body: slot,
				})
				// If we've exceeded the request threshold, abort
				if bytes.Compare(hash[:], limit[:]) >= 0 {
					break
				}
			}
			slots = append(slots, storage)
			it.Release()

			// Generate the Merkle proofs for the first and last storage slot, but
			// only if the response was capped. If the entire storage trie is included
			// in the response, there is no need for any proofs.
			if origin != (common.Hash{}) || abort {
				// Request started at a non-zero hash or was capped prematurely, add
				// the endpoint Merkle proofs
				accTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
				if err != nil {
					return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
				}
				var acc state.Account
				if err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil {
					return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
				}
				stTrie, err := trie.New(acc.Root, backend.Chain().StateCache().TrieDB())
				if err != nil {
					return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
				}
				proof := light.NewNodeSet()
				if err := stTrie.Prove(origin[:], 0, proof); err != nil {
					log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err)
					return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
				}
				if last != (common.Hash{}) {
					if err := stTrie.Prove(last[:], 0, proof); err != nil {
						log.Warn("Failed to prove storage range", "last", last, "err", err)
						return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
					}
				}
				for _, blob := range proof.NodeList() {
					proofs = append(proofs, blob)
				}
				// Proof terminates the reply as proofs are only added if a node
				// refuses to serve more data (exception when a contract fetch is
				// finishing, but that's that).
				break
			}
		}
		// Send back anything accumulated
		return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{
			ID:    req.ID,
			Slots: slots,
			Proof: proofs,
		})

	case msg.Code == StorageRangesMsg:
		// A range of storage slots arrived in response to one of our previous requests
		res := new(StorageRangesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the ranges are monotonically increasing
		for i, slots := range res.Slots {
			for j := 1; j < len(slots); j++ {
				if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
					return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
				}
			}
		}
		requestTracker.Fulfil(peer.id, peer.version, StorageRangesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetByteCodesMsg:
		// Decode bytecode retrieval request
		var req GetByteCodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		if req.Bytes > softResponseLimit {
			req.Bytes = softResponseLimit
		}
		if len(req.Hashes) > maxCodeLookups {
			req.Hashes = req.Hashes[:maxCodeLookups]
		}
		// Retrieve bytecodes until the packet size limit is reached
		var (
			codes [][]byte
			bytes uint64
		)
		for _, hash := range req.Hashes {
			if hash == emptyCode {
				// Peers should not request the empty code, but if they do, at
				// least send them back a correct response without db lookups
				codes = append(codes, []byte{})
			} else if blob, err := backend.Chain().ContractCode(hash); err == nil {
				codes = append(codes, blob)
				bytes += uint64(len(blob))
			}
			if bytes > req.Bytes {
				break
			}
		}
		// Send back anything accumulated
		return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{
			ID:    req.ID,
			Codes: codes,
		})
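	// Illustrative sketch, not part of the original file: a bytecode request simply
	// lists the code hashes to fetch. Requests longer than maxCodeLookups (1024) are
	// truncated by the handler above, and asking for emptyCode returns an empty blob
	// without a database lookup. The codeHashes variable is an assumption.
	//
	//	req := &GetByteCodesPacket{
	//		ID:     3,
	//		Hashes: codeHashes,        // contract code hashes, at most maxCodeLookups of them
	//		Bytes:  softResponseLimit, // soft cap on the cumulative bytecode size
	//	}
	//	_ = p2p.Send(peer.rw, GetByteCodesMsg, req)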
	case msg.Code == ByteCodesMsg:
		// A batch of byte codes arrived in response to one of our previous requests
		res := new(ByteCodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		requestTracker.Fulfil(peer.id, peer.version, ByteCodesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetTrieNodesMsg:
		// Decode trie node retrieval request
		var req GetTrieNodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		if req.Bytes > softResponseLimit {
			req.Bytes = softResponseLimit
		}
		// Make sure we have the state associated with the request
		triedb := backend.Chain().StateCache().TrieDB()

		accTrie, err := trie.NewSecure(req.Root, triedb)
		if err != nil {
			// We don't have the requested state available, bail out
			return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})
		}
		snap := backend.Chain().Snapshots().Snapshot(req.Root)
		if snap == nil {
			// We don't have the requested state snapshotted yet, bail out.
			// In reality we could still serve using the account and storage
			// tries only, but let's protect the node a bit while it's doing
			// snapshot generation.
			return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})
		}
		// Retrieve trie nodes until the packet size limit is reached
		var (
			nodes [][]byte
			bytes uint64
			loads int // Trie hash expansions to count database reads
		)
		for _, pathset := range req.Paths {
			switch len(pathset) {
			case 0:
				// Ensure we penalize invalid requests
				return fmt.Errorf("%w: zero-item pathset requested", errBadRequest)

			case 1:
				// If we're only retrieving an account trie node, fetch it directly
				blob, resolved, err := accTrie.TryGetNode(pathset[0])
				loads += resolved // always account for database reads, even for failures
				if err != nil {
					break
				}
				nodes = append(nodes, blob)
				bytes += uint64(len(blob))

			default:
				// Storage slots requested, open the storage trie and retrieve from there
				account, err := snap.Account(common.BytesToHash(pathset[0]))
				loads++ // always account for database reads, even for failures
				if err != nil || account == nil {
					break
				}
				stTrie, err := trie.NewSecure(common.BytesToHash(account.Root), triedb)
				loads++ // always account for database reads, even for failures
				if err != nil {
					break
				}
				for _, path := range pathset[1:] {
					blob, resolved, err := stTrie.TryGetNode(path)
					loads += resolved // always account for database reads, even for failures
					if err != nil {
						break
					}
					nodes = append(nodes, blob)
					bytes += uint64(len(blob))

					// Sanity check limits to avoid DoS on the storage trie loads
					if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
						break
					}
				}
			}
			// Abort request processing if we've exceeded our limits
			if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
				break
			}
		}
		// Send back anything accumulated
		return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{
			ID:    req.ID,
			Nodes: nodes,
		})
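	// Illustrative sketch, not part of the original file: each entry in Paths is a
	// path set. As the handler above shows, a one-element set addresses a node in
	// the account trie, while a longer set names an account (first element) followed
	// by node paths inside that account's storage trie. The TrieNodePathSet name,
	// stateRoot, accountPath and slotPath values are assumptions for the example.
	//
	//	req := &GetTrieNodesPacket{
	//		ID:   4,
	//		Root: stateRoot,
	//		Paths: []TrieNodePathSet{
	//			{accountPath},            // one element: node from the account trie
	//			{accountPath, slotPath},  // multiple elements: nodes from that account's storage trie
	//		},
	//		Bytes: softResponseLimit,
	//	}
	//	_ = p2p.Send(peer.rw, GetTrieNodesMsg, req)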
	case msg.Code == TrieNodesMsg:
		// A batch of trie nodes arrived in response to one of our previous requests
		res := new(TrieNodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		requestTracker.Fulfil(peer.id, peer.version, TrieNodesMsg, res.ID)

		return backend.Handle(peer, res)

	default:
		return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code)
	}
}

// NodeInfo represents a short summary of the `snap` sub-protocol metadata
// known about the host peer.
type NodeInfo struct{}

// nodeInfo retrieves some `snap` protocol metadata about the running host node.
func nodeInfo(BlockChain) *NodeInfo {
	return &NodeInfo{}
}
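// Illustrative sketch, not part of the original file: response packets are handed
// to Backend.Handle by the switch above, so a backend typically demultiplexes them
// with a type switch. The snapBackend type and the deliver* helpers are assumptions
// for the example.
//
//	func (b *snapBackend) Handle(peer *Peer, packet Packet) error {
//		switch packet := packet.(type) {
//		case *AccountRangePacket:
//			return b.deliverAccountRange(peer, packet)
//		case *StorageRangesPacket:
//			return b.deliverStorageRanges(peer, packet)
//		case *ByteCodesPacket:
//			return b.deliverByteCodes(peer, packet)
//		case *TrieNodesPacket:
//			return b.deliverTrieNodes(peer, packet)
//		default:
//			return fmt.Errorf("unexpected snap packet type: %T", packet)
//		}
//	}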