github.com/juliankolbe/go-ethereum@v1.9.992/eth/protocols/snap/handler.go

// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snap

import (
	"bytes"
	"fmt"

	"github.com/juliankolbe/go-ethereum/common"
	"github.com/juliankolbe/go-ethereum/core"
	"github.com/juliankolbe/go-ethereum/core/state"
	"github.com/juliankolbe/go-ethereum/light"
	"github.com/juliankolbe/go-ethereum/log"
	"github.com/juliankolbe/go-ethereum/p2p"
	"github.com/juliankolbe/go-ethereum/p2p/enode"
	"github.com/juliankolbe/go-ethereum/p2p/enr"
	"github.com/juliankolbe/go-ethereum/rlp"
	"github.com/juliankolbe/go-ethereum/trie"
)

const (
	// softResponseLimit is the target maximum size of replies to data retrievals.
	softResponseLimit = 2 * 1024 * 1024

	// maxCodeLookups is the maximum number of bytecodes to serve. This number is
	// there to limit the number of disk lookups.
	maxCodeLookups = 1024

	// stateLookupSlack defines the ratio by how much a state response can exceed
	// the requested limit in order to try and avoid breaking up contracts into
	// multiple packages and proving them.
	stateLookupSlack = 0.1

	// maxTrieNodeLookups is the maximum number of state trie nodes to serve. This
	// number is there to limit the number of disk lookups.
	maxTrieNodeLookups = 1024
)

// Handler is a callback to invoke from an outside runner after the boilerplate
// exchanges have passed.
type Handler func(peer *Peer) error

// Backend defines the data retrieval methods to serve remote requests and the
// callback methods to invoke on remote deliveries.
type Backend interface {
	// Chain retrieves the blockchain object to serve data.
	Chain() *core.BlockChain

	// RunPeer is invoked when a peer joins on the `snap` protocol. The handler
	// should do any peer maintenance work, handshakes and validations. If all
	// is passed, control should be given back to the `handler` to process the
	// inbound messages going forward.
	RunPeer(peer *Peer, handler Handler) error

	// PeerInfo retrieves all known `snap` information about a peer.
	PeerInfo(id enode.ID) interface{}

	// Handle is a callback to be invoked when a data packet is received from
	// the remote peer. Only packets not consumed by the protocol handler will
	// be forwarded to the backend.
	Handle(peer *Peer, packet Packet) error
}
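
// Editorial sketch, not part of the original file: a minimal Backend
// implementation, assuming the node serves data straight from its blockchain
// object and needs no per-peer bookkeeping. The type and method bodies are
// illustrative stand-ins, not go-ethereum API.
type exampleBackend struct {
	chain *core.BlockChain // chain to serve snapshot data from
}

// Chain returns the blockchain object backing this example.
func (b *exampleBackend) Chain() *core.BlockChain { return b.chain }

// RunPeer skips any handshake and hands control straight to the handler loop.
func (b *exampleBackend) RunPeer(peer *Peer, handler Handler) error {
	return handler(peer)
}

// PeerInfo reports no metadata in this sketch.
func (b *exampleBackend) PeerInfo(id enode.ID) interface{} { return nil }

// Handle drops all deliveries in this sketch; a real backend would route the
// packet to whichever syncer issued the matching request ID.
func (b *exampleBackend) Handle(peer *Peer, packet Packet) error { return nil }

// Compile-time check that the sketch satisfies Backend.
var _ Backend = (*exampleBackend)(nil)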

// MakeProtocols constructs the P2P protocol definitions for `snap`.
func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {
	protocols := make([]p2p.Protocol, len(ProtocolVersions))
	for i, version := range ProtocolVersions {
		version := version // Closure

		protocols[i] = p2p.Protocol{
			Name:    ProtocolName,
			Version: version,
			Length:  protocolLengths[version],
			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
				return backend.RunPeer(newPeer(version, p, rw), func(peer *Peer) error {
					return handle(backend, peer)
				})
			},
			NodeInfo: func() interface{} {
				return nodeInfo(backend.Chain())
			},
			PeerInfo: func(id enode.ID) interface{} {
				return backend.PeerInfo(id)
			},
			Attributes:     []enr.Entry{&enrEntry{}},
			DialCandidates: dnsdisc,
		}
	}
	return protocols
}
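
// Editorial sketch, not part of the original file: wiring the constructed
// protocols into a devp2p server. `exampleBackend` is the illustrative
// stand-in defined above; a real server additionally needs a PrivateKey, a
// listen address and the `eth` protocols alongside `snap`.
func exampleServer(chain *core.BlockChain) *p2p.Server {
	backend := &exampleBackend{chain: chain}
	return &p2p.Server{Config: p2p.Config{
		MaxPeers:  25,
		Protocols: MakeProtocols(backend, nil), // nil iterator: no DNS-discovered dial candidates
	}}
}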

// handle is the callback invoked to manage the life cycle of a `snap` peer.
// When this function terminates, the peer is disconnected.
func handle(backend Backend, peer *Peer) error {
	for {
		if err := handleMessage(backend, peer); err != nil {
			peer.Log().Debug("Message handling failed in `snap`", "err", err)
			return err
		}
	}
}

// handleMessage is invoked whenever an inbound message is received from a
// remote peer on the `snap` protocol. The remote connection is torn down upon
// returning any error.
func handleMessage(backend Backend, peer *Peer) error {
	// Read the next message from the remote peer, and ensure it's fully consumed
	msg, err := peer.rw.ReadMsg()
	if err != nil {
		return err
	}
	if msg.Size > maxMessageSize {
		return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
	}
	defer msg.Discard()

	// Handle the message depending on its contents
	switch {
	case msg.Code == GetAccountRangeMsg:
		// Decode the account retrieval request
		var req GetAccountRangePacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		if req.Bytes > softResponseLimit {
			req.Bytes = softResponseLimit
		}
		// Retrieve the requested state and bail out if non-existent
		tr, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
		if err != nil {
			return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
		}
		it, err := backend.Chain().Snapshots().AccountIterator(req.Root, req.Origin)
		if err != nil {
			return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
		}
		// Iterate over the requested range and pile accounts up
		var (
			accounts []*AccountData
			size     uint64
			last     common.Hash
		)
		for it.Next() && size < req.Bytes {
			hash, account := it.Hash(), common.CopyBytes(it.Account())

			// Track the returned interval for the Merkle proofs
			last = hash

			// Assemble the reply item
			size += uint64(common.HashLength + len(account))
			accounts = append(accounts, &AccountData{
				Hash: hash,
				Body: account,
			})
			// If we've exceeded the request threshold, abort
			if bytes.Compare(hash[:], req.Limit[:]) >= 0 {
				break
			}
		}
		it.Release()

		// Generate the Merkle proofs for the first and last account
		proof := light.NewNodeSet()
		if err := tr.Prove(req.Origin[:], 0, proof); err != nil {
			log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
			return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
		}
		if last != (common.Hash{}) {
			if err := tr.Prove(last[:], 0, proof); err != nil {
				log.Warn("Failed to prove account range", "last", last, "err", err)
				return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
			}
		}
		var proofs [][]byte
		for _, blob := range proof.NodeList() {
			proofs = append(proofs, blob)
		}
		// Send back anything accumulated
		return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{
			ID:       req.ID,
			Accounts: accounts,
			Proof:    proofs,
		})
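
	// Editorial note, not part of the original file: requests and replies are
	// paired purely by the echoed ID field. A hypothetical requester-side
	// sketch for the account range exchange served above (field names per
	// GetAccountRangePacket as used in this handler):
	//
	//	if err := p2p.Send(peer.rw, GetAccountRangeMsg, &GetAccountRangePacket{
	//		ID:     id,     // tracked locally to match the reply
	//		Root:   root,   // state root to serve from
	//		Origin: origin, // first account hash to return
	//		Limit:  limit,  // last account hash to return
	//		Bytes:  softResponseLimit,
	//	}); err != nil {
	//		return err
	//	}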

	case msg.Code == AccountRangeMsg:
		// A range of accounts arrived in response to one of our previous requests
		res := new(AccountRangePacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the range is monotonically increasing
		for i := 1; i < len(res.Accounts); i++ {
			if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
				return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
			}
		}
		return backend.Handle(peer, res)

	case msg.Code == GetStorageRangesMsg:
		// Decode the storage retrieval request
		var req GetStorageRangesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		if req.Bytes > softResponseLimit {
			req.Bytes = softResponseLimit
		}
		// TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set?
		// TODO(karalabe): - Logging locally is not ideal as remote faults annoy the local user
		// TODO(karalabe): - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional)

		// Calculate the hard limit at which to abort, even if mid storage trie
		hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack))

		// Retrieve storage ranges until the packet limit is reached
		var (
			slots  [][]*StorageData
			proofs [][]byte
			size   uint64
		)
		for _, account := range req.Accounts {
			// If we've exceeded the requested data limit, abort without opening
			// a new storage range (that we'd need to prove due to exceeded size)
			if size >= req.Bytes {
				break
			}
			// The first account might start from a different origin and end sooner
			var origin common.Hash
			if len(req.Origin) > 0 {
				origin, req.Origin = common.BytesToHash(req.Origin), nil
			}
			var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
			if len(req.Limit) > 0 {
				limit, req.Limit = common.BytesToHash(req.Limit), nil
			}
			// Retrieve the requested state and bail out if non-existent
			it, err := backend.Chain().Snapshots().StorageIterator(req.Root, account, origin)
			if err != nil {
				return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
			}
			// Iterate over the requested range and pile slots up
			var (
				storage []*StorageData
				last    common.Hash
			)
			for it.Next() && size < hardLimit {
				hash, slot := it.Hash(), common.CopyBytes(it.Slot())

				// Track the returned interval for the Merkle proofs
				last = hash

				// Assemble the reply item
				size += uint64(common.HashLength + len(slot))
				storage = append(storage, &StorageData{
					Hash: hash,
					Body: slot,
				})
				// If we've exceeded the request threshold, abort
				if bytes.Compare(hash[:], limit[:]) >= 0 {
					break
				}
			}
			slots = append(slots, storage)
			it.Release()

			// Generate the Merkle proofs for the first and last storage slot, but
			// only if the response was capped. If the entire storage trie is
			// included in the response, no proofs are needed.
			if origin != (common.Hash{}) || size >= hardLimit {
				// Request started at a non-zero hash or was capped prematurely, add
				// the endpoint Merkle proofs
				accTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
				if err != nil {
					return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
				}
				var acc state.Account
				if err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil {
					return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
				}
				stTrie, err := trie.New(acc.Root, backend.Chain().StateCache().TrieDB())
				if err != nil {
					return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
				}
				proof := light.NewNodeSet()
				if err := stTrie.Prove(origin[:], 0, proof); err != nil {
					log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err)
					return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
				}
				if last != (common.Hash{}) {
					if err := stTrie.Prove(last[:], 0, proof); err != nil {
						log.Warn("Failed to prove storage range", "last", last, "err", err)
						return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
					}
				}
				for _, blob := range proof.NodeList() {
					proofs = append(proofs, blob)
				}
				// A proof terminates the reply, as proofs are only added when a node
				// refuses to serve more data (the exception being a contract fetch
				// that is just finishing, which is fine).
				break
			}
		}
		// Send back anything accumulated
		return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{
			ID:    req.ID,
			Slots: slots,
			Proof: proofs,
		})

	case msg.Code == StorageRangesMsg:
		// A range of storage slots arrived in response to one of our previous requests
		res := new(StorageRangesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the ranges are monotonically increasing
		for i, slots := range res.Slots {
			for j := 1; j < len(slots); j++ {
				if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
					return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
				}
			}
		}
		return backend.Handle(peer, res)
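
	// Editorial note, not part of the original file: a hypothetical requester-
	// side sketch for the bytecode exchange handled below. The hashes are the
	// Keccak-256 code hashes taken from previously fetched account data:
	//
	//	p2p.Send(peer.rw, GetByteCodesMsg, &GetByteCodesPacket{
	//		ID:     id,
	//		Hashes: codeHashes,        // code hashes to fetch
	//		Bytes:  softResponseLimit, // soft cap on the reply size
	//	})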

	case msg.Code == GetByteCodesMsg:
		// Decode bytecode retrieval request
		var req GetByteCodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		if req.Bytes > softResponseLimit {
			req.Bytes = softResponseLimit
		}
		if len(req.Hashes) > maxCodeLookups {
			req.Hashes = req.Hashes[:maxCodeLookups]
		}
		// Retrieve bytecodes until the packet size limit is reached
		var (
			codes [][]byte
			bytes uint64
		)
		for _, hash := range req.Hashes {
			if hash == emptyCode {
				// Peers should not request the empty code, but if they do, at
				// least send them back a correct response without db lookups
				codes = append(codes, []byte{})
			} else if blob, err := backend.Chain().ContractCode(hash); err == nil {
				codes = append(codes, blob)
				bytes += uint64(len(blob))
			}
			if bytes > req.Bytes {
				break
			}
		}
		// Send back anything accumulated
		return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{
			ID:    req.ID,
			Codes: codes,
		})

	case msg.Code == ByteCodesMsg:
		// A batch of byte codes arrived in response to one of our previous requests
		res := new(ByteCodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		return backend.Handle(peer, res)
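
	// Editorial note, not part of the original file: each element of req.Paths
	// handled below is a list of node paths ([][]byte). A one-element list
	// names a node in the account trie; longer lists name the owning account
	// first, followed by node paths within that account's storage trie, e.g.
	// (hypothetical values):
	//
	//	pathset := [][]byte{
	//		accountHash[:], // hashed account key owning the storage trie
	//		storagePathA,   // first storage trie node path wanted
	//		storagePathB,   // second storage trie node path wanted
	//	}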

	case msg.Code == GetTrieNodesMsg:
		// Decode trie node retrieval request
		var req GetTrieNodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		if req.Bytes > softResponseLimit {
			req.Bytes = softResponseLimit
		}
		// Make sure we have the state associated with the request
		triedb := backend.Chain().StateCache().TrieDB()

		accTrie, err := trie.NewSecure(req.Root, triedb)
		if err != nil {
			// We don't have the requested state available, bail out
			return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})
		}
		snap := backend.Chain().Snapshots().Snapshot(req.Root)
		if snap == nil {
			// We don't have the requested state snapshotted yet, bail out.
			// In reality we could still serve using the account and storage
			// tries only, but let's protect the node a bit while it's doing
			// snapshot generation.
			return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})
		}
		// Retrieve trie nodes until the packet size limit is reached
		var (
			nodes [][]byte
			bytes uint64
			loads int // Trie hash expansions to count database reads
		)
		for _, pathset := range req.Paths {
			switch len(pathset) {
			case 0:
				// Ensure we penalize invalid requests
				return fmt.Errorf("%w: zero-item pathset requested", errBadRequest)

			case 1:
				// If we're only retrieving an account trie node, fetch it directly
				blob, resolved, err := accTrie.TryGetNode(pathset[0])
				loads += resolved // always account for database reads, even for failures
				if err != nil {
					break
				}
				nodes = append(nodes, blob)
				bytes += uint64(len(blob))

			default:
				// Storage slots requested, open the storage trie and retrieve from there
				account, err := snap.Account(common.BytesToHash(pathset[0]))
				loads++ // always account for database reads, even for failures
				if err != nil {
					break
				}
				stTrie, err := trie.NewSecure(common.BytesToHash(account.Root), triedb)
				loads++ // always account for database reads, even for failures
				if err != nil {
					break
				}
				for _, path := range pathset[1:] {
					blob, resolved, err := stTrie.TryGetNode(path)
					loads += resolved // always account for database reads, even for failures
					if err != nil {
						break
					}
					nodes = append(nodes, blob)
					bytes += uint64(len(blob))

					// Sanity check limits to avoid DoS on the storage trie loads
					if bytes > req.Bytes || loads > maxTrieNodeLookups {
						break
					}
				}
			}
			// Abort request processing if we've exceeded our limits
			if bytes > req.Bytes || loads > maxTrieNodeLookups {
				break
			}
		}
		// Send back anything accumulated
		return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{
			ID:    req.ID,
			Nodes: nodes,
		})

	case msg.Code == TrieNodesMsg:
		// A batch of trie nodes arrived in response to one of our previous requests
		res := new(TrieNodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		return backend.Handle(peer, res)

	default:
		return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code)
	}
}

// NodeInfo represents a short summary of the `snap` sub-protocol metadata
// known about the host peer.
type NodeInfo struct{}

// nodeInfo retrieves some `snap` protocol metadata about the running host node.
func nodeInfo(chain *core.BlockChain) *NodeInfo {
	return &NodeInfo{}
}
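
// Editorial sketch, not part of the original file: the storage handler above
// caps replies at req.Bytes plus the stateLookupSlack ratio. This helper just
// restates that arithmetic; with the 2 MiB softResponseLimit the hard cap
// works out to 2,306,867 bytes (about 2.2 MiB).
func exampleHardLimit(softBytes uint64) uint64 {
	return uint64(float64(softBytes) * (1 + stateLookupSlack))
}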