// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
	"encoding/json"
	"errors"
	"fmt"
	"math"
	"math/big"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/consensus/misc"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/eth/fetcher"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
)

const (
	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header

	// txChanSize is the size of channel listening to TxPreEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 4096
)

var (
	daoChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the DAO handshake challenge
)

// errIncompatibleConfig is returned if the requested protocols and configs are
// not compatible (low protocol version restrictions and high requirements).
var errIncompatibleConfig = errors.New("incompatible configuration")

// errResp wraps a protocol error code and a formatted detail message into a
// single error value reported back to the remote peer handler.
func errResp(code errCode, format string, v ...interface{}) error {
	return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
}

// ProtocolManager wires the eth wire protocol to the local blockchain: it
// accepts peers, services their queries, relays new blocks/transactions and
// drives the downloader/fetcher based synchronisation machinery.
type ProtocolManager struct {
	networkId uint64

	fastSync  uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
	acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)

	txpool      txPool
	blockchain  *core.BlockChain
	chaindb     ethdb.Database
	chainconfig *params.ChainConfig
	maxPeers    int // Hard cap on connected peers, enforced in handle

	downloader *downloader.Downloader // Bulk chain synchronisation
	fetcher    *fetcher.Fetcher       // Announced-block retrieval
	peers      *peerSet               // Set of currently active peers

	SubProtocols []p2p.Protocol // Versioned protocol descriptors registered with the p2p server

	eventMux      *event.TypeMux
	txCh          chan core.TxPreEvent // Receives new local/remote transactions to broadcast
	txSub         event.Subscription
	minedBlockSub *event.TypeMuxSubscription // nil when running in raft mode

	// channels for fetcher, syncer, txsyncLoop
	newPeerCh   chan *peer
	txsyncCh    chan *txsync
	quitSync    chan struct{}
	noMorePeers chan struct{}

	// wait group is used for graceful shutdowns during downloading
	// and processing
	wg sync.WaitGroup

	raftMode bool             // When true, block propagation paths are disabled (raft handles blocks)
	engine   consensus.Engine // Consensus engine, may implement consensus.Handler for message hooks
}

// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
// with the ethereum network.
106 func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkId uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database, raftMode bool) (*ProtocolManager, error) { 107 // Create the protocol manager with the base fields 108 manager := &ProtocolManager{ 109 networkId: networkId, 110 eventMux: mux, 111 txpool: txpool, 112 blockchain: blockchain, 113 chaindb: chaindb, 114 chainconfig: config, 115 peers: newPeerSet(), 116 newPeerCh: make(chan *peer), 117 noMorePeers: make(chan struct{}), 118 txsyncCh: make(chan *txsync), 119 quitSync: make(chan struct{}), 120 raftMode: raftMode, 121 engine: engine, 122 } 123 124 if handler, ok := manager.engine.(consensus.Handler); ok { 125 handler.SetBroadcaster(manager) 126 } 127 128 // Figure out whether to allow fast sync or not 129 if mode == downloader.FastSync && blockchain.CurrentBlock().NumberU64() > 0 { 130 log.Warn("Blockchain not empty, fast sync disabled") 131 mode = downloader.FullSync 132 } 133 if mode == downloader.FastSync { 134 manager.fastSync = uint32(1) 135 } 136 protocol := engine.Protocol() 137 // Initiate a sub-protocol for every implemented version we can handle 138 manager.SubProtocols = make([]p2p.Protocol, 0, len(protocol.Versions)) 139 for i, version := range protocol.Versions { 140 // Skip protocol version if incompatible with the mode of operation 141 if mode == downloader.FastSync && version < consensus.Eth63 { 142 continue 143 } 144 // Compatible; initialise the sub-protocol 145 version := version // Closure for the run 146 manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{ 147 Name: protocol.Name, 148 Version: version, 149 Length: protocol.Lengths[i], 150 Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error { 151 peer := manager.newPeer(int(version), p, rw) 152 select { 153 case manager.newPeerCh <- peer: 154 manager.wg.Add(1) 155 defer manager.wg.Done() 156 return manager.handle(peer) 157 case 
<-manager.quitSync: 158 return p2p.DiscQuitting 159 } 160 }, 161 NodeInfo: func() interface{} { 162 return manager.NodeInfo() 163 }, 164 PeerInfo: func(id discover.NodeID) interface{} { 165 if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil { 166 return p.Info() 167 } 168 return nil 169 }, 170 }) 171 } 172 if len(manager.SubProtocols) == 0 { 173 return nil, errIncompatibleConfig 174 } 175 // Construct the different synchronisation mechanisms 176 manager.downloader = downloader.New(mode, chaindb, manager.eventMux, blockchain, nil, manager.removePeer) 177 178 validator := func(header *types.Header) error { 179 return engine.VerifyHeader(blockchain, header, true) 180 } 181 heighter := func() uint64 { 182 return blockchain.CurrentBlock().NumberU64() 183 } 184 inserter := func(blocks types.Blocks) (int, error) { 185 // If fast sync is running, deny importing weird blocks 186 if atomic.LoadUint32(&manager.fastSync) == 1 { 187 log.Warn("Discarded bad propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash()) 188 return 0, nil 189 } 190 atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import 191 return manager.blockchain.InsertChain(blocks) 192 } 193 manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer) 194 195 return manager, nil 196 } 197 198 func (pm *ProtocolManager) removePeer(id string) { 199 // Short circuit if the peer was already removed 200 peer := pm.peers.Peer(id) 201 if peer == nil { 202 return 203 } 204 log.Debug("Removing Ethereum peer", "peer", id) 205 206 // Unregister the peer from the downloader and Ethereum peer set 207 pm.downloader.UnregisterPeer(id) 208 if err := pm.peers.Unregister(id); err != nil { 209 log.Error("Peer removal failed", "peer", id, "err", err) 210 } 211 // Hard disconnect at the networking layer 212 if peer != nil { 213 peer.Peer.Disconnect(p2p.DiscUselessPeer) 214 } 215 } 216 217 func (pm 
*ProtocolManager) Start(maxPeers int) { 218 pm.maxPeers = maxPeers 219 220 // broadcast transactions 221 pm.txCh = make(chan core.TxPreEvent, txChanSize) 222 pm.txSub = pm.txpool.SubscribeTxPreEvent(pm.txCh) 223 go pm.txBroadcastLoop() 224 225 if !pm.raftMode { 226 // broadcast mined blocks 227 pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{}) 228 go pm.minedBroadcastLoop() 229 } else { 230 // We set this immediately in raft mode to make sure the miner never drops 231 // incoming txes. Raft mode doesn't use the fetcher or downloader, and so 232 // this would never be set otherwise. 233 atomic.StoreUint32(&pm.acceptTxs, 1) 234 } 235 236 // start sync handlers 237 go pm.syncer() 238 go pm.txsyncLoop() 239 } 240 241 func (pm *ProtocolManager) Stop() { 242 log.Info("Stopping Ethereum protocol") 243 244 pm.txSub.Unsubscribe() // quits txBroadcastLoop 245 if !pm.raftMode { 246 pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop 247 } 248 249 // Quit the sync loop. 250 // After this send has completed, no new peers will be accepted. 251 pm.noMorePeers <- struct{}{} 252 253 // Quit fetcher, txsyncLoop. 254 close(pm.quitSync) 255 256 // Disconnect existing sessions. 257 // This also closes the gate for any new registrations on the peer set. 258 // sessions which are already established but not added to pm.peers yet 259 // will exit when they try to register. 260 pm.peers.Close() 261 262 // Wait for all peer handler goroutines and the loops to come down. 263 pm.wg.Wait() 264 265 log.Info("Ethereum protocol stopped") 266 } 267 268 func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer { 269 return newPeer(pv, p, newMeteredMsgWriter(rw)) 270 } 271 272 // handle is the callback invoked to manage the life cycle of an eth peer. When 273 // this function terminates, the peer is disconnected. 
274 func (pm *ProtocolManager) handle(p *peer) error { 275 if pm.peers.Len() >= pm.maxPeers { 276 return p2p.DiscTooManyPeers 277 } 278 p.Log().Debug("Ethereum peer connected", "name", p.Name()) 279 280 // Execute the Ethereum handshake 281 td, head, genesis := pm.blockchain.Status() 282 if err := p.Handshake(pm.networkId, td, head, genesis); err != nil { 283 p.Log().Debug("Ethereum handshake failed", "err", err) 284 return err 285 } 286 if rw, ok := p.rw.(*meteredMsgReadWriter); ok { 287 rw.Init(p.version) 288 } 289 // Register the peer locally 290 if err := pm.peers.Register(p); err != nil { 291 p.Log().Error("Ethereum peer registration failed", "err", err) 292 return err 293 } 294 defer pm.removePeer(p.id) 295 296 // Register the peer in the downloader. If the downloader considers it banned, we disconnect 297 if err := pm.downloader.RegisterPeer(p.id, p.version, p); err != nil { 298 return err 299 } 300 // Propagate existing transactions. new transactions appearing 301 // after this will be sent via broadcasts. 302 pm.syncTransactions(p) 303 304 // If we're DAO hard-fork aware, validate any remote peer with regard to the hard-fork 305 if daoBlock := pm.chainconfig.DAOForkBlock; daoBlock != nil { 306 // Request the peer's DAO fork header for extra-data validation 307 if err := p.RequestHeadersByNumber(daoBlock.Uint64(), 1, 0, false); err != nil { 308 return err 309 } 310 // Start a timer to disconnect if the peer doesn't reply in time 311 p.forkDrop = time.AfterFunc(daoChallengeTimeout, func() { 312 p.Log().Debug("Timed out DAO fork-check, dropping") 313 pm.removePeer(p.id) 314 }) 315 // Make sure it's cleaned up if the peer dies off 316 defer func() { 317 if p.forkDrop != nil { 318 p.forkDrop.Stop() 319 p.forkDrop = nil 320 } 321 }() 322 } 323 // main loop. handle incoming messages. 
324 for { 325 if err := pm.handleMsg(p); err != nil { 326 p.Log().Debug("Ethereum message handling failed", "err", err) 327 return err 328 } 329 } 330 } 331 332 // handleMsg is invoked whenever an inbound message is received from a remote 333 // peer. The remote connection is torn down upon returning any error. 334 func (pm *ProtocolManager) handleMsg(p *peer) error { 335 // Read the next message from the remote peer, and ensure it's fully consumed 336 msg, err := p.rw.ReadMsg() 337 if err != nil { 338 return err 339 } 340 if msg.Size > ProtocolMaxMsgSize { 341 return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize) 342 } 343 defer msg.Discard() 344 345 if pm.raftMode { 346 if msg.Code != TxMsg && 347 msg.Code != GetBlockHeadersMsg && msg.Code != BlockHeadersMsg && 348 msg.Code != GetBlockBodiesMsg && msg.Code != BlockBodiesMsg { 349 350 log.Info("raft: ignoring message", "code", msg.Code) 351 352 return nil 353 } 354 } else if handler, ok := pm.engine.(consensus.Handler); ok { 355 pubKey, err := p.ID().Pubkey() 356 if err != nil { 357 return err 358 } 359 addr := crypto.PubkeyToAddress(*pubKey) 360 handled, err := handler.HandleMsg(addr, msg) 361 if handled { 362 return err 363 } 364 } 365 366 // Handle the message depending on its contents 367 switch { 368 case msg.Code == StatusMsg: 369 // Status messages should never arrive after the handshake 370 return errResp(ErrExtraStatusMsg, "uncontrolled status message") 371 372 // Block header query, collect the requested headers and reply 373 case msg.Code == GetBlockHeadersMsg: 374 // Decode the complex header query 375 var query getBlockHeadersData 376 if err := msg.Decode(&query); err != nil { 377 return errResp(ErrDecode, "%v: %v", msg, err) 378 } 379 hashMode := query.Origin.Hash != (common.Hash{}) 380 381 // Gather headers until the fetch or network limits is reached 382 var ( 383 bytes common.StorageSize 384 headers []*types.Header 385 unknown bool 386 ) 387 for !unknown && len(headers) < 
int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch { 388 // Retrieve the next header satisfying the query 389 var origin *types.Header 390 if hashMode { 391 origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash) 392 } else { 393 origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number) 394 } 395 if origin == nil { 396 break 397 } 398 number := origin.Number.Uint64() 399 headers = append(headers, origin) 400 bytes += estHeaderRlpSize 401 402 // Advance to the next header of the query 403 switch { 404 case query.Origin.Hash != (common.Hash{}) && query.Reverse: 405 // Hash based traversal towards the genesis block 406 for i := 0; i < int(query.Skip)+1; i++ { 407 if header := pm.blockchain.GetHeader(query.Origin.Hash, number); header != nil { 408 query.Origin.Hash = header.ParentHash 409 number-- 410 } else { 411 unknown = true 412 break 413 } 414 } 415 case query.Origin.Hash != (common.Hash{}) && !query.Reverse: 416 // Hash based traversal towards the leaf block 417 var ( 418 current = origin.Number.Uint64() 419 next = current + query.Skip + 1 420 ) 421 if next <= current { 422 infos, _ := json.MarshalIndent(p.Peer.Info(), "", " ") 423 p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos) 424 unknown = true 425 } else { 426 if header := pm.blockchain.GetHeaderByNumber(next); header != nil { 427 if pm.blockchain.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash { 428 query.Origin.Hash = header.Hash() 429 } else { 430 unknown = true 431 } 432 } else { 433 unknown = true 434 } 435 } 436 case query.Reverse: 437 // Number based traversal towards the genesis block 438 if query.Origin.Number >= query.Skip+1 { 439 query.Origin.Number -= (query.Skip + 1) 440 } else { 441 unknown = true 442 } 443 444 case !query.Reverse: 445 // Number based traversal towards the leaf block 446 query.Origin.Number += (query.Skip + 1) 447 } 448 
} 449 return p.SendBlockHeaders(headers) 450 451 case msg.Code == BlockHeadersMsg: 452 // A batch of headers arrived to one of our previous requests 453 var headers []*types.Header 454 if err := msg.Decode(&headers); err != nil { 455 return errResp(ErrDecode, "msg %v: %v", msg, err) 456 } 457 // If no headers were received, but we're expending a DAO fork check, maybe it's that 458 if len(headers) == 0 && p.forkDrop != nil { 459 // Possibly an empty reply to the fork header checks, sanity check TDs 460 verifyDAO := true 461 462 // If we already have a DAO header, we can check the peer's TD against it. If 463 // the peer's ahead of this, it too must have a reply to the DAO check 464 if daoHeader := pm.blockchain.GetHeaderByNumber(pm.chainconfig.DAOForkBlock.Uint64()); daoHeader != nil { 465 if _, td := p.Head(); td.Cmp(pm.blockchain.GetTd(daoHeader.Hash(), daoHeader.Number.Uint64())) >= 0 { 466 verifyDAO = false 467 } 468 } 469 // If we're seemingly on the same chain, disable the drop timer 470 if verifyDAO { 471 p.Log().Debug("Seems to be on the same side of the DAO fork") 472 p.forkDrop.Stop() 473 p.forkDrop = nil 474 return nil 475 } 476 } 477 // Filter out any explicitly requested headers, deliver the rest to the downloader 478 filter := len(headers) == 1 479 if filter { 480 // If it's a potential DAO fork check, validate against the rules 481 if p.forkDrop != nil && pm.chainconfig.DAOForkBlock.Cmp(headers[0].Number) == 0 { 482 // Disable the fork drop timer 483 p.forkDrop.Stop() 484 p.forkDrop = nil 485 486 // Validate the header and either drop the peer or continue 487 if err := misc.VerifyDAOHeaderExtraData(pm.chainconfig, headers[0]); err != nil { 488 p.Log().Debug("Verified to be on the other side of the DAO fork, dropping") 489 return err 490 } 491 p.Log().Debug("Verified to be on the same side of the DAO fork") 492 return nil 493 } 494 // Irrelevant of the fork checks, send the header to the fetcher just in case 495 headers = pm.fetcher.FilterHeaders(p.id, 
headers, time.Now()) 496 } 497 if len(headers) > 0 || !filter { 498 err := pm.downloader.DeliverHeaders(p.id, headers) 499 if err != nil { 500 log.Debug("Failed to deliver headers", "err", err) 501 } 502 } 503 504 case msg.Code == GetBlockBodiesMsg: 505 // Decode the retrieval message 506 msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size)) 507 if _, err := msgStream.List(); err != nil { 508 return err 509 } 510 // Gather blocks until the fetch or network limits is reached 511 var ( 512 hash common.Hash 513 bytes int 514 bodies []rlp.RawValue 515 ) 516 for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch { 517 // Retrieve the hash of the next block 518 if err := msgStream.Decode(&hash); err == rlp.EOL { 519 break 520 } else if err != nil { 521 return errResp(ErrDecode, "msg %v: %v", msg, err) 522 } 523 // Retrieve the requested block body, stopping if enough was found 524 if data := pm.blockchain.GetBodyRLP(hash); len(data) != 0 { 525 bodies = append(bodies, data) 526 bytes += len(data) 527 } 528 } 529 return p.SendBlockBodiesRLP(bodies) 530 531 case msg.Code == BlockBodiesMsg: 532 // A batch of block bodies arrived to one of our previous requests 533 var request blockBodiesData 534 if err := msg.Decode(&request); err != nil { 535 return errResp(ErrDecode, "msg %v: %v", msg, err) 536 } 537 // Deliver them all to the downloader for queuing 538 trasactions := make([][]*types.Transaction, len(request)) 539 uncles := make([][]*types.Header, len(request)) 540 541 for i, body := range request { 542 trasactions[i] = body.Transactions 543 uncles[i] = body.Uncles 544 } 545 // Filter out any explicitly requested bodies, deliver the rest to the downloader 546 filter := len(trasactions) > 0 || len(uncles) > 0 547 if filter { 548 trasactions, uncles = pm.fetcher.FilterBodies(p.id, trasactions, uncles, time.Now()) 549 } 550 if len(trasactions) > 0 || len(uncles) > 0 || !filter { 551 err := pm.downloader.DeliverBodies(p.id, trasactions, uncles) 552 if err 
!= nil { 553 log.Debug("Failed to deliver bodies", "err", err) 554 } 555 } 556 557 case p.version >= consensus.Eth63 && msg.Code == GetNodeDataMsg: 558 // Decode the retrieval message 559 msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size)) 560 if _, err := msgStream.List(); err != nil { 561 return err 562 } 563 // Gather state data until the fetch or network limits is reached 564 var ( 565 hash common.Hash 566 bytes int 567 data [][]byte 568 ) 569 for bytes < softResponseLimit && len(data) < downloader.MaxStateFetch { 570 // Retrieve the hash of the next state entry 571 if err := msgStream.Decode(&hash); err == rlp.EOL { 572 break 573 } else if err != nil { 574 return errResp(ErrDecode, "msg %v: %v", msg, err) 575 } 576 // Retrieve the requested state entry, stopping if enough was found 577 if entry, err := pm.chaindb.Get(hash.Bytes()); err == nil { 578 data = append(data, entry) 579 bytes += len(entry) 580 } 581 } 582 return p.SendNodeData(data) 583 584 case p.version >= consensus.Eth63 && msg.Code == NodeDataMsg: 585 // A batch of node state data arrived to one of our previous requests 586 var data [][]byte 587 if err := msg.Decode(&data); err != nil { 588 return errResp(ErrDecode, "msg %v: %v", msg, err) 589 } 590 // Deliver all to the downloader 591 if err := pm.downloader.DeliverNodeData(p.id, data); err != nil { 592 log.Debug("Failed to deliver node state data", "err", err) 593 } 594 595 case p.version >= consensus.Eth63 && msg.Code == GetReceiptsMsg: 596 // Decode the retrieval message 597 msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size)) 598 if _, err := msgStream.List(); err != nil { 599 return err 600 } 601 // Gather state data until the fetch or network limits is reached 602 var ( 603 hash common.Hash 604 bytes int 605 receipts []rlp.RawValue 606 ) 607 for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptFetch { 608 // Retrieve the hash of the next block 609 if err := msgStream.Decode(&hash); err == rlp.EOL { 610 break 611 
} else if err != nil { 612 return errResp(ErrDecode, "msg %v: %v", msg, err) 613 } 614 // Retrieve the requested block's receipts, skipping if unknown to us 615 results := core.GetBlockReceipts(pm.chaindb, hash, core.GetBlockNumber(pm.chaindb, hash)) 616 if results == nil { 617 if header := pm.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash { 618 continue 619 } 620 } 621 // If known, encode and queue for response packet 622 if encoded, err := rlp.EncodeToBytes(results); err != nil { 623 log.Error("Failed to encode receipt", "err", err) 624 } else { 625 receipts = append(receipts, encoded) 626 bytes += len(encoded) 627 } 628 } 629 return p.SendReceiptsRLP(receipts) 630 631 case p.version >= consensus.Eth63 && msg.Code == ReceiptsMsg: 632 // A batch of receipts arrived to one of our previous requests 633 var receipts [][]*types.Receipt 634 if err := msg.Decode(&receipts); err != nil { 635 return errResp(ErrDecode, "msg %v: %v", msg, err) 636 } 637 // Deliver all to the downloader 638 if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil { 639 log.Debug("Failed to deliver receipts", "err", err) 640 } 641 642 case msg.Code == NewBlockHashesMsg: 643 var announces newBlockHashesData 644 if err := msg.Decode(&announces); err != nil { 645 return errResp(ErrDecode, "%v: %v", msg, err) 646 } 647 // Mark the hashes as present at the remote node 648 for _, block := range announces { 649 p.MarkBlock(block.Hash) 650 } 651 // Schedule all the unknown hashes for retrieval 652 unknown := make(newBlockHashesData, 0, len(announces)) 653 for _, block := range announces { 654 if !pm.blockchain.HasBlock(block.Hash, block.Number) { 655 unknown = append(unknown, block) 656 } 657 } 658 for _, block := range unknown { 659 pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies) 660 } 661 662 case msg.Code == NewBlockMsg: 663 // Retrieve and decode the propagated block 664 var request 
newBlockData 665 if err := msg.Decode(&request); err != nil { 666 return errResp(ErrDecode, "%v: %v", msg, err) 667 } 668 request.Block.ReceivedAt = msg.ReceivedAt 669 request.Block.ReceivedFrom = p 670 671 // Mark the peer as owning the block and schedule it for import 672 p.MarkBlock(request.Block.Hash()) 673 pm.fetcher.Enqueue(p.id, request.Block) 674 675 // Assuming the block is importable by the peer, but possibly not yet done so, 676 // calculate the head hash and TD that the peer truly must have. 677 var ( 678 trueHead = request.Block.ParentHash() 679 trueTD = new(big.Int).Sub(request.TD, request.Block.Difficulty()) 680 ) 681 // Update the peers total difficulty if better than the previous 682 if _, td := p.Head(); trueTD.Cmp(td) > 0 { 683 p.SetHead(trueHead, trueTD) 684 685 // Schedule a sync if above ours. Note, this will not fire a sync for a gap of 686 // a singe block (as the true TD is below the propagated block), however this 687 // scenario should easily be covered by the fetcher. 
688 currentBlock := pm.blockchain.CurrentBlock() 689 if trueTD.Cmp(pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())) > 0 { 690 go pm.synchronise(p) 691 } 692 } 693 694 case msg.Code == TxMsg: 695 // Transactions arrived, make sure we have a valid and fresh chain to handle them 696 if atomic.LoadUint32(&pm.acceptTxs) == 0 { 697 break 698 } 699 // Transactions can be processed, parse all of them and deliver to the pool 700 var txs []*types.Transaction 701 if err := msg.Decode(&txs); err != nil { 702 return errResp(ErrDecode, "msg %v: %v", msg, err) 703 } 704 for i, tx := range txs { 705 // Validate and mark the remote transaction 706 if tx == nil { 707 return errResp(ErrDecode, "transaction %d is nil", i) 708 } 709 p.MarkTransaction(tx.Hash()) 710 } 711 pm.txpool.AddRemotes(txs) 712 713 default: 714 return errResp(ErrInvalidMsgCode, "%v", msg.Code) 715 } 716 return nil 717 } 718 719 func (pm *ProtocolManager) Enqueue(id string, block *types.Block) { 720 pm.fetcher.Enqueue(id, block) 721 } 722 723 // BroadcastBlock will either propagate a block to a subset of it's peers, or 724 // will only announce it's availability (depending what's requested). 
725 func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) { 726 hash := block.Hash() 727 peers := pm.peers.PeersWithoutBlock(hash) 728 729 // If propagation is requested, send to a subset of the peer 730 if propagate { 731 // Calculate the TD of the block (it's not imported yet, so block.Td is not valid) 732 var td *big.Int 733 if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil { 734 td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1)) 735 } else { 736 log.Error("Propagating dangling block", "number", block.Number(), "hash", hash) 737 return 738 } 739 // Send the block to a subset of our peers 740 transfer := peers[:int(math.Sqrt(float64(len(peers))))] 741 for _, peer := range transfer { 742 peer.SendNewBlock(block, td) 743 } 744 log.Trace("Propagated block", "hash", hash, "recipients", len(transfer), "duration", common.PrettyDuration(time.Since(block.ReceivedAt))) 745 return 746 } 747 // Otherwise if the block is indeed in out own chain, announce it 748 if pm.blockchain.HasBlock(hash, block.NumberU64()) { 749 for _, peer := range peers { 750 peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()}) 751 } 752 log.Trace("Announced block", "hash", hash, "recipients", len(peers), "duration", common.PrettyDuration(time.Since(block.ReceivedAt))) 753 } 754 } 755 756 // BroadcastTx will propagate a transaction to all peers which are not known to 757 // already have the given transaction. 758 func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction) { 759 // Broadcast transaction to a batch of peers not knowing about it 760 peers := pm.peers.PeersWithoutTx(hash) 761 // NOTE: Raft-based consensus currently assumes that geth broadcasts 762 // transactions to all peers in the network. A previous comment here 763 // indicated that this logic might change in the future to only send to a 764 // subset of peers. 
If this change occurs upstream, a merge conflict should 765 // arise here, and we should add logic to send to *all* peers in raft mode. 766 for _, peer := range peers { 767 peer.SendTransactions(types.Transactions{tx}) 768 } 769 log.Trace("Broadcast transaction", "hash", hash, "recipients", len(peers)) 770 } 771 772 // Mined broadcast loop 773 func (self *ProtocolManager) minedBroadcastLoop() { 774 // automatically stops if unsubscribe 775 for obj := range self.minedBlockSub.Chan() { 776 switch ev := obj.Data.(type) { 777 case core.NewMinedBlockEvent: 778 self.BroadcastBlock(ev.Block, true) // First propagate block to peers 779 self.BroadcastBlock(ev.Block, false) // Only then announce to the rest 780 } 781 } 782 } 783 784 func (self *ProtocolManager) txBroadcastLoop() { 785 for { 786 select { 787 case event := <-self.txCh: 788 self.BroadcastTx(event.Tx.Hash(), event.Tx) 789 790 // Err() channel will be closed when unsubscribing. 791 case <-self.txSub.Err(): 792 return 793 } 794 } 795 } 796 797 // EthNodeInfo represents a short summary of the Ethereum sub-protocol metadata known 798 // about the host peer. 799 type EthNodeInfo struct { 800 Network uint64 `json:"network"` // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3) 801 Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain 802 Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block 803 Head common.Hash `json:"head"` // SHA3 hash of the host's best owned block 804 } 805 806 // NodeInfo retrieves some protocol metadata about the running host node. 
807 func (self *ProtocolManager) NodeInfo() *EthNodeInfo { 808 currentBlock := self.blockchain.CurrentBlock() 809 return &EthNodeInfo{ 810 Network: self.networkId, 811 Difficulty: self.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64()), 812 Genesis: self.blockchain.Genesis().Hash(), 813 Head: currentBlock.Hash(), 814 } 815 } 816 817 func (self *ProtocolManager) FindPeers(targets map[common.Address]bool) map[common.Address]consensus.Peer { 818 m := make(map[common.Address]consensus.Peer) 819 for _, p := range self.peers.Peers() { 820 pubKey, err := p.ID().Pubkey() 821 if err != nil { 822 continue 823 } 824 addr := crypto.PubkeyToAddress(*pubKey) 825 if targets[addr] { 826 m[addr] = p 827 } 828 } 829 return m 830 }