github.com/ethereumproject/go-ethereum@v5.5.2+incompatible/eth/handler.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
	"encoding/json"
	"errors"
	"fmt"
	"math"
	"math/big"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereumproject/go-ethereum/common"
	"github.com/ethereumproject/go-ethereum/core"
	"github.com/ethereumproject/go-ethereum/core/types"
	"github.com/ethereumproject/go-ethereum/eth/downloader"
	"github.com/ethereumproject/go-ethereum/eth/fetcher"
	"github.com/ethereumproject/go-ethereum/ethdb"
	"github.com/ethereumproject/go-ethereum/event"
	"github.com/ethereumproject/go-ethereum/logger"
	"github.com/ethereumproject/go-ethereum/logger/glog"
	"github.com/ethereumproject/go-ethereum/p2p"
	"github.com/ethereumproject/go-ethereum/p2p/discover"
	"github.com/ethereumproject/go-ethereum/pow"
	"github.com/ethereumproject/go-ethereum/rlp"
)

const (
	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header

	// txChanSize is the size of the channel listening to NewTxsEvent.
	// The number is referenced from the size of the tx pool.
	txChanSize = 4096
)

var (
	forkChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the DAO handshake challenge
)

// errIncompatibleConfig is returned if the requested protocols and configs are
// not compatible (low protocol version restrictions and high requirements).
var errIncompatibleConfig = errors.New("incompatible configuration")

func errResp(code errCode, format string, v ...interface{}) error {
	return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
}

// ProtocolManager handles the eth sub-protocol: peer lifecycle, chain
// synchronisation and the broadcasting of blocks and transactions.
type ProtocolManager struct {
	networkId uint64

	fastSync   uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
	acceptsTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)

	txpool      txPool
	blockchain  *core.BlockChain
	chaindb     ethdb.Database
	chainConfig *core.ChainConfig
	maxPeers    int

	downloader *downloader.Downloader
	fetcher    *fetcher.Fetcher
	peers      *peerSet

	SubProtocols []p2p.Protocol

	eventMux      *event.TypeMux
	txSub         event.Subscription
	minedBlockSub event.Subscription

	// channels for fetcher, syncer, txsyncLoop
	newPeerCh   chan *peer
	txsyncCh    chan *txsync
	quitSync    chan struct{}
	noMorePeers chan struct{}

	// wait group is used for graceful shutdowns during downloading
	// and processing
	wg sync.WaitGroup
}

// NewProtocolManager returns a new Ethereum sub protocol manager.
// The Ethereum sub protocol manages peers capable of interacting with the
// Ethereum network.
func NewProtocolManager(config *core.ChainConfig, mode downloader.SyncMode, networkId uint64, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
	// Create the protocol manager with the base fields
	manager := &ProtocolManager{
		networkId:   networkId,
		eventMux:    mux,
		txpool:      txpool,
		blockchain:  blockchain,
		chaindb:     chaindb,
		chainConfig: config,
		peers:       newPeerSet(),
		newPeerCh:   make(chan *peer),
		noMorePeers: make(chan struct{}),
		txsyncCh:    make(chan *txsync),
		quitSync:    make(chan struct{}),
	}
	// Figure out whether to allow fast sync or not
	if mode == downloader.FastSync && blockchain.CurrentBlock().NumberU64() > 0 {
		glog.V(logger.Warn).Infoln("Blockchain not empty, fast sync disabled")
		glog.D(logger.Warn).Warnln("Blockchain not empty. Fast sync disabled.")
		mode = downloader.FullSync
	}
	if mode == downloader.FastSync {
		manager.fastSync = uint32(1)
		glog.D(logger.Warn).Infoln("Fast sync mode enabled.")
	}
	// Initiate a sub-protocol for every implemented version we can handle
	manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions))
	for i, version := range ProtocolVersions {
		// Skip protocol version if incompatible with the mode of operation
		if mode == downloader.FastSync && version < eth63 {
			continue
		}
		// Compatible; initialise the sub-protocol
		version := version // Closure for the run
		manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
			Name:    ProtocolName,
			Version: version,
			Length:  ProtocolLengths[i],
			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
				peer := manager.newPeer(int(version), p, rw)
				select {
				case manager.newPeerCh <- peer:
					manager.wg.Add(1)
					defer manager.wg.Done()
					return manager.handle(peer)
				case <-manager.quitSync:
					return p2p.DiscQuitting
				}
			},
			NodeInfo: func() interface{} {
				return manager.NodeInfo()
			},
			PeerInfo: func(id discover.NodeID) interface{} {
				if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
					return p.Info()
				}
				return nil
			},
		})
	}
	if len(manager.SubProtocols) == 0 {
		return nil, errIncompatibleConfig
	}
	// Construct the different synchronisation mechanisms
	manager.downloader = downloader.New(mode, chaindb, manager.eventMux, blockchain, nil, manager.removePeer)

	validator := func(header *types.Header) error {
		return manager.blockchain.Validator().ValidateHeader(header, manager.blockchain.GetHeader(header.ParentHash), true)
	}
	heighter := func() uint64 {
		return blockchain.CurrentBlock().NumberU64()
	}
	inserter := func(blocks types.Blocks) *core.ChainInsertResult {
		if atomic.LoadUint32(&manager.fastSync) == 1 {
			glog.V(logger.Warn).Warnln("Discarded bad propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash().Hex()[:9])
			glog.D(logger.Warn).Warnln("Discarded bad propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash().Hex()[:9])
		}
		// Mark initial sync done on any fetcher import
		atomic.StoreUint32(&manager.acceptsTxs, 1)
		return manager.blockchain.InsertChain(blocks)
	}
	manager.fetcher = fetcher.New(mux, blockchain.GetBlock, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)

	return manager, nil
}

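// removePeer unregisters the given peer from the downloader and the Ethereum
// peer set, and hard-disconnects it at the networking layer.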
func (pm *ProtocolManager) removePeer(id string) {
	// Short circuit if the peer was already removed
	peer := pm.peers.Peer(id)
	if peer == nil {
		return
	}
	glog.V(logger.Debug).Infoln("Removing peer", id)
	pm.eventMux.Post(PMHandlerRemoveEvent{
		PMPeersLen: pm.peers.Len(),
		PMBestPeer: pm.peers.BestPeer(),
		Peer:       peer,
	})

	// Unregister the peer from the downloader and Ethereum peer set
	pm.downloader.UnregisterPeer(id)
	if err := pm.peers.Unregister(id); err != nil {
		glog.V(logger.Error).Infoln("Removal failed:", err)
	}
	// Hard disconnect at the networking layer
	if peer != nil {
		peer.Peer.Disconnect(p2p.DiscUselessPeer)
	}
}

// Start spins up the broadcast and synchronisation loops of the protocol manager.
func (pm *ProtocolManager) Start(maxPeers int) {
	pm.maxPeers = maxPeers

	// broadcast transactions
	pm.txSub = pm.eventMux.Subscribe(core.TxPreEvent{})
	go pm.txBroadcastLoop()
	// broadcast mined blocks
	pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{})
	go pm.minedBroadcastLoop()

	// start sync handlers
	go pm.syncer()
	go pm.txsyncLoop()
}

// Stop terminates the protocol manager, shutting down the broadcast loops,
// the sync handlers and all live peer sessions.
func (pm *ProtocolManager) Stop() {
	glog.V(logger.Info).Infoln("Stopping ethereum protocol handler...")

	pm.txSub.Unsubscribe()         // quits txBroadcastLoop
	pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop

	// Quit the sync loop.
	// After this send has completed, no new peers will be accepted.
	pm.noMorePeers <- struct{}{}

	// Quit fetcher, txsyncLoop.
	close(pm.quitSync)

	// Disconnect existing sessions.
	// This also closes the gate for any new registrations on the peer set.
	// Sessions which are already established but not added to pm.peers yet
	// will exit when they try to register.
	pm.peers.Close()

	// Wait for all peer handler goroutines and the loops to come down.
	pm.wg.Wait()

	glog.V(logger.Info).Infoln("Ethereum protocol handler stopped")
}

func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
	return newPeer(pv, p, newMeteredMsgWriter(rw))
}

// handle is the callback invoked to manage the life cycle of an eth peer. When
// this function terminates, the peer is disconnected.
func (pm *ProtocolManager) handle(p *peer) error {
	// Ignore maxPeers if this is a trusted peer
	if l := pm.peers.Len(); l >= pm.maxPeers && !p.Peer.Info().Network.Trusted {
		glog.D(logger.Error).Errorln("handler dropping pm.peers.len=", l, "pm.maxPeers=", pm.maxPeers)
		return p2p.DiscTooManyPeers
	}
	glog.V(logger.Debug).Infof("handler: %s ->connected", p)

	// Execute the Ethereum handshake
	td, head, genesis := pm.blockchain.Status()
	if err := p.Handshake(pm.networkId, td, head, genesis); err != nil {
		glog.V(logger.Debug).Infof("handler: %s ->handshakefailed err=%v", p, err)
		return err
	}
	if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
		rw.Init(p.version)
	}
	// Register the peer locally
	glog.V(logger.Debug).Infof("handler: %s ->addpeer", p)
	if err := pm.peers.Register(p); err != nil {
		glog.V(logger.Error).Errorf("handler: %s ->addpeer err=%v", p, err)
		return err
	} else {
		pm.eventMux.Post(PMHandlerAddEvent{
			PMPeersLen: pm.peers.Len(),
			PMBestPeer: pm.peers.BestPeer(),
			Peer:       p,
		})
	}
	defer pm.removePeer(p.id)

	// Register the peer in the downloader. If the downloader considers it banned, we disconnect.
	if err := pm.downloader.RegisterPeer(p.id, p.version, p.Name(), p.Head,
		p.RequestHeadersByHash, p.RequestHeadersByNumber, p.RequestBodies,
		p.RequestReceipts, p.RequestNodeData); err != nil {
		return err
	}
	// Propagate existing transactions. New transactions appearing
	// after this will be sent via broadcasts.
	pm.syncTransactions(p)

	pHead, _ := p.Head()
	if headerN, doValidate := pm.getRequiredHashBlockNumber(head, pHead); doValidate {
		// Request the peer's fork block header for extra-data
		if err := p.RequestHeadersByNumber(headerN, 1, 0, false); err != nil {
			glog.V(logger.Debug).Infof("handler: %s ->headersbynumber err=%v", p, err)
			return err
		}
		// Start a timer to disconnect if the peer doesn't reply in time
		// FIXME: un-hardcode timeout
		p.forkDrop = time.AfterFunc(forkChallengeTimeout, func() {
			glog.V(logger.Debug).Infof("handler: %s ->headersbynumber err='timed out fork-check, dropping'", p)
			pm.removePeer(p.id)
		})
		// Make sure it's cleaned up if the peer dies off
		defer func() {
			if p.forkDrop != nil {
				p.forkDrop.Stop()
				p.forkDrop = nil
			}
		}()
	}

	// main loop. handle incoming messages.
	for {
		if err := pm.handleMsg(p); err != nil {
			glog.V(logger.Debug).Infof("handler: %s ->msghandlefailed err=%v", p, err)
			return err
		}
	}
}

// getRequiredHashBlockNumber returns the block number of the most relevant fork
// configured with a required hash, and whether validation of the peer against
// that hash is required.
func (pm *ProtocolManager) getRequiredHashBlockNumber(localHead, peerHead common.Hash) (blockNumber uint64, validate bool) {
	// Drop connections incongruent with any network split or checkpoint that's relevant.
	// Check for the latest relevant required hash based on our status.
	var headN *big.Int
	headB := pm.blockchain.GetBlock(localHead)
	if headB != nil {
		headN = headB.Number()
	}
	latestReqHashFork := pm.chainConfig.GetLatestRequiredHashFork(headN) // returns nil if no applicable fork with required hash

	// If our local sync progress has not yet reached a height at which a fork with a required hash would be relevant,
	// we can skip this check. This allows the client to be fork agnostic until a configured fork(s) is reached.
	// If we already have the peer's head, the peer is on the right chain, so we can skip required hash validation.
	if latestReqHashFork != nil {
		validate = pm.blockchain.GetBlock(peerHead) == nil
		blockNumber = latestReqHashFork.Block.Uint64()
	}
	return
}

// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
func (pm *ProtocolManager) handleMsg(p *peer) (err error) {
	// Read the next message from the remote peer, and ensure it's fully consumed
	var unknownMessageCode uint64 = math.MaxUint64
	msg, err := p.rw.ReadMsg()
	if err != nil {
		mlogWireDelegate(p, "receive", unknownMessageCode, -1, nil, err)
		return
	}
	intSize := int(msg.Size)
	if msg.Size > ProtocolMaxMsgSize {
		err = errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
		mlogWireDelegate(p, "receive", msg.Code, intSize, nil, err)
		return
	}
	defer msg.Discard()

	// Handle the message depending on its contents
	switch {
	case msg.Code == StatusMsg:
		// Status messages should never arrive after the handshake
		err = errResp(ErrExtraStatusMsg, "uncontrolled status message")
		mlogWireDelegate(p, "receive", StatusMsg, intSize, nil, err)
		return

	// Block header query, collect the requested headers and reply
	case p.version >= eth62 && msg.Code == GetBlockHeadersMsg:
		// Decode the complex header query
		var query getBlockHeadersData
		if e := msg.Decode(&query); e != nil {
			err = errResp(ErrDecode, "%v: %v", msg, e)
			mlogWireDelegate(p, "receive", GetBlockHeadersMsg, intSize, &query, err)
			return
		}
		mlogWireDelegate(p, "receive", GetBlockHeadersMsg, intSize, &query, err)
		hashMode := query.Origin.Hash != (common.Hash{})

		// Gather headers until the fetch or network limit is reached
		var (
			bytes   common.StorageSize
			headers []*types.Header
			unknown bool
		)
		for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
			// Retrieve the next header satisfying the query
			var origin *types.Header
			if hashMode {
				origin = pm.blockchain.GetHeader(query.Origin.Hash)
			} else {
				origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number)
			}
			if origin == nil {
				break
			}
			headers = append(headers, origin)
			bytes += estHeaderRlpSize

			// Advance to the next header of the query
			switch {
			case query.Origin.Hash != (common.Hash{}) && query.Reverse:
				// Hash based traversal towards the genesis block
				for i := 0; i < int(query.Skip)+1; i++ {
					if header := pm.blockchain.GetHeader(query.Origin.Hash); header != nil {
						query.Origin.Hash = header.ParentHash
					} else {
						unknown = true
						break
					}
				}
			case query.Origin.Hash != (common.Hash{}) && !query.Reverse:
				// Hash based traversal towards the leaf block
				var (
					current = origin.Number.Uint64()
					next    = current + query.Skip + 1
				)
				if next <= current {
					infos, _ := json.MarshalIndent(p.Peer.Info(), "", " ")
					glog.V(logger.Warn).Infof("%v: GetBlockHeaders skip overflow attack (current %v, skip %v, next %v)\nMalicious peer infos: %s", p, current, query.Skip, next, infos)
					unknown = true
				} else {
					if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
						if pm.blockchain.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash {
							query.Origin.Hash = header.Hash()
						} else {
							unknown = true
						}
					} else {
						unknown = true
					}
				}
			case query.Reverse:
				// Number based traversal towards the genesis block
				if query.Origin.Number >= query.Skip+1 {
					query.Origin.Number -= (query.Skip + 1)
				} else {
					unknown = true
				}

			case !query.Reverse:
				// Number based traversal towards the leaf block
				query.Origin.Number += (query.Skip + 1)
			}
		}
		return p.SendBlockHeaders(headers)

	case p.version >= eth62 && msg.Code == BlockHeadersMsg:
		// A batch of headers arrived in response to one of our previous requests
		var headers []*types.Header
		if e := msg.Decode(&headers); e != nil {
			err = errResp(ErrDecode, "msg %v: %v", msg, e)
			mlogWireDelegate(p, "receive", BlockHeadersMsg, intSize, headers, err)
			return
		}
		defer mlogWireDelegate(p, "receive", BlockHeadersMsg, intSize, headers, err)

		// Good will assumption. Even if the peer is ahead of the fork check header but returns
		// an empty header response, it might be that the peer is a light client which only keeps
		// the last 256 block headers. Besides, it does not prevent network attacks. See #313 for
		// an explanation.
		if len(headers) == 0 && p.forkDrop != nil {
			// Disable the fork drop timeout
			p.forkDrop.Stop()
			p.forkDrop = nil
			return nil
		}
		// Filter out any explicitly requested headers, deliver the rest to the downloader
		filter := len(headers) == 1
		if filter {
			if p.forkDrop != nil {
				// Disable the fork drop timeout
				p.forkDrop.Stop()
				p.forkDrop = nil
			}

			if err = pm.chainConfig.HeaderCheck(headers[0]); err != nil {
				pm.removePeer(p.id)
				return err
			}
			// Irrelevant of the fork checks, send the header to the fetcher just in case
			headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
		}
		if len(headers) > 0 || !filter {
			err := pm.downloader.DeliverHeaders(p.id, headers)
			if err != nil {
				glog.V(logger.Debug).Infoln("peer", p.id, err)
			}
		}

	case p.version >= eth62 && msg.Code == GetBlockBodiesMsg:
		// Decode the retrieval message
		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
		if _, err = msgStream.List(); err != nil {
			return err
		}
		// Gather blocks until the fetch or network limit is reached
		var (
			hash   common.Hash
			bytes  int
			bodies []rlp.RawValue
		)
		for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch {
			// Retrieve the hash of the next block
			if e := msgStream.Decode(&hash); e == rlp.EOL {
				break
			} else if e != nil {
				err = errResp(ErrDecode, "msg %v: %v", msg, e)
				mlogWireDelegate(p, "receive", GetBlockBodiesMsg, intSize, bodies, err)
				return err
			}
			// Retrieve the requested block body, stopping if enough was found
			if data := pm.blockchain.GetBodyRLP(hash); len(data) != 0 {
				bodies = append(bodies, data)
				bytes += len(data)
			}
		}
		mlogWireDelegate(p, "receive", GetBlockBodiesMsg, intSize, bodies, err)
		return p.SendBlockBodiesRLP(bodies)

	case p.version >= eth62 && msg.Code == BlockBodiesMsg:
		// A batch of block bodies arrived in response to one of our previous requests
		var request blockBodiesData
		// Deliver them all to the downloader for queuing
		if e := msg.Decode(&request); e != nil {
			err = errResp(ErrDecode, "msg %v: %v", msg, e)
			mlogWireDelegate(p, "receive", BlockBodiesMsg, intSize, request, err)
			return
		}
		mlogWireDelegate(p, "receive", BlockBodiesMsg, intSize, request, err)

		transactions := make([][]*types.Transaction, len(request))
		uncles := make([][]*types.Header, len(request))

		for i, body := range request {
			transactions[i] = body.Transactions
			uncles[i] = body.Uncles
		}
		// Filter out any explicitly requested bodies, deliver the rest to the downloader
		filter := len(transactions) > 0 || len(uncles) > 0
		if filter {
			transactions, uncles = pm.fetcher.FilterBodies(p.id, transactions, uncles, time.Now())
		}
		if len(transactions) > 0 || len(uncles) > 0 || !filter {
			if e := pm.downloader.DeliverBodies(p.id, transactions, uncles); e != nil {
				glog.V(logger.Debug).Infoln(e)
			}
		}

	case p.version >= eth63 && msg.Code == GetNodeDataMsg:
		// Decode the retrieval message
		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
		if _, err = msgStream.List(); err != nil {
			mlogWireDelegate(p, "receive", GetNodeDataMsg, intSize, [][]byte{}, err)
			return err
		}
		// Gather state data until the fetch or network limit is reached
		var (
			hash  common.Hash
			bytes int
			data  [][]byte
		)
		for bytes < softResponseLimit && len(data) < downloader.MaxStateFetch {
			// Retrieve the hash of the next state entry
			if e := msgStream.Decode(&hash); e == rlp.EOL {
				break
			} else if e != nil {
				err = errResp(ErrDecode, "msg %v: %v", msg, e)
				mlogWireDelegate(p, "receive", GetNodeDataMsg, intSize, data, err)
				return
			}
			// Retrieve the requested state entry, stopping if enough was found
			if entry, e := pm.chaindb.Get(hash.Bytes()); e == nil {
				data = append(data, entry)
				bytes += len(entry)
			}
		}
		mlogWireDelegate(p, "receive", GetNodeDataMsg, intSize, data, err)
		return p.SendNodeData(data)

	case p.version >= eth63 && msg.Code == NodeDataMsg:
		// A batch of node state data arrived in response to one of our previous requests
		var data [][]byte

		if e := msg.Decode(&data); e != nil {
			err = errResp(ErrDecode, "msg %v: %v", msg, e)
			mlogWireDelegate(p, "receive", NodeDataMsg, intSize, data, err)
			return
		}
		mlogWireDelegate(p, "receive", NodeDataMsg, intSize, data, err)
		// Deliver all to the downloader
		if e := pm.downloader.DeliverNodeData(p.id, data); e != nil {
			glog.V(logger.Core).Warnf("failed to deliver node state data: %v", e)
		}

	case p.version >= eth63 && msg.Code == GetReceiptsMsg:
		// Decode the retrieval message
		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
		if _, err = msgStream.List(); err != nil {
			mlogWireDelegate(p, "receive", GetReceiptsMsg, intSize, []rlp.RawValue{}, err)
			return err
		}
		// Gather receipts until the fetch or network limit is reached
		var (
			hash     common.Hash
			bytes    int
			receipts []rlp.RawValue
		)
		for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptFetch {
			// Retrieve the hash of the next block
			if e := msgStream.Decode(&hash); e == rlp.EOL {
				break
			} else if e != nil {
				err = errResp(ErrDecode, "msg %v: %v", msg, e)
				mlogWireDelegate(p, "receive", GetReceiptsMsg, intSize, receipts, err)
				return
			}
			// Retrieve the requested block's receipts, skipping if unknown to us
			results := core.GetBlockReceipts(pm.chaindb, hash)
			if results == nil {
				if header := pm.blockchain.GetHeader(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
					continue
				}
			}
			// If known, encode and queue for response packet
			if encoded, err := rlp.EncodeToBytes(results); err != nil {
				glog.V(logger.Error).Infof("failed to encode receipt: %v", err)
			} else {
				receipts = append(receipts, encoded)
				bytes += len(encoded)
			}
		}
		mlogWireDelegate(p, "receive", GetReceiptsMsg, intSize, receipts, err)
		return p.SendReceiptsRLP(receipts)

	case p.version >= eth63 && msg.Code == ReceiptsMsg:
		// A batch of receipts arrived in response to one of our previous requests
		var receipts [][]*types.Receipt
		if err := msg.Decode(&receipts); err != nil {
			mlogWireDelegate(p, "receive", ReceiptsMsg, intSize, receipts, err)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		mlogWireDelegate(p, "receive", ReceiptsMsg, intSize, receipts, err)
		// Deliver all to the downloader
		if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil {
			glog.V(logger.Core).Warnf("failed to deliver receipts: %v", err)
		}

	case msg.Code == NewBlockHashesMsg:
		// Retrieve and deserialize the remote new block hashes notification
		var announces newBlockHashesData // = []announce{}

		if p.version < eth62 {
			// We're running the old protocol, make block number unknown (0)
			var hashes []common.Hash
			if e := msg.Decode(&hashes); e != nil {
				err = errResp(ErrDecode, "%v: %v", msg, e)
				mlogWireDelegate(p, "receive", NewBlockHashesMsg, intSize, announces, err)
				return
			}
			for _, hash := range hashes {
				announces = append(announces, announce{hash, 0})
			}
		} else {
			// Otherwise extract both block hash and number
			var request newBlockHashesData
			if e := msg.Decode(&request); e != nil {
				err = errResp(ErrDecode, "%v: %v", msg, e)
				mlogWireDelegate(p, "receive", NewBlockHashesMsg, intSize, announces, err)
				return
			}
			for _, block := range request {
				announces = append(announces, announce{block.Hash, block.Number})
			}
		}
		mlogWireDelegate(p, "receive", NewBlockHashesMsg, intSize, announces, err)
		// Mark the hashes as present at the remote node
		for _, block := range announces {
			p.MarkBlock(block.Hash)
			p.SetHead(block.Hash, p.td)
		}
		// Schedule all the unknown hashes for retrieval
		unknown := make([]announce, 0, len(announces))
		for _, block := range announces {
			if !pm.blockchain.HasBlock(block.Hash) {
				unknown = append(unknown, block)
			}
		}
		for _, block := range unknown {
			// TODO Breaking /eth tests
			pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
		}

	case msg.Code == NewBlockMsg:
		// Retrieve and decode the propagated block
		var request newBlockData

		if e := msg.Decode(&request); e != nil {
			err = errResp(ErrDecode, "%v: %v", msg, e)
			mlogWireDelegate(p, "receive", NewBlockMsg, intSize, request, err)
			return
		}
		if e := request.Block.ValidateFields(); e != nil {
			err = errResp(ErrDecode, "block validation %v: %v", msg, e)
			mlogWireDelegate(p, "receive", NewBlockMsg, intSize, request, err)
			return
		}

		mlogWireDelegate(p, "receive", NewBlockMsg, intSize, request, err)

		request.Block.ReceivedAt = msg.ReceivedAt
		request.Block.ReceivedFrom = p

		// Mark the peer as owning the block and schedule it for import
		p.MarkBlock(request.Block.Hash())
		pm.fetcher.Enqueue(p.id, request.Block)

		// Assuming the block is importable by the peer, but possibly not yet done so,
		// calculate the head hash and TD that the peer truly must have.
		var (
			trueHead = request.Block.ParentHash()
			trueTD   = new(big.Int).Sub(request.TD, request.Block.Difficulty())
		)
		// Update the peer's total difficulty if better than the previous
		if _, td := p.Head(); trueTD.Cmp(td) > 0 {
			glog.V(logger.Debug).Infof("Peer %s: setting head: tdWas=%v trueTD=%v", p.id, td, trueTD)
			p.SetHead(trueHead, trueTD)

			// Schedule a sync if above ours. Note, this will not fire a sync for a gap of
			// a single block (as the true TD is below the propagated block), however this
			// scenario should easily be covered by the fetcher.
			currentBlock := pm.blockchain.CurrentBlock()
			if localTd := pm.blockchain.GetTd(currentBlock.Hash()); trueTD.Cmp(localTd) > 0 {
				if !pm.downloader.Synchronising() {
					glog.V(logger.Info).Infof("Peer %s: localTD=%v (<) peerTrueTD=%v, synchronising", p.id, localTd, trueTD)
					go pm.synchronise(p)
				}
			} else {
				glog.V(logger.Detail).Infof("Peer %s: localTD=%v (>=) peerTrueTD=%v, NOT synchronising", p.id, localTd, trueTD)
			}
		} else {
			glog.V(logger.Detail).Infof("Peer %s: NOT setting head: tdWas=%v trueTD=%v", p.id, td, trueTD)
		}

	case msg.Code == TxMsg:
		// Transactions arrived, make sure we have a valid and fresh chain to handle them
		if atomic.LoadUint32(&pm.acceptsTxs) == 0 {
			mlogWireDelegate(p, "receive", TxMsg, intSize, []*types.Transaction{}, errors.New("not synced"))
			break
		}
		// Transactions can be processed, parse all of them and deliver to the pool
		var txs []*types.Transaction
		if e := msg.Decode(&txs); e != nil {
			err = errResp(ErrDecode, "msg %v: %v", msg, e)
			mlogWireDelegate(p, "receive", TxMsg, intSize, txs, err)
			return
		}
		mlogWireDelegate(p, "receive", TxMsg, intSize, txs, err)
		for i, tx := range txs {
			// Validate and mark the remote transaction
			if tx == nil {
				return errResp(ErrDecode, "transaction %d is nil", i)
			}
			p.MarkTransaction(tx.Hash())
		}
		pm.txpool.AddTransactions(txs)

	default:
		err = errResp(ErrInvalidMsgCode, "%v", msg.Code)
		mlogWireDelegate(p, "receive", unknownMessageCode, intSize, nil, err)
		return
	}
	return nil
}

// BroadcastBlock will either propagate a block to a subset of its peers, or
// will only announce its availability (depending on what's requested).
func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
	hash := block.Hash()
	peers := pm.peers.PeersWithoutBlock(hash)

	// If propagation is requested, send to a subset of the peers
	if propagate {
		// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
		var td *big.Int
		if parent := pm.blockchain.GetBlock(block.ParentHash()); parent != nil {
			td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash()))
		} else {
			glog.V(logger.Error).Infof("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4])
			return
		}
		// Send the block to a subset of our peers
		transfer := peers[:int(math.Sqrt(float64(len(peers))))]
		for _, peer := range transfer {
			peer.AsyncSendNewBlock(block, td)
		}
		glog.V(logger.Detail).Infof("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt))
	}
	// Otherwise if the block is indeed in our own chain, announce it
	if pm.blockchain.HasBlock(block.Hash()) {
		for _, peer := range peers {
			peer.AsyncSendNewBlockHash(block)
		}
		glog.V(logger.Detail).Infof("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt))
	}
}

// BroadcastTx will propagate a transaction to all peers which are not known to
// already have the given transaction.
func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction) {
	// Broadcast transaction to a batch of peers not knowing about it
	peers := pm.peers.PeersWithoutTx(hash)
	//FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
	for _, peer := range peers {
		peer.AsyncSendTransactions(types.Transactions{tx})
	}
	glog.V(logger.Detail).Infof("broadcast tx [%s] to %d peers", tx.Hash().Hex(), len(peers))
}

// minedBroadcastLoop forwards mined blocks to connected peers.
func (self *ProtocolManager) minedBroadcastLoop() {
	// automatically stops when unsubscribed
	for obj := range self.minedBlockSub.Chan() {
		switch ev := obj.Data.(type) {
		case core.NewMinedBlockEvent:
			self.BroadcastBlock(ev.Block, true)  // First propagate block to peers
			self.BroadcastBlock(ev.Block, false) // Only then announce to the rest
		}
	}
}

// txBroadcastLoop forwards incoming pool transactions to connected peers.
func (self *ProtocolManager) txBroadcastLoop() {
	// automatically stops when unsubscribed
	for obj := range self.txSub.Chan() {
		event := obj.Data.(core.TxPreEvent)
		self.BroadcastTx(event.Tx.Hash(), event.Tx)
	}
}

// EthNodeInfo represents a short summary of the Ethereum sub-protocol metadata known
// about the host peer.
type EthNodeInfo struct {
	Network    int         `json:"network"`    // Ethereum network ID (1=Mainnet, 2=Morden)
	Difficulty *big.Int    `json:"difficulty"` // Total difficulty of the host's blockchain
	Genesis    common.Hash `json:"genesis"`    // SHA3 hash of the host's genesis block
	Head       common.Hash `json:"head"`       // SHA3 hash of the host's best owned block
}

// NodeInfo retrieves some protocol metadata about the running host node.
func (self *ProtocolManager) NodeInfo() *EthNodeInfo {
	return &EthNodeInfo{
		Network:    int(self.networkId),
		Difficulty: self.blockchain.GetTd(self.blockchain.CurrentBlock().Hash()),
		Genesis:    self.blockchain.Genesis().Hash(),
		Head:       self.blockchain.CurrentBlock().Hash(),
	}
}