github.com/hyperion-hyn/go-ethereum@v2.4.0+incompatible/eth/handler.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package eth
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"math"
    24  	"math/big"
    25  	"sync"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/ethereum/go-ethereum/common"
    30  	"github.com/ethereum/go-ethereum/consensus"
    31  	"github.com/ethereum/go-ethereum/consensus/clique"
    32  	"github.com/ethereum/go-ethereum/consensus/ethash"
    33  	"github.com/ethereum/go-ethereum/consensus/misc"
    34  	"github.com/ethereum/go-ethereum/core"
    35  	"github.com/ethereum/go-ethereum/core/types"
    36  	"github.com/ethereum/go-ethereum/crypto"
    37  	"github.com/ethereum/go-ethereum/eth/downloader"
    38  	"github.com/ethereum/go-ethereum/eth/fetcher"
    39  	"github.com/ethereum/go-ethereum/ethdb"
    40  	"github.com/ethereum/go-ethereum/event"
    41  	"github.com/ethereum/go-ethereum/log"
    42  	"github.com/ethereum/go-ethereum/p2p"
    43  	"github.com/ethereum/go-ethereum/p2p/enode"
    44  	"github.com/ethereum/go-ethereum/params"
    45  	"github.com/ethereum/go-ethereum/rlp"
    46  )
    47  
    48  const (
    49  	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
    50  	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header
    51  
     52  	// txChanSize is the size of the channel listening to NewTxsEvent.
     53  	// The number is referenced from the size of the tx pool.
    54  	txChanSize = 4096
    55  
     56  	// minimum number of peers to broadcast new blocks to
    57  	minBroadcastPeers = 4
    58  )
    59  
    60  var (
    61  	daoChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the DAO handshake challenge
    62  )
    63  
    64  // errIncompatibleConfig is returned if the requested protocols and configs are
    65  // not compatible (low protocol version restrictions and high requirements).
    66  var errIncompatibleConfig = errors.New("incompatible configuration")
    67  
    68  func errResp(code errCode, format string, v ...interface{}) error {
    69  	return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
    70  }
    71  
    72  type ProtocolManager struct {
    73  	networkID uint64
    74  
    75  	fastSync  uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
    76  	acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)
    77  
    78  	txpool      txPool
    79  	blockchain  *core.BlockChain
    80  	chainconfig *params.ChainConfig
    81  	maxPeers    int
    82  
    83  	downloader *downloader.Downloader
    84  	fetcher    *fetcher.Fetcher
    85  	peers      *peerSet
    86  
    87  	SubProtocols []p2p.Protocol
    88  
    89  	eventMux      *event.TypeMux
    90  	txsCh         chan core.NewTxsEvent
    91  	txsSub        event.Subscription
    92  	minedBlockSub *event.TypeMuxSubscription
    93  
    94  	// channels for fetcher, syncer, txsyncLoop
    95  	newPeerCh   chan *peer
    96  	txsyncCh    chan *txsync
    97  	quitSync    chan struct{}
    98  	noMorePeers chan struct{}
    99  
   100  	// wait group is used for graceful shutdowns during downloading
   101  	// and processing
   102  	wg sync.WaitGroup
   103  
   104  	raftMode bool
   105  	engine   consensus.Engine
   106  }
   107  
    108  // NewProtocolManager returns a new Ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
    109  // of operating on the Ethereum network.
   110  func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkID uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database, raftMode bool) (*ProtocolManager, error) {
   111  	// Create the protocol manager with the base fields
   112  	manager := &ProtocolManager{
   113  		networkID:   networkID,
   114  		eventMux:    mux,
   115  		txpool:      txpool,
   116  		blockchain:  blockchain,
   117  		chainconfig: config,
   118  		peers:       newPeerSet(),
   119  		newPeerCh:   make(chan *peer),
   120  		noMorePeers: make(chan struct{}),
   121  		txsyncCh:    make(chan *txsync),
   122  		quitSync:    make(chan struct{}),
   123  		raftMode:    raftMode,
   124  		engine:      engine,
   125  	}
   126  
   127  	if handler, ok := manager.engine.(consensus.Handler); ok {
   128  		handler.SetBroadcaster(manager)
   129  	}
   130  
   131  	// Figure out whether to allow fast sync or not
   132  	if mode == downloader.FastSync && blockchain.CurrentBlock().NumberU64() > 0 {
   133  		log.Warn("Blockchain not empty, fast sync disabled")
   134  		mode = downloader.FullSync
   135  	}
   136  	if mode == downloader.FastSync {
   137  		manager.fastSync = uint32(1)
   138  	}
   139  	protocol := engine.Protocol()
   140  	// Initiate a sub-protocol for every implemented version we can handle
   141  	manager.SubProtocols = make([]p2p.Protocol, 0, len(protocol.Versions))
   142  	for i, version := range protocol.Versions {
   143  		// Skip protocol version if incompatible with the mode of operation
   144  		if mode == downloader.FastSync && version < eth63 {
   145  			continue
   146  		}
   147  		// Compatible; initialise the sub-protocol
   148  		version := version // Closure for the run
   149  		manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
   150  			Name:    protocol.Name,
   151  			Version: version,
   152  			Length:  protocol.Lengths[i],
   153  			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
   154  				peer := manager.newPeer(int(version), p, rw)
   155  				select {
   156  				case manager.newPeerCh <- peer:
   157  					manager.wg.Add(1)
   158  					defer manager.wg.Done()
   159  					return manager.handle(peer)
   160  				case <-manager.quitSync:
   161  					return p2p.DiscQuitting
   162  				}
   163  			},
   164  			NodeInfo: func() interface{} {
   165  				return manager.NodeInfo()
   166  			},
   167  			PeerInfo: func(id enode.ID) interface{} {
   168  				if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
   169  					return p.Info()
   170  				}
   171  				return nil
   172  			},
   173  		})
   174  	}
   175  	if len(manager.SubProtocols) == 0 {
   176  		return nil, errIncompatibleConfig
   177  	}
   178  	// Construct the different synchronisation mechanisms
   179  	manager.downloader = downloader.New(mode, chaindb, manager.eventMux, blockchain, nil, manager.removePeer)
   180  
   181  	validator := func(header *types.Header) error {
   182  		return engine.VerifyHeader(blockchain, header, true)
   183  	}
   184  	heighter := func() uint64 {
   185  		return blockchain.CurrentBlock().NumberU64()
   186  	}
   187  	inserter := func(blocks types.Blocks) (int, error) {
   188  		// If fast sync is running, deny importing weird blocks
   189  		if atomic.LoadUint32(&manager.fastSync) == 1 {
   190  			log.Warn("Discarded bad propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
   191  			return 0, nil
   192  		}
   193  		atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
   194  		return manager.blockchain.InsertChain(blocks)
   195  	}
   196  	manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
   197  
   198  	return manager, nil
   199  }
   200  
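         // removePeer unregisters the given peer from the downloader and the
         // Ethereum peer set, then hard-disconnects it at the networking layer.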
   201  func (pm *ProtocolManager) removePeer(id string) {
   202  	// Short circuit if the peer was already removed
   203  	peer := pm.peers.Peer(id)
   204  	if peer == nil {
   205  		return
   206  	}
   207  	log.Debug("Removing Ethereum peer", "peer", id)
   208  
   209  	// Unregister the peer from the downloader and Ethereum peer set
   210  	pm.downloader.UnregisterPeer(id)
   211  	if err := pm.peers.Unregister(id); err != nil {
   212  		log.Error("Peer removal failed", "peer", id, "err", err)
   213  	}
   214  	// Hard disconnect at the networking layer
   215  	if peer != nil {
   216  		peer.Peer.Disconnect(p2p.DiscUselessPeer)
   217  	}
   218  }
   219  
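         // Start spins up the protocol handler's helper goroutines: the transaction
         // broadcast loop, the mined-block broadcast loop (skipped in raft mode, where
         // acceptTxs is flagged on immediately instead) and the sync handlers.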
   220  func (pm *ProtocolManager) Start(maxPeers int) {
   221  	pm.maxPeers = maxPeers
   222  
   223  	// broadcast transactions
   224  	pm.txsCh = make(chan core.NewTxsEvent, txChanSize)
   225  	pm.txsSub = pm.txpool.SubscribeNewTxsEvent(pm.txsCh)
   226  	go pm.txBroadcastLoop()
   227  
   228  	if !pm.raftMode {
   229  		// broadcast mined blocks
   230  		pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{})
   231  		go pm.minedBroadcastLoop()
   232  	} else {
   233  		// We set this immediately in raft mode to make sure the miner never drops
   234  		// incoming txes. Raft mode doesn't use the fetcher or downloader, and so
   235  		// this would never be set otherwise.
   236  		atomic.StoreUint32(&pm.acceptTxs, 1)
   237  	}
   238  
   239  	// start sync handlers
   240  	go pm.syncer()
   241  	go pm.txsyncLoop()
   242  }
   243  
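         // Stop tears the protocol handler down: it unsubscribes the event feeds, stops
         // accepting new peers, closes the sync channels, disconnects the registered
         // peers and waits for the remaining handler goroutines to exit.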
   244  func (pm *ProtocolManager) Stop() {
   245  	log.Info("Stopping Ethereum protocol")
   246  
   247  	pm.txsSub.Unsubscribe() // quits txBroadcastLoop
   248  	if !pm.raftMode {
   249  		pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
   250  	}
   251  
   252  	// Quit the sync loop.
   253  	// After this send has completed, no new peers will be accepted.
   254  	pm.noMorePeers <- struct{}{}
   255  
   256  	// Quit fetcher, txsyncLoop.
   257  	close(pm.quitSync)
   258  
   259  	// Disconnect existing sessions.
   260  	// This also closes the gate for any new registrations on the peer set.
    261  	// Sessions which are already established but not added to pm.peers yet
    262  	// will exit when they try to register.
   263  	pm.peers.Close()
   264  
   265  	// Wait for all peer handler goroutines and the loops to come down.
   266  	pm.wg.Wait()
   267  
   268  	log.Info("Ethereum protocol stopped")
   269  }
   270  
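         // newPeer wraps a raw devp2p peer into an eth peer, metering its message
         // stream via newMeteredMsgWriter.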
   271  func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
   272  	return newPeer(pv, p, newMeteredMsgWriter(rw))
   273  }
   274  
   275  // handle is the callback invoked to manage the life cycle of an eth peer. When
   276  // this function terminates, the peer is disconnected.
   277  func (pm *ProtocolManager) handle(p *peer) error {
   278  	// Ignore maxPeers if this is a trusted peer
   279  	if pm.peers.Len() >= pm.maxPeers && !p.Peer.Info().Network.Trusted {
   280  		return p2p.DiscTooManyPeers
   281  	}
   282  	p.Log().Debug("Ethereum peer connected", "name", p.Name())
   283  
   284  	// Execute the Ethereum handshake
   285  	var (
   286  		genesis = pm.blockchain.Genesis()
   287  		head    = pm.blockchain.CurrentHeader()
   288  		hash    = head.Hash()
   289  		number  = head.Number.Uint64()
   290  		td      = pm.blockchain.GetTd(hash, number)
   291  	)
   292  	if err := p.Handshake(pm.networkID, td, hash, genesis.Hash()); err != nil {
   293  		p.Log().Debug("Ethereum handshake failed", "err", err)
   294  		return err
   295  	}
   296  	if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
   297  		rw.Init(p.version)
   298  	}
   299  	// Register the peer locally
   300  	if err := pm.peers.Register(p); err != nil {
   301  		p.Log().Error("Ethereum peer registration failed", "err", err)
   302  		return err
   303  	}
   304  	defer pm.removePeer(p.id)
   305  
   306  	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
   307  	if err := pm.downloader.RegisterPeer(p.id, p.version, p); err != nil {
   308  		return err
   309  	}
    310  	// Propagate existing transactions. New transactions appearing
   311  	// after this will be sent via broadcasts.
   312  	pm.syncTransactions(p)
   313  
   314  	// If we're DAO hard-fork aware, validate any remote peer with regard to the hard-fork
   315  	if daoBlock := pm.chainconfig.DAOForkBlock; daoBlock != nil {
   316  		// Request the peer's DAO fork header for extra-data validation
   317  		if err := p.RequestHeadersByNumber(daoBlock.Uint64(), 1, 0, false); err != nil {
   318  			return err
   319  		}
   320  		// Start a timer to disconnect if the peer doesn't reply in time
   321  		p.forkDrop = time.AfterFunc(daoChallengeTimeout, func() {
   322  			p.Log().Debug("Timed out DAO fork-check, dropping")
   323  			pm.removePeer(p.id)
   324  		})
   325  		// Make sure it's cleaned up if the peer dies off
   326  		defer func() {
   327  			if p.forkDrop != nil {
   328  				p.forkDrop.Stop()
   329  				p.forkDrop = nil
   330  			}
   331  		}()
   332  	}
   333  	// main loop. handle incoming messages.
   334  	for {
   335  		if err := pm.handleMsg(p); err != nil {
   336  			p.Log().Debug("Ethereum message handling failed", "err", err)
   337  			return err
   338  		}
   339  	}
   340  }
   341  
   342  // handleMsg is invoked whenever an inbound message is received from a remote
   343  // peer. The remote connection is torn down upon returning any error.
   344  func (pm *ProtocolManager) handleMsg(p *peer) error {
   345  	// Read the next message from the remote peer, and ensure it's fully consumed
   346  	msg, err := p.rw.ReadMsg()
   347  	if err != nil {
   348  		return err
   349  	}
   350  	if msg.Size > ProtocolMaxMsgSize {
   351  		return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
   352  	}
   353  	defer msg.Discard()
   354  
   355  	if pm.raftMode {
   356  		if msg.Code != TxMsg &&
   357  			msg.Code != GetBlockHeadersMsg && msg.Code != BlockHeadersMsg &&
   358  			msg.Code != GetBlockBodiesMsg && msg.Code != BlockBodiesMsg {
   359  
   360  			log.Info("raft: ignoring message", "code", msg.Code)
   361  
   362  			return nil
   363  		}
   364  	} else if handler, ok := pm.engine.(consensus.Handler); ok {
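         		// Let the consensus engine (e.g. Istanbul) handle the message first; if it
         		// claims it, return its verdict and skip the eth protocol handling below.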
   365  		pubKey := p.Node().Pubkey()
   366  		addr := crypto.PubkeyToAddress(*pubKey)
   367  		handled, err := handler.HandleMsg(addr, msg)
   368  		if handled {
   369  			return err
   370  		}
   371  	}
   372  
   373  	// Handle the message depending on its contents
   374  	switch {
   375  	case msg.Code == StatusMsg:
   376  		// Status messages should never arrive after the handshake
   377  		return errResp(ErrExtraStatusMsg, "uncontrolled status message")
   378  
   379  	// Block header query, collect the requested headers and reply
   380  	case msg.Code == GetBlockHeadersMsg:
   381  		// Decode the complex header query
   382  		var query getBlockHeadersData
   383  		if err := msg.Decode(&query); err != nil {
   384  			return errResp(ErrDecode, "%v: %v", msg, err)
   385  		}
   386  		hashMode := query.Origin.Hash != (common.Hash{})
   387  		first := true
   388  		maxNonCanonical := uint64(100)
   389  
    390  		// Gather headers until the fetch or network limits are reached
   391  		var (
   392  			bytes   common.StorageSize
   393  			headers []*types.Header
   394  			unknown bool
   395  		)
   396  		for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
   397  			// Retrieve the next header satisfying the query
   398  			var origin *types.Header
   399  			if hashMode {
   400  				if first {
   401  					first = false
   402  					origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash)
   403  					if origin != nil {
   404  						query.Origin.Number = origin.Number.Uint64()
   405  					}
   406  				} else {
   407  					origin = pm.blockchain.GetHeader(query.Origin.Hash, query.Origin.Number)
   408  				}
   409  			} else {
   410  				origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number)
   411  			}
   412  			if origin == nil {
   413  				break
   414  			}
   415  			headers = append(headers, origin)
   416  			bytes += estHeaderRlpSize
   417  
   418  			// Advance to the next header of the query
   419  			switch {
   420  			case hashMode && query.Reverse:
   421  				// Hash based traversal towards the genesis block
   422  				ancestor := query.Skip + 1
   423  				if ancestor == 0 {
   424  					unknown = true
   425  				} else {
   426  					query.Origin.Hash, query.Origin.Number = pm.blockchain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)
   427  					unknown = (query.Origin.Hash == common.Hash{})
   428  				}
   429  			case hashMode && !query.Reverse:
   430  				// Hash based traversal towards the leaf block
   431  				var (
   432  					current = origin.Number.Uint64()
   433  					next    = current + query.Skip + 1
   434  				)
   435  				if next <= current {
   436  					infos, _ := json.MarshalIndent(p.Peer.Info(), "", "  ")
   437  					p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
   438  					unknown = true
   439  				} else {
   440  					if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
   441  						nextHash := header.Hash()
   442  						expOldHash, _ := pm.blockchain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical)
   443  						if expOldHash == query.Origin.Hash {
   444  							query.Origin.Hash, query.Origin.Number = nextHash, next
   445  						} else {
   446  							unknown = true
   447  						}
   448  					} else {
   449  						unknown = true
   450  					}
   451  				}
   452  			case query.Reverse:
   453  				// Number based traversal towards the genesis block
   454  				if query.Origin.Number >= query.Skip+1 {
   455  					query.Origin.Number -= query.Skip + 1
   456  				} else {
   457  					unknown = true
   458  				}
   459  
   460  			case !query.Reverse:
   461  				// Number based traversal towards the leaf block
   462  				query.Origin.Number += query.Skip + 1
   463  			}
   464  		}
   465  		return p.SendBlockHeaders(headers)
   466  
   467  	case msg.Code == BlockHeadersMsg:
    468  		// A batch of headers arrived in response to one of our previous requests
   469  		var headers []*types.Header
   470  		if err := msg.Decode(&headers); err != nil {
   471  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   472  		}
    473  		// If no headers were received, but we're expecting a DAO fork check, maybe this is it
   474  		if len(headers) == 0 && p.forkDrop != nil {
   475  			// Possibly an empty reply to the fork header checks, sanity check TDs
   476  			verifyDAO := true
   477  
   478  			// If we already have a DAO header, we can check the peer's TD against it. If
   479  			// the peer's ahead of this, it too must have a reply to the DAO check
   480  			if daoHeader := pm.blockchain.GetHeaderByNumber(pm.chainconfig.DAOForkBlock.Uint64()); daoHeader != nil {
   481  				if _, td := p.Head(); td.Cmp(pm.blockchain.GetTd(daoHeader.Hash(), daoHeader.Number.Uint64())) >= 0 {
   482  					verifyDAO = false
   483  				}
   484  			}
   485  			// If we're seemingly on the same chain, disable the drop timer
   486  			if verifyDAO {
   487  				p.Log().Debug("Seems to be on the same side of the DAO fork")
   488  				p.forkDrop.Stop()
   489  				p.forkDrop = nil
   490  				return nil
   491  			}
   492  		}
   493  		// Filter out any explicitly requested headers, deliver the rest to the downloader
   494  		filter := len(headers) == 1
   495  		if filter {
   496  			// If it's a potential DAO fork check, validate against the rules
   497  			if p.forkDrop != nil && pm.chainconfig.DAOForkBlock.Cmp(headers[0].Number) == 0 {
   498  				// Disable the fork drop timer
   499  				p.forkDrop.Stop()
   500  				p.forkDrop = nil
   501  
   502  				// Validate the header and either drop the peer or continue
   503  				if err := misc.VerifyDAOHeaderExtraData(pm.chainconfig, headers[0]); err != nil {
   504  					p.Log().Debug("Verified to be on the other side of the DAO fork, dropping")
   505  					return err
   506  				}
   507  				p.Log().Debug("Verified to be on the same side of the DAO fork")
   508  				return nil
   509  			}
    510  			// Irrespective of the fork checks, send the header to the fetcher just in case
   511  			headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
   512  		}
   513  		if len(headers) > 0 || !filter {
   514  			err := pm.downloader.DeliverHeaders(p.id, headers)
   515  			if err != nil {
   516  				log.Debug("Failed to deliver headers", "err", err)
   517  			}
   518  		}
   519  
   520  	case msg.Code == GetBlockBodiesMsg:
   521  		// Decode the retrieval message
   522  		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   523  		if _, err := msgStream.List(); err != nil {
   524  			return err
   525  		}
    526  		// Gather block bodies until the fetch or network limits are reached
   527  		var (
   528  			hash   common.Hash
   529  			bytes  int
   530  			bodies []rlp.RawValue
   531  		)
   532  		for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch {
   533  			// Retrieve the hash of the next block
   534  			if err := msgStream.Decode(&hash); err == rlp.EOL {
   535  				break
   536  			} else if err != nil {
   537  				return errResp(ErrDecode, "msg %v: %v", msg, err)
   538  			}
   539  			// Retrieve the requested block body, stopping if enough was found
   540  			if data := pm.blockchain.GetBodyRLP(hash); len(data) != 0 {
   541  				bodies = append(bodies, data)
   542  				bytes += len(data)
   543  			}
   544  		}
   545  		return p.SendBlockBodiesRLP(bodies)
   546  
   547  	case msg.Code == BlockBodiesMsg:
    548  		// A batch of block bodies arrived in response to one of our previous requests
   549  		var request blockBodiesData
   550  		if err := msg.Decode(&request); err != nil {
   551  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   552  		}
   553  		// Deliver them all to the downloader for queuing
   554  		transactions := make([][]*types.Transaction, len(request))
   555  		uncles := make([][]*types.Header, len(request))
   556  
   557  		for i, body := range request {
   558  			transactions[i] = body.Transactions
   559  			uncles[i] = body.Uncles
   560  		}
   561  		// Filter out any explicitly requested bodies, deliver the rest to the downloader
   562  		filter := len(transactions) > 0 || len(uncles) > 0
   563  		if filter {
   564  			transactions, uncles = pm.fetcher.FilterBodies(p.id, transactions, uncles, time.Now())
   565  		}
   566  		if len(transactions) > 0 || len(uncles) > 0 || !filter {
   567  			err := pm.downloader.DeliverBodies(p.id, transactions, uncles)
   568  			if err != nil {
   569  				log.Debug("Failed to deliver bodies", "err", err)
   570  			}
   571  		}
   572  
   573  	case p.version >= eth63 && msg.Code == GetNodeDataMsg:
   574  		// Decode the retrieval message
   575  		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   576  		if _, err := msgStream.List(); err != nil {
   577  			return err
   578  		}
    579  		// Gather state data until the fetch or network limits are reached
   580  		var (
   581  			hash  common.Hash
   582  			bytes int
   583  			data  [][]byte
   584  		)
   585  		for bytes < softResponseLimit && len(data) < downloader.MaxStateFetch {
   586  			// Retrieve the hash of the next state entry
   587  			if err := msgStream.Decode(&hash); err == rlp.EOL {
   588  				break
   589  			} else if err != nil {
   590  				return errResp(ErrDecode, "msg %v: %v", msg, err)
   591  			}
   592  			// Retrieve the requested state entry, stopping if enough was found
   593  			if entry, err := pm.blockchain.TrieNode(hash); err == nil {
   594  				data = append(data, entry)
   595  				bytes += len(entry)
   596  			}
   597  		}
   598  		return p.SendNodeData(data)
   599  
   600  	case p.version >= eth63 && msg.Code == NodeDataMsg:
    601  		// A batch of node state data arrived in response to one of our previous requests
   602  		var data [][]byte
   603  		if err := msg.Decode(&data); err != nil {
   604  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   605  		}
   606  		// Deliver all to the downloader
   607  		if err := pm.downloader.DeliverNodeData(p.id, data); err != nil {
   608  			log.Debug("Failed to deliver node state data", "err", err)
   609  		}
   610  
   611  	case p.version >= eth63 && msg.Code == GetReceiptsMsg:
   612  		// Decode the retrieval message
   613  		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   614  		if _, err := msgStream.List(); err != nil {
   615  			return err
   616  		}
    617  		// Gather receipts until the fetch or network limits are reached
   618  		var (
   619  			hash     common.Hash
   620  			bytes    int
   621  			receipts []rlp.RawValue
   622  		)
   623  		for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptFetch {
   624  			// Retrieve the hash of the next block
   625  			if err := msgStream.Decode(&hash); err == rlp.EOL {
   626  				break
   627  			} else if err != nil {
   628  				return errResp(ErrDecode, "msg %v: %v", msg, err)
   629  			}
   630  			// Retrieve the requested block's receipts, skipping if unknown to us
   631  			results := pm.blockchain.GetReceiptsByHash(hash)
   632  			if results == nil {
   633  				if header := pm.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
   634  					continue
   635  				}
   636  			}
   637  			// If known, encode and queue for response packet
   638  			if encoded, err := rlp.EncodeToBytes(results); err != nil {
   639  				log.Error("Failed to encode receipt", "err", err)
   640  			} else {
   641  				receipts = append(receipts, encoded)
   642  				bytes += len(encoded)
   643  			}
   644  		}
   645  		return p.SendReceiptsRLP(receipts)
   646  
   647  	case p.version >= eth63 && msg.Code == ReceiptsMsg:
    648  		// A batch of receipts arrived in response to one of our previous requests
   649  		var receipts [][]*types.Receipt
   650  		if err := msg.Decode(&receipts); err != nil {
   651  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   652  		}
   653  		// Deliver all to the downloader
   654  		if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil {
   655  			log.Debug("Failed to deliver receipts", "err", err)
   656  		}
   657  
   658  	case msg.Code == NewBlockHashesMsg:
   659  		var announces newBlockHashesData
   660  		if err := msg.Decode(&announces); err != nil {
   661  			return errResp(ErrDecode, "%v: %v", msg, err)
   662  		}
   663  		// Mark the hashes as present at the remote node
   664  		for _, block := range announces {
   665  			p.MarkBlock(block.Hash)
   666  		}
   667  		// Schedule all the unknown hashes for retrieval
   668  		unknown := make(newBlockHashesData, 0, len(announces))
   669  		for _, block := range announces {
   670  			if !pm.blockchain.HasBlock(block.Hash, block.Number) {
   671  				unknown = append(unknown, block)
   672  			}
   673  		}
   674  		for _, block := range unknown {
   675  			pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
   676  		}
   677  
   678  	case msg.Code == NewBlockMsg:
   679  		// Retrieve and decode the propagated block
   680  		var request newBlockData
   681  		if err := msg.Decode(&request); err != nil {
   682  			return errResp(ErrDecode, "%v: %v", msg, err)
   683  		}
   684  		request.Block.ReceivedAt = msg.ReceivedAt
   685  		request.Block.ReceivedFrom = p
   686  
   687  		// Mark the peer as owning the block and schedule it for import
   688  		p.MarkBlock(request.Block.Hash())
   689  		pm.fetcher.Enqueue(p.id, request.Block)
   690  
    691  		// Assuming the block is importable by the peer, but possibly not yet imported,
   692  		// calculate the head hash and TD that the peer truly must have.
   693  		var (
   694  			trueHead = request.Block.ParentHash()
   695  			trueTD   = new(big.Int).Sub(request.TD, request.Block.Difficulty())
   696  		)
   697  		// Update the peer's total difficulty if better than the previous
   698  		if _, td := p.Head(); trueTD.Cmp(td) > 0 {
   699  			p.SetHead(trueHead, trueTD)
   700  
   701  			// Schedule a sync if above ours. Note, this will not fire a sync for a gap of
    702  			// a single block (as the true TD is below the propagated block), however this
   703  			// scenario should easily be covered by the fetcher.
   704  			currentBlock := pm.blockchain.CurrentBlock()
   705  			if trueTD.Cmp(pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())) > 0 {
   706  				go pm.synchronise(p)
   707  			}
   708  		}
   709  
   710  	case msg.Code == TxMsg:
   711  		// Transactions arrived, make sure we have a valid and fresh chain to handle them
   712  		if atomic.LoadUint32(&pm.acceptTxs) == 0 {
   713  			break
   714  		}
   715  		// Transactions can be processed, parse all of them and deliver to the pool
   716  		var txs []*types.Transaction
   717  		if err := msg.Decode(&txs); err != nil {
   718  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   719  		}
   720  		for i, tx := range txs {
   721  			// Validate and mark the remote transaction
   722  			if tx == nil {
   723  				return errResp(ErrDecode, "transaction %d is nil", i)
   724  			}
   725  			p.MarkTransaction(tx.Hash())
   726  		}
   727  		pm.txpool.AddRemotes(txs)
   728  
   729  	default:
   730  		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
   731  	}
   732  	return nil
   733  }
   734  
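         // Enqueue schedules a block for import via the block fetcher.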
   735  func (pm *ProtocolManager) Enqueue(id string, block *types.Block) {
   736  	pm.fetcher.Enqueue(id, block)
   737  }
   738  
    739  // BroadcastBlock will either propagate a block to a subset of its peers, or
    740  // will only announce its availability (depending on what's requested).
   741  func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
   742  	hash := block.Hash()
   743  	peers := pm.peers.PeersWithoutBlock(hash)
   744  
    745  	// If propagation is requested, send to a subset of our peers
   746  	if propagate {
   747  		// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
   748  		var td *big.Int
   749  		if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil {
   750  			td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))
   751  		} else {
   752  			log.Error("Propagating dangling block", "number", block.Number(), "hash", hash)
   753  			return
   754  		}
   755  		// Send the block to a subset of our peers
   756  		transferLen := int(math.Sqrt(float64(len(peers))))
   757  		if transferLen < minBroadcastPeers {
   758  			transferLen = minBroadcastPeers
   759  		}
   760  		if transferLen > len(peers) {
   761  			transferLen = len(peers)
   762  		}
   763  		transfer := peers[:transferLen]
   764  		for _, peer := range transfer {
   765  			peer.AsyncSendNewBlock(block, td)
   766  		}
   767  		log.Trace("Propagated block", "hash", hash, "recipients", len(transfer), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
   768  		return
   769  	}
    770  	// Otherwise if the block is indeed in our own chain, announce it
   771  	if pm.blockchain.HasBlock(hash, block.NumberU64()) {
   772  		for _, peer := range peers {
   773  			peer.AsyncSendNewBlockHash(block)
   774  		}
   775  		log.Trace("Announced block", "hash", hash, "recipients", len(peers), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
   776  	}
   777  }
   778  
   779  // BroadcastTxs will propagate a batch of transactions to all peers which are not known to
   780  // already have the given transaction.
   781  func (pm *ProtocolManager) BroadcastTxs(txs types.Transactions) {
   782  	var txset = make(map[*peer]types.Transactions)
   783  
    784  	// Broadcast transactions to the peers that are not yet known to have them
   785  	// NOTE: Raft-based consensus currently assumes that geth broadcasts
   786  	// transactions to all peers in the network. A previous comment here
   787  	// indicated that this logic might change in the future to only send to a
   788  	// subset of peers. If this change occurs upstream, a merge conflict should
   789  	// arise here, and we should add logic to send to *all* peers in raft mode.
   790  
   791  	for _, tx := range txs {
   792  		peers := pm.peers.PeersWithoutTx(tx.Hash())
   793  		for _, peer := range peers {
   794  			txset[peer] = append(txset[peer], tx)
   795  		}
   796  		log.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(peers))
   797  	}
   798  	// FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
   799  	for peer, txs := range txset {
   800  		peer.AsyncSendTransactions(txs)
   801  	}
   802  }
   803  
   804  // Mined broadcast loop
   805  func (pm *ProtocolManager) minedBroadcastLoop() {
    806  	// automatically stops when unsubscribed
   807  	for obj := range pm.minedBlockSub.Chan() {
   808  		if ev, ok := obj.Data.(core.NewMinedBlockEvent); ok {
   809  			pm.BroadcastBlock(ev.Block, true)  // First propagate block to peers
   810  			pm.BroadcastBlock(ev.Block, false) // Only then announce to the rest
   811  		}
   812  	}
   813  }
   814  
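         // txBroadcastLoop relays transactions arriving on the txsCh subscription to the
         // connected peers until the subscription is torn down.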
   815  func (pm *ProtocolManager) txBroadcastLoop() {
   816  	for {
   817  		select {
   818  		case event := <-pm.txsCh:
   819  			pm.BroadcastTxs(event.Txs)
   820  
   821  		// Err() channel will be closed when unsubscribing.
   822  		case <-pm.txsSub.Err():
   823  			return
   824  		}
   825  	}
   826  }
   827  
   828  // NodeInfo represents a short summary of the Ethereum sub-protocol metadata
   829  // known about the host peer.
   830  type NodeInfo struct {
    831  	Network    uint64              `json:"network"`    // Ethereum network ID (1=Frontier, 2=Morden, 3=Ropsten, 4=Rinkeby)
   832  	Difficulty *big.Int            `json:"difficulty"` // Total difficulty of the host's blockchain
   833  	Genesis    common.Hash         `json:"genesis"`    // SHA3 hash of the host's genesis block
   834  	Config     *params.ChainConfig `json:"config"`     // Chain configuration for the fork rules
   835  	Head       common.Hash         `json:"head"`       // SHA3 hash of the host's best owned block
   836  	Consensus  string              `json:"consensus"`  // Consensus mechanism in use
   837  }
   838  
   839  // NodeInfo retrieves some protocol metadata about the running host node.
   840  func (pm *ProtocolManager) NodeInfo() *NodeInfo {
   841  	currentBlock := pm.blockchain.CurrentBlock()
   842  
   843  	return &NodeInfo{
   844  		Network:    pm.networkID,
   845  		Difficulty: pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64()),
   846  		Genesis:    pm.blockchain.Genesis().Hash(),
   847  		Config:     pm.blockchain.Config(),
   848  		Head:       currentBlock.Hash(),
   849  		Consensus:  pm.getConsensusAlgorithm(),
   850  	}
   851  }
   852  
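         // getConsensusAlgorithm reports the consensus mechanism in use, derived from
         // raft mode or the type of the configured engine.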
   853  func (pm *ProtocolManager) getConsensusAlgorithm() string {
   854  	var consensusAlgo string
   855  	if pm.raftMode { // raft does not use consensus interface
   856  		consensusAlgo = "raft"
   857  	} else {
   858  		switch pm.engine.(type) {
   859  		case consensus.Istanbul:
   860  			consensusAlgo = "istanbul"
   861  		case *clique.Clique:
   862  			consensusAlgo = "clique"
   863  		case *ethash.Ethash:
   864  			consensusAlgo = "ethash"
   865  		default:
   866  			consensusAlgo = "unknown"
   867  		}
   868  	}
   869  	return consensusAlgo
   870  }
   871  
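         // FindPeers returns the currently connected peers whose addresses (derived
         // from their node public keys) appear in the targets set, keyed by address.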
   872  func (self *ProtocolManager) FindPeers(targets map[common.Address]bool) map[common.Address]consensus.Peer {
   873  	m := make(map[common.Address]consensus.Peer)
   874  	for _, p := range self.peers.Peers() {
   875  		pubKey := p.Node().Pubkey()
   876  		addr := crypto.PubkeyToAddress(*pubKey)
   877  		if targets[addr] {
   878  			m[addr] = p
   879  		}
   880  	}
   881  	return m
   882  }