github.com/SmartMeshFoundation/Spectrum@v0.0.0-20220621030607-452a266fee1e/eth/handler.go

     1  // Copyright 2015 The Spectrum Authors
     2  // This file is part of the Spectrum library.
     3  //
     4  // The Spectrum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The Spectrum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the Spectrum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package eth
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"math"
    24  	"math/big"
    25  	"sync"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/SmartMeshFoundation/Spectrum/common"
    30  	"github.com/SmartMeshFoundation/Spectrum/consensus"
    31  	"github.com/SmartMeshFoundation/Spectrum/consensus/misc"
    32  	"github.com/SmartMeshFoundation/Spectrum/core"
    33  	"github.com/SmartMeshFoundation/Spectrum/core/types"
    34  	"github.com/SmartMeshFoundation/Spectrum/crypto"
    35  	"github.com/SmartMeshFoundation/Spectrum/eth/downloader"
    36  	"github.com/SmartMeshFoundation/Spectrum/eth/fetcher"
    37  	"github.com/SmartMeshFoundation/Spectrum/ethdb"
    38  	"github.com/SmartMeshFoundation/Spectrum/event"
    39  	"github.com/SmartMeshFoundation/Spectrum/log"
    40  	"github.com/SmartMeshFoundation/Spectrum/p2p"
    41  	"github.com/SmartMeshFoundation/Spectrum/p2p/discover"
    42  	"github.com/SmartMeshFoundation/Spectrum/params"
    43  	"github.com/SmartMeshFoundation/Spectrum/rlp"
    44  )
    45  
    46  const (
    47  	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
    48  	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header
    49  
    50  	// txChanSize is the size of the channel listening for TxPreEvent.
    51  	// The number is referenced from the size of tx pool.
    52  	txChanSize = 4096
    53  )
    54  
    55  var (
    56  	daoChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the DAO handshake challenge
    57  )
    58  
    59  // errIncompatibleConfig is returned if the requested protocols and configs are
    60  // not compatible (low protocol version restrictions and high requirements).
    61  var errIncompatibleConfig = errors.New("incompatible configuration")
    62  
    63  func errResp(code errCode, format string, v ...interface{}) error {
    64  	return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
    65  }
    66  
    67  type ProtocolManager struct {
    68  	networkId uint64
    69  
    70  	fastSync  uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
    71  	acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)
    72  
    73  	txpool      txPool
    74  	blockchain  *core.BlockChain
    75  	chaindb     ethdb.Database
    76  	chainconfig *params.ChainConfig
    77  	maxPeers    int
    78  
    79  	downloader *downloader.Downloader
    80  	fetcher    *fetcher.Fetcher
    81  	peers      *peerSet
    82  
    83  	SubProtocols []p2p.Protocol
    84  
    85  	eventMux      *event.TypeMux
    86  	txCh          chan core.TxPreEvent
    87  	txSub         event.Subscription
    88  	minedBlockSub *event.TypeMuxSubscription
    89  
    90  	// channels for fetcher, syncer, txsyncLoop
    91  	newPeerCh   chan *peer
    92  	txsyncCh    chan *txsync
    93  	quitSync    chan struct{}
    94  	noMorePeers chan struct{}
    95  
    96  	// wait group is used for graceful shutdowns during downloading
    97  	// and processing
    98  	wg sync.WaitGroup
    99  }
   100  
   101  // NewProtocolManager returns a new Ethereum sub-protocol manager. The Ethereum sub-protocol manages peers capable
   102  // of communicating with the Ethereum network.
   103  func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkId uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
   104  	// Create the protocol manager with the base fields
   105  	manager := &ProtocolManager{
   106  		networkId:   networkId,
   107  		eventMux:    mux,
   108  		txpool:      txpool,
   109  		blockchain:  blockchain,
   110  		chaindb:     chaindb,
   111  		chainconfig: config,
   112  		peers:       newPeerSet(),
   113  		newPeerCh:   make(chan *peer),
   114  		noMorePeers: make(chan struct{}),
   115  		txsyncCh:    make(chan *txsync),
   116  		quitSync:    make(chan struct{}),
   117  	}
   118  	// Figure out whether to allow fast sync or not
   119  	if mode == downloader.FastSync && blockchain.CurrentBlock().NumberU64() > 0 {
   120  		log.Warn("Blockchain not empty, fast sync disabled")
   121  		mode = downloader.FullSync
   122  	}
   123  	if mode == downloader.FastSync {
   124  		manager.fastSync = uint32(1)
   125  	}
   126  	// Initiate a sub-protocol for every implemented version we can handle
   127  	manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions))
   128  	for i, version := range ProtocolVersions {
   129  		// Skip protocol version if incompatible with the mode of operation
   130  		if mode == downloader.FastSync && version < eth63 {
   131  			continue
   132  		}
   133  		// Compatible; initialise the sub-protocol
   134  		version := version // Closure for the run
   135  		manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
   136  			Name:    ProtocolName,
   137  			Version: version,
   138  			Length:  ProtocolLengths[i],
   139  			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
   140  				log.Debug(fmt.Sprintf("[ ProtocolManager ] ==> establish a conn with %v", p.RemoteAddr()))
   141  				peer := manager.newPeer(int(version), p, rw)
   142  				select {
   143  				case manager.newPeerCh <- peer:
   144  					manager.wg.Add(1)
   145  					defer manager.wg.Done()
   146  					return manager.handle(peer)
   147  				case <-manager.quitSync:
   148  					return p2p.DiscQuitting
   149  				}
   150  			},
   151  			NodeInfo: func() interface{} {
   152  				return manager.NodeInfo()
   153  			},
   154  			PeerInfo: func(id discover.NodeID) interface{} {
   155  				if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
   156  					return p.Info()
   157  				}
   158  				return nil
   159  			},
   160  		})
   161  	}
   162  	if len(manager.SubProtocols) == 0 {
   163  		return nil, errIncompatibleConfig
   164  	}
   165  	// Construct the different synchronisation mechanisms
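        	// The nil argument is believed to be the optional light-chain parameter inherited from upstream
        	// go-ethereum's downloader.New; both full and fast sync read from the full BlockChain here.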
   166  	manager.downloader = downloader.New(mode, chaindb, manager.eventMux, blockchain, nil, manager.removePeer)
   167  
   168  	validator := func(header *types.Header) error {
   169  		return engine.VerifyHeader(blockchain, header, true)
   170  	}
   171  	heighter := func() uint64 {
   172  		return blockchain.CurrentBlock().NumberU64()
   173  	}
   174  	inserter := func(blocks types.Blocks) (i int, e error) {
   175  		// If fast sync is running, deny importing weird blocks
   176  		if atomic.LoadUint32(&manager.fastSync) == 1 {
   177  			log.Warn("Discarded bad propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
   178  			return 0, nil
   179  		}
   180  		atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
   181  		i, e = manager.blockchain.InsertChain(blocks)
   182  		log.Debug("Inserted block done", "number", blocks[0].Number())
   183  		return
   184  	}
   185  	manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
   186  
   187  	return manager, nil
   188  }
   189  
   190  func (pm *ProtocolManager) removePeer(id string) {
   191  	// Short circuit if the peer was already removed
   192  	peer := pm.peers.Peer(id)
   193  	if peer == nil {
   194  		return
   195  	}
   196  	log.Debug("Removing Ethereum peer", "peer", id)
   197  
   198  	// Unregister the peer from the downloader and Ethereum peer set
   199  	pm.downloader.UnregisterPeer(id)
   200  	if err := pm.peers.Unregister(id); err != nil {
   201  		log.Error("Peer removal failed", "peer", id, "err", err)
   202  	}
   203  	// Hard disconnect at the networking layer
   204  	if peer != nil {
   205  		peer.Peer.Disconnect(p2p.DiscUselessPeer)
   206  	}
   207  }
   208  
   209  func (pm *ProtocolManager) Start(maxPeers int) {
   210  	pm.maxPeers = maxPeers
   211  
   212  	// broadcast transactions
   213  	pm.txCh = make(chan core.TxPreEvent, txChanSize)
   214  	pm.txSub = pm.txpool.SubscribeTxPreEvent(pm.txCh)
   215  	go pm.txBroadcastLoop()
   216  
   217  	// broadcast mined blocks
   218  	pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{})
   219  	go pm.minedBroadcastLoop()
   220  
   221  	// start sync handlers
   222  	go pm.syncer()
   223  	go pm.txsyncLoop()
   224  }
   225  
   226  func (pm *ProtocolManager) Stop() {
   227  	log.Info("Stopping Ethereum protocol")
   228  
   229  	pm.txSub.Unsubscribe()         // quits txBroadcastLoop
   230  	pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
   231  
   232  	// Quit the sync loop.
   233  	// After this send has completed, no new peers will be accepted.
   234  	pm.noMorePeers <- struct{}{}
   235  
   236  	// Quit fetcher, txsyncLoop.
   237  	close(pm.quitSync)
   238  
   239  	// Disconnect existing sessions.
   240  	// This also closes the gate for any new registrations on the peer set.
   241  	// sessions which are already established but not added to pm.peers yet
   242  	// will exit when they try to register.
   243  	pm.peers.Close()
   244  
   245  	// Wait for all peer handler goroutines and the loops to come down.
   246  	pm.wg.Wait()
   247  
   248  	log.Info("Ethereum protocol stopped")
   249  }
   250  
   251  func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
   252  	return newPeer(pv, p, newMeteredMsgWriter(rw))
   253  }
   254  
   255  // handle is the callback invoked to manage the life cycle of an eth peer. When
   256  // this function terminates, the peer is disconnected.
   257  func (pm *ProtocolManager) handle(p *peer) error {
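        	// Reject the connection outright if we are already at the configured peer limit.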
   258  	if pm.peers.Len() >= pm.maxPeers {
   259  		return p2p.DiscTooManyPeers
   260  	}
   261  	p.Log().Debug("Ethereum peer connected", "name", p.Name())
   262  
   263  	// Execute the Ethereum handshake
   264  	td, head, genesis := pm.blockchain.Status()
   265  	if err := p.Handshake(pm.networkId, td, head, genesis); err != nil {
   266  		p.Log().Debug("Ethereum handshake failed", "peer", p.RemoteAddr(), "err", err)
   267  		return err
   268  	}
   269  	p.Log().Debug("Ethereum handshake succeeded", "peer", p.RemoteAddr())
   270  	if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
   271  		rw.Init(p.version)
   272  	}
   273  	// Register the peer locally
   274  	if err := pm.peers.Register(p); err != nil {
   275  		p.Log().Error("Ethereum peer registration failed", "err", err)
   276  		return err
   277  	}
   278  	defer pm.removePeer(p.id)
   279  
   280  	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
   281  	if err := pm.downloader.RegisterPeer(p.id, p.version, p); err != nil {
   282  		return err
   283  	}
   284  	// Propagate existing transactions. New transactions appearing
   285  	// after this will be sent via broadcasts.
   286  	pm.syncTransactions(p)
   287  
   288  	// If we're DAO hard-fork aware, validate any remote peer with regard to the hard-fork
   289  	if daoBlock := pm.chainconfig.DAOForkBlock; daoBlock != nil {
   290  		// Request the peer's DAO fork header for extra-data validation
   291  		if err := p.RequestHeadersByNumber(daoBlock.Uint64(), 1, 0, false); err != nil {
   292  			return err
   293  		}
   294  		// Start a timer to disconnect if the peer doesn't reply in time
   295  		p.forkDrop = time.AfterFunc(daoChallengeTimeout, func() {
   296  			p.Log().Debug("Timed out DAO fork-check, dropping")
   297  			pm.removePeer(p.id)
   298  		})
   299  		// Make sure it's cleaned up if the peer dies off
   300  		defer func() {
   301  			if p.forkDrop != nil {
   302  				p.forkDrop.Stop()
   303  				p.forkDrop = nil
   304  			}
   305  		}()
   306  	}
   307  	// main loop. handle incoming messages.
   308  	for {
   309  		if err := pm.handleMsg(p); err != nil {
   310  			p.Log().Debug("Ethereum message handling failed", "err", err)
   311  			return err
   312  		}
   313  	}
   314  }
   315  
   316  // handleMsg is invoked whenever an inbound message is received from a remote
   317  // peer. The remote connection is torn down upon returning any error.
   318  func (pm *ProtocolManager) handleMsg(p *peer) error {
   319  	// Read the next message from the remote peer, and ensure it's fully consumed
   320  	msg, err := p.rw.ReadMsg()
   321  	if err != nil {
   322  		return err
   323  	}
   324  	if msg.Size > ProtocolMaxMsgSize {
   325  		return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
   326  	}
   327  	defer msg.Discard()
   328  
   329  	// Handle the message depending on its contents
   330  	switch {
   331  	case msg.Code == StatusMsg:
   332  		// Status messages should never arrive after the handshake
   333  		return errResp(ErrExtraStatusMsg, "uncontrolled status message")
   334  
   335  		// Block header query, collect the requested headers and reply
   336  	case msg.Code == GetBlockHeadersMsg:
   337  		// Decode the complex header query
   338  		var query getBlockHeadersData
   339  		if err := msg.Decode(&query); err != nil {
   340  			return errResp(ErrDecode, "%v: %v", msg, err)
   341  		}
   342  		hashMode := query.Origin.Hash != (common.Hash{})
   343  
   344  		// Gather headers until the fetch or network limits are reached
   345  		var (
   346  			bytes   common.StorageSize
   347  			headers []*types.Header
   348  			unknown bool
   349  		)
   350  		for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
   351  			// Retrieve the next header satisfying the query
   352  			var origin *types.Header
   353  			if hashMode {
   354  				origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash)
   355  			} else {
   356  				origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number)
   357  			}
   358  			if origin == nil {
   359  				break
   360  			}
   361  			number := origin.Number.Uint64()
   362  			headers = append(headers, origin)
   363  			bytes += estHeaderRlpSize
   364  
   365  			// Advance to the next header of the query
   366  			switch {
   367  			case query.Origin.Hash != (common.Hash{}) && query.Reverse:
   368  				// Hash based traversal towards the genesis block
   369  				for i := 0; i < int(query.Skip)+1; i++ {
   370  					if header := pm.blockchain.GetHeader(query.Origin.Hash, number); header != nil {
   371  						query.Origin.Hash = header.ParentHash
   372  						number--
   373  					} else {
   374  						unknown = true
   375  						break
   376  					}
   377  				}
   378  			case query.Origin.Hash != (common.Hash{}) && !query.Reverse:
   379  				// Hash based traversal towards the leaf block
   380  				var (
   381  					current = origin.Number.Uint64()
   382  					next    = current + query.Skip + 1
   383  				)
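        				// next can only be <= current if the uint64 addition overflowed, i.e. query.Skip
        				// is absurdly large, so treat the request as deliberately malformed.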
   384  				if next <= current {
   385  					infos, _ := json.MarshalIndent(p.Peer.Info(), "", "  ")
   386  					p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
   387  					unknown = true
   388  				} else {
   389  					if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
   390  						if pm.blockchain.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash {
   391  							query.Origin.Hash = header.Hash()
   392  						} else {
   393  							unknown = true
   394  						}
   395  					} else {
   396  						unknown = true
   397  					}
   398  				}
   399  			case query.Reverse:
   400  				// Number based traversal towards the genesis block
   401  				if query.Origin.Number >= query.Skip+1 {
   402  					query.Origin.Number -= (query.Skip + 1)
   403  				} else {
   404  					unknown = true
   405  				}
   406  
   407  			case !query.Reverse:
   408  				// Number based traversal towards the leaf block
   409  				query.Origin.Number += (query.Skip + 1)
   410  			}
   411  		}
   412  		return p.SendBlockHeaders(headers)
   413  
   414  	case msg.Code == BlockHeadersMsg:
   415  		// A batch of headers arrived to one of our previous requests
   416  		var headers []*types.Header
   417  		if err := msg.Decode(&headers); err != nil {
   418  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   419  		}
   420  		// If no headers were received, but we're expecting a DAO fork check, maybe it's that
   421  		if len(headers) == 0 && p.forkDrop != nil {
   422  			// Possibly an empty reply to the fork header checks, sanity check TDs
   423  			verifyDAO := true
   424  
   425  			// If we already have a DAO header, we can check the peer's TD against it. If
   426  			// the peer's ahead of this, it too must have a reply to the DAO check
   427  			if daoHeader := pm.blockchain.GetHeaderByNumber(pm.chainconfig.DAOForkBlock.Uint64()); daoHeader != nil {
   428  				if _, td := p.Head(); td.Cmp(pm.blockchain.GetTd(daoHeader.Hash(), daoHeader.Number.Uint64())) >= 0 {
   429  					verifyDAO = false
   430  				}
   431  			}
   432  			// If we're seemingly on the same chain, disable the drop timer
   433  			if verifyDAO {
   434  				p.Log().Debug("Seems to be on the same side of the DAO fork")
   435  				p.forkDrop.Stop()
   436  				p.forkDrop = nil
   437  				return nil
   438  			}
   439  		}
   440  		// Filter out any explicitly requested headers, deliver the rest to the downloader
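        		// Both fetcher header requests (RequestOneHeader) and the DAO fork check ask for exactly
        		// one header, so only single-header batches are candidates for filtering.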
   441  		filter := len(headers) == 1
   442  		if filter {
   443  			// If it's a potential DAO fork check, validate against the rules
   444  			if p.forkDrop != nil && pm.chainconfig.DAOForkBlock.Cmp(headers[0].Number) == 0 {
   445  				// Disable the fork drop timer
   446  				p.forkDrop.Stop()
   447  				p.forkDrop = nil
   448  
   449  				// Validate the header and either drop the peer or continue
   450  				if err := misc.VerifyDAOHeaderExtraData(pm.chainconfig, headers[0]); err != nil {
   451  					p.Log().Debug("Verified to be on the other side of the DAO fork, dropping")
   452  					return err
   453  				}
   454  				p.Log().Debug("Verified to be on the same side of the DAO fork")
   455  				return nil
   456  			}
   457  			// Irrespective of the fork checks, send the header to the fetcher just in case
   458  			headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
   459  		}
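        		// Anything the fetcher did not claim (or any multi-header batch) is handed to the downloader.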
   460  		if len(headers) > 0 || !filter {
   461  			err := pm.downloader.DeliverHeaders(p.id, headers)
   462  			if err != nil {
   463  				log.Debug("Failed to deliver headers", "err", err)
   464  			}
   465  		}
   466  
   467  	case msg.Code == GetBlockBodiesMsg:
   468  		// Decode the retrieval message
   469  		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   470  		if _, err := msgStream.List(); err != nil {
   471  			return err
   472  		}
   473  		// Gather blocks until the fetch or network limits are reached
   474  		var (
   475  			hash   common.Hash
   476  			bytes  int
   477  			bodies []rlp.RawValue
   478  		)
   479  		for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch {
   480  			// Retrieve the hash of the next block
   481  			if err := msgStream.Decode(&hash); err == rlp.EOL {
   482  				break
   483  			} else if err != nil {
   484  				return errResp(ErrDecode, "msg %v: %v", msg, err)
   485  			}
   486  			// Retrieve the requested block body, stopping if enough was found
   487  			if data := pm.blockchain.GetBodyRLP(hash); len(data) != 0 {
   488  				bodies = append(bodies, data)
   489  				bytes += len(data)
   490  			}
   491  		}
   492  		return p.SendBlockBodiesRLP(bodies)
   493  
   494  	case msg.Code == BlockBodiesMsg:
   495  		// A batch of block bodies arrived to one of our previous requests
   496  		var request blockBodiesData
   497  		if err := msg.Decode(&request); err != nil {
   498  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   499  		}
   500  		// Deliver them all to the downloader for queuing
   501  		transactions := make([][]*types.Transaction, len(request))
   502  		uncles := make([][]*types.Header, len(request))
   503  
   504  		for i, body := range request {
   505  			transactions[i] = body.Transactions
   506  			uncles[i] = body.Uncles
   507  		}
   508  		// Filter out any explicitly requested bodies, deliver the rest to the downloader
   509  		filter := len(transactions) > 0 || len(uncles) > 0
   510  		if filter {
   511  			transactions, uncles = pm.fetcher.FilterBodies(p.id, transactions, uncles, time.Now())
   512  		}
   513  		if len(transactions) > 0 || len(uncles) > 0 || !filter {
   514  			err := pm.downloader.DeliverBodies(p.id, transactions, uncles)
   515  			if err != nil {
   516  				log.Debug("Failed to deliver bodies", "err", err)
   517  			}
   518  		}
   519  
   520  	case p.version >= eth63 && msg.Code == GetNodeDataMsg:
   521  		// Decode the retrieval message
   522  		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   523  		if _, err := msgStream.List(); err != nil {
   524  			return err
   525  		}
   526  		// Gather state data until the fetch or network limits are reached
   527  		var (
   528  			hash  common.Hash
   529  			bytes int
   530  			data  [][]byte
   531  		)
   532  		for bytes < softResponseLimit && len(data) < downloader.MaxStateFetch {
   533  			// Retrieve the hash of the next state entry
   534  			if err := msgStream.Decode(&hash); err == rlp.EOL {
   535  				break
   536  			} else if err != nil {
   537  				return errResp(ErrDecode, "msg %v: %v", msg, err)
   538  			}
   539  			// Retrieve the requested state entry, stopping if enough was found
   540  			if entry, err := pm.chaindb.Get(hash.Bytes()); err == nil {
   541  				data = append(data, entry)
   542  				bytes += len(entry)
   543  			}
   544  		}
   545  		return p.SendNodeData(data)
   546  
   547  	case p.version >= eth63 && msg.Code == NodeDataMsg:
   548  		// A batch of node state data arrived to one of our previous requests
   549  		var data [][]byte
   550  		if err := msg.Decode(&data); err != nil {
   551  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   552  		}
   553  		// Deliver all to the downloader
   554  		if err := pm.downloader.DeliverNodeData(p.id, data); err != nil {
   555  			log.Debug("Failed to deliver node state data", "err", err)
   556  		}
   557  
   558  	case p.version >= eth63 && msg.Code == GetReceiptsMsg:
   559  		// Decode the retrieval message
   560  		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   561  		if _, err := msgStream.List(); err != nil {
   562  			return err
   563  		}
   564  		// Gather receipts until the fetch or network limits are reached
   565  		var (
   566  			hash     common.Hash
   567  			bytes    int
   568  			receipts []rlp.RawValue
   569  		)
   570  		for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptFetch {
   571  			// Retrieve the hash of the next block
   572  			if err := msgStream.Decode(&hash); err == rlp.EOL {
   573  				break
   574  			} else if err != nil {
   575  				return errResp(ErrDecode, "msg %v: %v", msg, err)
   576  			}
   577  			// Retrieve the requested block's receipts, skipping if unknown to us
   578  			results := core.GetBlockReceipts(pm.chaindb, hash, core.GetBlockNumber(pm.chaindb, hash))
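        			// Unknown blocks are skipped; a known block whose receipt root is the empty root hash
        			// still gets an (empty) entry appended below.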
   579  			if results == nil {
   580  				if header := pm.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
   581  					continue
   582  				}
   583  			}
   584  			// If known, encode and queue for response packet
   585  			if encoded, err := rlp.EncodeToBytes(results); err != nil {
   586  				log.Error("Failed to encode receipt", "err", err)
   587  			} else {
   588  				receipts = append(receipts, encoded)
   589  				bytes += len(encoded)
   590  			}
   591  		}
   592  		return p.SendReceiptsRLP(receipts)
   593  
   594  	case p.version >= eth63 && msg.Code == ReceiptsMsg:
   595  		// A batch of receipts arrived to one of our previous requests
   596  		var receipts [][]*types.Receipt
   597  		if err := msg.Decode(&receipts); err != nil {
   598  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   599  		}
   600  		// Deliver all to the downloader
   601  		if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil {
   602  			log.Debug("Failed to deliver receipts", "err", err)
   603  		}
   604  
   605  	case msg.Code == NewBlockHashesMsg:
   606  		var announces newBlockHashesData
   607  		if err := msg.Decode(&announces); err != nil {
   608  			return errResp(ErrDecode, "%v: %v", msg, err)
   609  		}
   610  		// Mark the hashes as present at the remote node
   611  		for _, block := range announces {
   612  			p.MarkBlock(block.Hash)
   613  		}
   614  		// Schedule all the unknown hashes for retrieval
   615  		unknown := make(newBlockHashesData, 0, len(announces))
   616  		for _, block := range announces {
   617  			if !pm.blockchain.HasBlock(block.Hash, block.Number) {
   618  				unknown = append(unknown, block)
   619  			}
   620  		}
   621  		for _, block := range unknown {
   622  			pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
   623  		}
   624  
   625  	case msg.Code == NewBlockMsg:
   626  		// Retrieve and decode the propagated block
   627  		var request newBlockData
   628  		if err := msg.Decode(&request); err != nil {
   629  			return errResp(ErrDecode, "%v: %v", msg, err)
   630  		}
   631  		request.Block.ReceivedAt = msg.ReceivedAt
   632  		request.Block.ReceivedFrom = p
   633  
   634  		// Mark the peer as owning the block and schedule it for import
   635  		p.MarkBlock(request.Block.Hash())
   636  		pm.fetcher.Enqueue(p.id, request.Block)
   637  
   638  		// Assuming the block is importable by the peer, but possibly not yet done so,
   639  		// calculate the head hash and TD that the peer truly must have.
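        		// Derive the peer's address from its node ID; it is used only in the diagnostic log entry below.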
   640  		peerpub, _ := p.ID().Pubkey()
   641  		var (
   642  			trueHead     = request.Block.ParentHash()
   643  			trueTD       = new(big.Int).Sub(request.TD, request.Block.Difficulty())
   644  			currentBlock = pm.blockchain.CurrentBlock()
   645  			peeraddr     = crypto.PubkeyToAddress(*peerpub)
   646  		)
   647  		_, peerTD := p.Head()
   648  
   649  		log.Info("<<NewBlockMsg>>",
   650  			"currentBlock", currentBlock.Number(),
   651  			"recvBlock", request.Block.Number(),
   652  			"currentTD", pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64()),
   653  			"trueTD", trueTD,
   654  			"p.td", peerTD,
   655  			"p.addr", peeraddr.Hex(),
   656  			"synchronise", trueTD.Cmp(pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())),
   657  		)
   658  		// Update the peer's total difficulty if better than the previous
   659  		if _, td := p.Head(); trueTD.Cmp(td) > 0 {
   660  			p.SetHead(trueHead, trueTD)
   661  
   662  			// Schedule a sync if above ours. Note, this will not fire a sync for a gap of
   663  			// a single block (as the true TD is below the propagated block), however this
   664  			// scenario should easily be covered by the fetcher.
   665  			//currentBlock := pm.blockchain.CurrentBlock()
   666  			if trueTD.Cmp(pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())) > 0 {
   667  				go pm.synchronise(p)
   668  			}
   669  		}
   670  
   671  	case msg.Code == TxMsg:
   672  		// Transactions arrived, make sure we have a valid and fresh chain to handle them
   673  		if atomic.LoadUint32(&pm.acceptTxs) == 0 {
   674  			break
   675  		}
   676  		// Transactions can be processed, parse all of them and deliver to the pool
   677  		var txs []*types.Transaction
   678  		if err := msg.Decode(&txs); err != nil {
   679  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   680  		}
   681  		for i, tx := range txs {
   682  			// Validate and mark the remote transaction
   683  			if tx == nil {
   684  				return errResp(ErrDecode, "transaction %d is nil", i)
   685  			}
   686  			p.MarkTransaction(tx.Hash())
   687  		}
   688  		pm.txpool.AddRemotes(txs)
   689  
   690  	default:
   691  		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
   692  	}
   693  	return nil
   694  }
   695  
   696  // BroadcastBlock will either propagate a block to a subset of its peers, or
   697  // will only announce its availability (depending on what's requested).
   698  func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
   699  	hash := block.Hash()
   700  	peers := pm.peers.PeersWithoutBlock(hash)
   701  
   702  	// If propagation is requested, send to a subset of our peers
   703  	if propagate {
   704  		// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
   705  		var td *big.Int
   706  		if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil {
   707  			td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))
   708  		} else {
   709  			log.Error("Propagating dangling block", "number", block.Number(), "hash", hash)
   710  			return
   711  		}
   712  		// Send the block to a subset of our peers
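        		// Limit direct propagation to roughly sqrt(len(peers)); the remaining peers learn of the
        		// block through the separate hash-announcement pass.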
   713  		transfer := peers[:int(math.Sqrt(float64(len(peers))))]
   714  		for _, peer := range transfer {
   715  			err := peer.SendNewBlock(block, td)
   716  			if err != nil {
   717  				log.Error("SendNewBlock----->", "number", block.Number(), "peer", peer.String(), "err", err)
   718  			}
   719  		}
   720  		log.Trace("Propagated block", "hash", hash, "recipients", len(transfer), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
   721  		return
   722  	}
   723  	// Otherwise if the block is indeed in our own chain, announce it
   724  	if pm.blockchain.HasBlock(hash, block.NumberU64()) {
   725  		for _, peer := range peers {
   726  			err := peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
   727  			if err != nil {
   728  				log.Error("SendNewBlockHashes----->", "number", block.Number(), "peer", peer.String(), "err", err)
   729  			}
   730  		}
   731  		log.Trace("Announced block", "hash", hash, "recipients", len(peers), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
   732  	}
   733  }
   734  
   735  // BroadcastTx will propagate a transaction to all peers which are not known to
   736  // already have the given transaction.
   737  func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction) {
   738  	// Broadcast transaction to a batch of peers not knowing about it
   739  	peers := pm.peers.PeersWithoutTx(hash)
   740  	//FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
   741  	for _, peer := range peers {
   742  		peer.SendTransactions(types.Transactions{tx})
   743  	}
   744  	log.Trace("Broadcast transaction", "hash", hash, "recipients", len(peers))
   745  }
   746  
   747  // Mined broadcast loop
   748  func (self *ProtocolManager) minedBroadcastLoop() {
   749  	// automatically stops when unsubscribed
   750  	for obj := range self.minedBlockSub.Chan() {
   751  		switch ev := obj.Data.(type) {
   752  		case core.NewMinedBlockEvent:
   753  			self.BroadcastBlock(ev.Block, true)  // First propagate block to peers
   754  			self.BroadcastBlock(ev.Block, false) // Only then announce to the rest
   755  		}
   756  	}
   757  }
   758  
   759  func (self *ProtocolManager) txBroadcastLoop() {
   760  	for {
   761  		select {
   762  		case event := <-self.txCh:
   763  			// add by liangc
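        			// Skip relaying chief-contract update transactions (To == chief address and chief-update
        			// calldata); presumably each node submits its own update, so rebroadcasting is unnecessary.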
   764  			if event.Tx.To() == nil || !(params.IsChiefAddress(*event.Tx.To()) && params.IsChiefUpdate(event.Tx.Data())) {
   765  				self.BroadcastTx(event.Tx.Hash(), event.Tx)
   766  			}
   767  			// Err() channel will be closed when unsubscribing.
   768  		case <-self.txSub.Err():
   769  			return
   770  		}
   771  	}
   772  }
   773  
   774  // NodeInfo represents a short summary of the Ethereum sub-protocol metadata
   775  // known about the host peer.
   776  type NodeInfo struct {
   777  	Network    uint64              `json:"network"`    // Ethereum network ID (1=Frontier, 2=Morden, 3=Ropsten, 4=Rinkeby)
   778  	Difficulty *big.Int            `json:"difficulty"` // Total difficulty of the host's blockchain
   779  	Genesis    common.Hash         `json:"genesis"`    // SHA3 hash of the host's genesis block
   780  	Config     *params.ChainConfig `json:"config"`     // Chain configuration for the fork rules
   781  	Head       common.Hash         `json:"head"`       // SHA3 hash of the host's best owned block
   782  }
   783  
   784  // NodeInfo retrieves some protocol metadata about the running host node.
   785  func (self *ProtocolManager) NodeInfo() *NodeInfo {
   786  	currentBlock := self.blockchain.CurrentBlock()
   787  	return &NodeInfo{
   788  		Network:    self.networkId,
   789  		Difficulty: self.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64()),
   790  		Genesis:    self.blockchain.Genesis().Hash(),
   791  		Config:     self.blockchain.Config(),
   792  		Head:       currentBlock.Hash(),
   793  	}
   794  }