github.com/n1ghtfa1l/go-vnt@v0.6.4-alpha.6/vnt/handler.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package vnt
    18  
    19  import (
    20  	"context"
    21  	"encoding/json"
    22  	"errors"
    23  	"fmt"
    24  	"math/big"
    25  	"sync"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/vntchain/go-vnt/common"
    30  	"github.com/vntchain/go-vnt/consensus"
    31  	"github.com/vntchain/go-vnt/core"
    32  	"github.com/vntchain/go-vnt/core/types"
    33  	"github.com/vntchain/go-vnt/event"
    34  	"github.com/vntchain/go-vnt/log"
    35  	"github.com/vntchain/go-vnt/node"
    36  	"github.com/vntchain/go-vnt/params"
    37  	"github.com/vntchain/go-vnt/rlp"
    38  	"github.com/vntchain/go-vnt/vnt/downloader"
    39  	"github.com/vntchain/go-vnt/vnt/fetcher"
    40  	"github.com/vntchain/go-vnt/vntdb"
    41  	"github.com/vntchain/go-vnt/vntp2p"
    42  
    43  	libp2p "github.com/libp2p/go-libp2p-peer"
    44  )
    45  
const (
	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header

	// txChanSize is the size of channel listening to NewTxsEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 4096
)
    54  
    55  // errIncompatibleConfig is returned if the requested protocols and configs are
    56  // not compatible (low protocol version restrictions and high requirements).
    57  var errIncompatibleConfig = errors.New("incompatible configuration")
    58  
    59  func errResp(code errCode, format string, v ...interface{}) error {
    60  	return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
    61  }
    62  
// ProtocolManager manages the VNT wire protocol: peer life cycle, block and
// transaction broadcasting, BFT message relay, and chain synchronisation.
type ProtocolManager struct {
	networkId uint64

	fastSync  uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
	acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)

	txpool      txPool
	blockchain  *core.BlockChain
	chainconfig *params.ChainConfig
	maxPeers    int

	downloader *downloader.Downloader
	fetcher    *fetcher.Fetcher
	peers      *peerSet
	node       *node.Node

	// SubProtocols holds one protocol descriptor per supported version.
	SubProtocols []vntp2p.Protocol

	eventMux         *event.TypeMux
	txsCh            chan core.NewTxsEvent
	txsSub           event.Subscription
	producedBlockSub *event.TypeMuxSubscription
	bftMsgSub        *event.TypeMuxSubscription
	bftPeerSub       *event.TypeMuxSubscription

	// channels for fetcher, syncer, txsyncLoop
	newPeerCh   chan *peer
	txsyncCh    chan *txsync
	quitSync    chan struct{}
	noMorePeers chan struct{}

	// wait group is used for graceful shutdowns during downloading
	// and processing
	wg sync.WaitGroup

	urlsCh chan []string // carries the p2p URLs of witness nodes from bftPeerLoop to resetBftPeerLoop
}
   100  
// NewProtocolManager returns a new VNT sub protocol manager. The VNT sub protocol manages peers capable
// with the VNT network.
//
// It wires together the downloader (bulk sync), the fetcher (announced-block
// retrieval) and one vntp2p.Protocol descriptor per supported protocol version.
// Returns errIncompatibleConfig when no protocol version is usable with the
// requested sync mode.
func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkId uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb vntdb.Database, node *node.Node) (*ProtocolManager, error) {
	// Create the protocol manager with the base fields
	manager := &ProtocolManager{
		networkId:   networkId,
		eventMux:    mux,
		txpool:      txpool,
		blockchain:  blockchain,
		chainconfig: config,
		peers:       newPeerSet(),
		newPeerCh:   make(chan *peer),
		noMorePeers: make(chan struct{}),
		txsyncCh:    make(chan *txsync),
		quitSync:    make(chan struct{}),
		node:        node,

		urlsCh: make(chan []string),
	}
	// Figure out whether to allow fast sync or not: fast sync is only safe
	// from an empty chain, so fall back to full sync otherwise.
	if mode == downloader.FastSync && blockchain.CurrentBlock().NumberU64() > 0 {
		log.Warn("Blockchain not empty, fast sync disabled")
		mode = downloader.FullSync
	}
	if mode == downloader.FastSync {
		manager.fastSync = uint32(1)
	}
	// Initiate a sub-protocol for every implemented version we can handle
	manager.SubProtocols = make([]vntp2p.Protocol, 0, len(ProtocolVersions))
	for i, version := range ProtocolVersions {
		// Skip protocol version if incompatible with the mode of operation
		if mode == downloader.FastSync && version < vnt63 {
			continue
		}
		// Compatible; initialise the sub-protocol
		version := version // Closure for the run
		manager.SubProtocols = append(manager.SubProtocols, vntp2p.Protocol{
			Name:    ProtocolName,
			Version: version,
			Length:  ProtocolLengths[i],
			Run: func(p *vntp2p.Peer, rw vntp2p.MsgReadWriter) error {
				peer := manager.newPeer(int(version), p, rw)
				select {
				case manager.newPeerCh <- peer:
					// Track the handler goroutine so Stop can wait for it.
					manager.wg.Add(1)
					defer manager.wg.Done()
					return manager.handle(peer)
				case <-manager.quitSync:
					return vntp2p.DiscQuitting
				}
			},
			NodeInfo: func() interface{} {
				return manager.NodeInfo()
			},
			PeerInfo: func(id libp2p.ID) interface{} {
				if p := manager.peers.Peer(id); p != nil {
					return p.Info()
				}
				return nil
			},
		})
	}
	if len(manager.SubProtocols) == 0 {
		return nil, errIncompatibleConfig
	}
	// Construct the different synchronisation mechanisms
	manager.downloader = downloader.New(mode, chaindb, manager.eventMux, blockchain, nil, manager.removePeer)

	// validator verifies a header coming from a remote peer.
	validator := func(header *types.Header) error {
		return engine.VerifyHeader(blockchain, header, true)
	}
	// heighter reports the current local chain height for the fetcher.
	heighter := func() uint64 {
		return blockchain.CurrentBlock().NumberU64()
	}
	// inserter imports fetched blocks, refusing them while fast sync runs.
	inserter := func(blocks types.Blocks) (int, error) {
		// If fast sync is running, deny importing weird blocks
		if atomic.LoadUint32(&manager.fastSync) == 1 {
			log.Warn("Discarded bad propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
			return 0, nil
		}
		atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
		return manager.blockchain.InsertChain(blocks)
	}
	manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)

	return manager, nil
}
   188  
   189  func (pm *ProtocolManager) removePeer(id libp2p.ID) {
   190  	// Short circuit if the peer was already removed
   191  	peer := pm.peers.Peer(id)
   192  	if peer == nil {
   193  		return
   194  	}
   195  	log.Debug("Removing VNT peer", "peer", id)
   196  
   197  	// Unregister the peer from the downloader and VNT peer set
   198  	pm.downloader.UnregisterPeer(id)
   199  	if err := pm.peers.Unregister(id); err != nil {
   200  		log.Error("Peer removal failed", "peer", id, "err", err)
   201  	}
   202  	// Hard disconnect at the networking layer
   203  	if peer != nil {
   204  		peer.Peer.Disconnect(vntp2p.DiscUselessPeer)
   205  	}
   206  }
   207  
// resetBftPeer updates the current bft peer connections. If the node has no
// connection with one of them yet, it starts connecting to it. url format is:
// /ip4/192.168.102.2/tcp/5216/ipfs/1kHBzN17vVE75rwZA7vKAFfxUYS8XMh6QBYS6JWF13xHGX9
func (pm *ProtocolManager) resetBftPeer(urls []string) {
	// Guard bftPeers and the connected-peer lookup below.
	pm.peers.lock.Lock()
	defer pm.peers.lock.Unlock()

	// Clean old records
	pm.peers.bftPeers = make(map[libp2p.ID]struct{})

	// Add new records, and addPeer if not connected yet
	selfID := pm.node.Server().NodeInfo().ID
	for _, url := range urls {
		node, err := vntp2p.ParseNode(url)
		if err != nil {
			log.Error("resetBftPeer invalid vnode:", "error", err)
			continue
		}
		// Skip our own entry in the witness list.
		if node.Id.ToString() == selfID {
			continue
		}

		pm.peers.bftPeers[node.Id] = struct{}{}
		if _, exists := pm.peers.peers[node.Id]; !exists {
			log.Debug("Reset bft peer, connecting to", "peer", url)
			// Dial asynchronously so one slow peer doesn't block the reset.
			go pm.node.Server().AddPeer(context.Background(), node)
		}
	}
}
   237  
// Start launches all protocol manager goroutines: transaction broadcasting,
// produced-block/BFT broadcasting, BFT peer maintenance and the sync handlers.
// maxPeers caps the number of connected peers.
func (pm *ProtocolManager) Start(maxPeers int) {
	pm.maxPeers = maxPeers

	// broadcast transactions
	pm.txsCh = make(chan core.NewTxsEvent, txChanSize)
	pm.txsSub = pm.txpool.SubscribeNewTxsEvent(pm.txsCh)
	go pm.txBroadcastLoop()

	// broadcast produced blocks
	pm.producedBlockSub = pm.eventMux.Subscribe(core.NewProducedBlockEvent{})
	pm.bftMsgSub = pm.eventMux.Subscribe(core.SendBftMsgEvent{})
	pm.bftPeerSub = pm.eventMux.Subscribe(core.BftPeerChangeEvent{})
	go pm.producedBroadcastLoop()
	go pm.bftBroadcastLoop()

	go pm.resetBftPeerLoop()

	// start sync handlers
	go pm.syncer()
	go pm.txsyncLoop()
	go pm.bftPeerLoop()
}
   260  
// Stop tears down the protocol manager: it unsubscribes all event loops,
// stops the sync machinery, disconnects peers and waits for all peer handler
// goroutines to finish.
func (pm *ProtocolManager) Stop() {
	log.Info("Stopping VNT protocol")

	pm.txsSub.Unsubscribe()           // quits txBroadcastLoop
	pm.producedBlockSub.Unsubscribe() // quits blockBroadcastLoop
	pm.bftMsgSub.Unsubscribe()        // quits bftBroadcastLoop
	pm.bftPeerSub.Unsubscribe()       // quits bftPeerLoop

	// Closing urlsCh stops resetBftPeerLoop.
	// NOTE(review): bftPeerLoop sends on urlsCh; this relies on the
	// Unsubscribe above having quiesced it before the close — a send racing
	// with the close would panic. Confirm TypeMux drains before returning.
	close(pm.urlsCh)

	// Quit the sync loop.
	// After this send has completed, no new peers will be accepted.
	pm.noMorePeers <- struct{}{}

	// Quit fetcher, txsyncLoop.
	close(pm.quitSync)

	// Disconnect existing sessions.
	// This also closes the gate for any new registrations on the peer set.
	// sessions which are already established but not added to pm.peers yet
	// will exit when they try to register.
	pm.peers.Close()

	// Wait for all peer handler goroutines and the loops to come down.
	pm.wg.Wait()

	log.Info("VNT protocol stopped")
}
   289  
   290  func (pm *ProtocolManager) newPeer(pv int, p *vntp2p.Peer, rw vntp2p.MsgReadWriter) *peer {
   291  	return newPeer(pv, p, newMeteredMsgWriter(rw))
   292  }
   293  
// handle is the callback invoked to manage the life cycle of an vnt peer. When
// this function terminates, the peer is disconnected.
//
// Sequence: VNT handshake, local registration, downloader registration,
// initial transaction sync, then the message loop until the first error.
func (pm *ProtocolManager) handle(p *peer) error {
	// Ignore maxPeers if this is a trusted peer

	// if pm.peers.Len() >= pm.maxPeers && !p.Peer.Info().Network.Trusted {
	// 	return vntp2p.DiscTooManyPeers
	// }

	// p.Log().Debug("VNT peer connected", "name", p.Name())

	// Execute the VNT handshake
	var (
		genesis = pm.blockchain.Genesis()
		head    = pm.blockchain.CurrentHeader()
		hash    = head.Hash()
		number  = head.Number.Uint64()
		td      = pm.blockchain.GetTd(hash, number)
	)
	if err := p.Handshake(pm.networkId, td, hash, genesis.Hash()); err != nil {
		p.Log().Debug("VNT handshake failed", "err", err)
		return err
	}
	// Initialise per-version metering on the connection if available.
	if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
		rw.Init(p.version)
	}
	// Register the peer locally
	if err := pm.peers.Register(p); err != nil {
		p.Log().Error("VNT peer registration failed", "err", err)
		return err
	}
	// Always clean up on exit, whatever ends the message loop.
	defer pm.removePeer(p.id)

	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
	if err := pm.downloader.RegisterPeer(p.id, p.version, p); err != nil {
		return err
	}
	// Propagate existing transactions. new transactions appearing
	// after this will be sent via broadcasts.
	pm.syncTransactions(p)

	// main loop. handle incoming messages until the first failure.
	for {
		if err := pm.handleMsg(p); err != nil {
			p.Log().Debug("VNT message handling failed", "err", err)
			return err
		}
	}
}
   343  
// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
//
// It reads exactly one message, enforces the size limit, then dispatches on
// the message type: header/body/state/receipt queries and replies, block
// announcements, transactions and the three BFT consensus message kinds.
func (pm *ProtocolManager) handleMsg(p *peer) error {
	// Read the next message from the remote peer, and ensure it's fully consumed
	msg, err := p.rw.ReadMsg()
	if err != nil {
		return err
	}
	size := msg.GetBodySize()
	if size > ProtocolMaxMsgSize {
		return errResp(ErrMsgTooLarge, "%v > %v", size, ProtocolMaxMsgSize)
	}

	// In principle the new protocol handling leaves no residual payload data
	// unconsumed, so no explicit discard is needed.
	//defer msg.Discard()

	// Handle the message depending on its contents
	switch {
	case msg.Body.Type == StatusMsg:
		// Status messages should never arrive after the handshake
		return errResp(ErrExtraStatusMsg, "uncontrolled status message")

	// Block header query, collect the requested headers and reply
	case msg.Body.Type == GetBlockHeadersMsg:
		// Decode the complex header query
		var query getBlockHeadersData
		if err := msg.Decode(&query); err != nil {
			return errResp(ErrDecode, "%v: %v", msg, err)
		}
		hashMode := query.Origin.Hash != (common.Hash{})
		first := true
		maxNonCanonical := uint64(100)

		// Gather headers until the fetch or network limits is reached
		var (
			bytes   common.StorageSize
			headers []*types.Header
			unknown bool
		)
		for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
			// Retrieve the next header satisfying the query
			var origin *types.Header
			if hashMode {
				if first {
					first = false
					origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash)
					if origin != nil {
						query.Origin.Number = origin.Number.Uint64()
					}
				} else {
					origin = pm.blockchain.GetHeader(query.Origin.Hash, query.Origin.Number)
				}
			} else {
				origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number)
			}
			if origin == nil {
				break
			}
			headers = append(headers, origin)
			bytes += estHeaderRlpSize

			// Advance to the next header of the query
			switch {
			case hashMode && query.Reverse:
				// Hash based traversal towards the genesis block
				ancestor := query.Skip + 1
				if ancestor == 0 {
					// Skip+1 overflowed; stop rather than loop on ourselves.
					unknown = true
				} else {
					query.Origin.Hash, query.Origin.Number = pm.blockchain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)
					unknown = (query.Origin.Hash == common.Hash{})
				}
			case hashMode && !query.Reverse:
				// Hash based traversal towards the leaf block
				var (
					current = origin.Number.Uint64()
					next    = current + query.Skip + 1
				)
				if next <= current {
					// Overflow on next: treat as a malicious query and stop.
					infos, _ := json.MarshalIndent(p.Peer.Info(), "", "  ")
					p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
					unknown = true
				} else {
					if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
						nextHash := header.Hash()
						// Only follow next if it is actually a descendant of
						// the current origin on the canonical chain.
						expOldHash, _ := pm.blockchain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical)
						if expOldHash == query.Origin.Hash {
							query.Origin.Hash, query.Origin.Number = nextHash, next
						} else {
							unknown = true
						}
					} else {
						unknown = true
					}
				}
			case query.Reverse:
				// Number based traversal towards the genesis block
				if query.Origin.Number >= query.Skip+1 {
					query.Origin.Number -= query.Skip + 1
				} else {
					unknown = true
				}

			case !query.Reverse:
				// Number based traversal towards the leaf block
				query.Origin.Number += query.Skip + 1
			}
		}
		return p.SendBlockHeaders(headers)

	case msg.Body.Type == BlockHeadersMsg:
		// A batch of headers arrived to one of our previous requests
		var headers []*types.Header
		if err := msg.Decode(&headers); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		// Filter out any explicitly requested headers, deliver the rest to the downloader
		filter := len(headers) == 1
		if filter {
			// Irrelevant of the fork checks, send the header to the fetcher just in case
			headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
		}
		if len(headers) > 0 || !filter {
			err := pm.downloader.DeliverHeaders(p.id, headers)
			if err != nil {
				log.Debug("Failed to deliver headers", "err", err)
			}
		}

	case msg.Body.Type == GetBlockBodiesMsg:
		// Decode the retrieval message
		msgStream := rlp.NewStream(msg.Body.Payload, uint64(msg.Body.PayloadSize))
		if _, err := msgStream.List(); err != nil {
			return err
		}
		// Gather blocks until the fetch or network limits is reached
		var (
			hash   common.Hash
			bytes  int
			bodies []rlp.RawValue
		)
		for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch {
			// Retrieve the hash of the next block
			if err := msgStream.Decode(&hash); err == rlp.EOL {
				break
			} else if err != nil {
				return errResp(ErrDecode, "msg %v: %v", msg, err)
			}
			// Retrieve the requested block body, stopping if enough was found
			if data := pm.blockchain.GetBodyRLP(hash); len(data) != 0 {
				bodies = append(bodies, data)
				bytes += len(data)
			}
		}
		return p.SendBlockBodiesRLP(bodies)

	case msg.Body.Type == BlockBodiesMsg:
		// A batch of block bodies arrived to one of our previous requests
		var request blockBodiesData
		if err := msg.Decode(&request); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		// Deliver them all to the downloader for queuing
		transactions := make([][]*types.Transaction, len(request))

		for i, body := range request {
			transactions[i] = body.Transactions
		}
		// Filter out any explicitly requested bodies, deliver the rest to the downloader
		filter := len(transactions) > 0
		if filter {
			transactions = pm.fetcher.FilterBodies(p.id, transactions, time.Now())
		}
		err := pm.downloader.DeliverBodies(p.id, transactions)
		if err != nil {
			p.Log().Debug("Failed to deliver bodies", "err", err)
		}

	case p.version >= vnt63 && msg.Body.Type == GetNodeDataMsg:
		// Decode the retrieval message
		msgStream := rlp.NewStream(msg.Body.Payload, uint64(msg.Body.PayloadSize))
		if _, err := msgStream.List(); err != nil {
			return err
		}
		// Gather state data until the fetch or network limits is reached
		var (
			hash  common.Hash
			bytes int
			data  [][]byte
		)
		for bytes < softResponseLimit && len(data) < downloader.MaxStateFetch {
			// Retrieve the hash of the next state entry
			if err := msgStream.Decode(&hash); err == rlp.EOL {
				break
			} else if err != nil {
				return errResp(ErrDecode, "msg %v: %v", msg, err)
			}
			// Retrieve the requested state entry, stopping if enough was found
			if entry, err := pm.blockchain.TrieNode(hash); err == nil {
				data = append(data, entry)
				bytes += len(entry)
			}
		}
		return p.SendNodeData(data)

	case p.version >= vnt63 && msg.Body.Type == NodeDataMsg:
		// A batch of node state data arrived to one of our previous requests
		var data [][]byte
		if err := msg.Decode(&data); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		// Deliver all to the downloader
		if err := pm.downloader.DeliverNodeData(p.id, data); err != nil {
			log.Debug("Failed to deliver node state data", "err", err)
		}

	case p.version >= vnt63 && msg.Body.Type == GetReceiptsMsg:
		// Decode the retrieval message
		msgStream := rlp.NewStream(msg.Body.Payload, uint64(msg.Body.PayloadSize))
		if _, err := msgStream.List(); err != nil {
			return err
		}
		// Gather state data until the fetch or network limits is reached
		var (
			hash     common.Hash
			bytes    int
			receipts []rlp.RawValue
		)
		for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptFetch {
			// Retrieve the hash of the next block
			if err := msgStream.Decode(&hash); err == rlp.EOL {
				break
			} else if err != nil {
				return errResp(ErrDecode, "msg %v: %v", msg, err)
			}
			// Retrieve the requested block's receipts, skipping if unknown to us
			results := pm.blockchain.GetReceiptsByHash(hash)
			if results == nil {
				if header := pm.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
					continue
				}
			}
			// If known, encode and queue for response packet
			if encoded, err := rlp.EncodeToBytes(results); err != nil {
				log.Error("Failed to encode receipt", "err", err)
			} else {
				receipts = append(receipts, encoded)
				bytes += len(encoded)
			}
		}
		return p.SendReceiptsRLP(receipts)

	case p.version >= vnt63 && msg.Body.Type == ReceiptsMsg:
		// A batch of receipts arrived to one of our previous requests
		var receipts [][]*types.Receipt
		if err := msg.Decode(&receipts); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		// Deliver all to the downloader
		if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil {
			log.Debug("Failed to deliver receipts", "err", err)
		}

	case msg.Body.Type == NewBlockHashesMsg:
		var announces newBlockHashesData
		if err := msg.Decode(&announces); err != nil {
			return errResp(ErrDecode, "%v: %v", msg, err)
		}
		// Mark the hashes as present at the remote node
		for _, block := range announces {
			log.Debug("Receive announce", "high", block.Number, "hash", block.Hash, "parent", block.ParentHash, "parent td", block.ParentTD, "from", p.id)
			p.MarkBlock(block.Hash)
		}
		// Schedule all the unknown hashes for retrieval
		unknown := make(newBlockHashesData, 0, len(announces))
		for _, block := range announces {
			if !pm.blockchain.HasBlock(block.Hash, block.Number) {
				unknown = append(unknown, block)
			}
		}

		// Track the highest parent TD seen in the announces to detect
		// whether we have fallen behind this peer.
		maxTd := big.NewInt(0)
		var maxHash common.Hash
		for _, block := range unknown {
			pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
			if block.ParentTD.Cmp(maxTd) > 0 {
				maxTd = block.ParentTD
				maxHash = block.ParentHash
			}
		}
		// Update with peer when fall behind
		pm.updatePeerHeadAndSync(p, maxHash, maxTd)

	case msg.Body.Type == NewBlockMsg:
		// This message is forbid. The peer is malicious and will be removed.
		log.Info("Receive NewBlockMsg from", "peer", p.id)
		pm.removePeer(p.id)

	case msg.Body.Type == TxMsg:
		// Transactions arrived, make sure we have a valid and fresh chain to handle them
		if atomic.LoadUint32(&pm.acceptTxs) == 0 {
			break
		}
		// Transactions can be processed, parse all of them and deliver to the pool
		var txs []*types.Transaction
		if err := msg.Decode(&txs); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		for i, tx := range txs {
			// Validate and mark the remote transaction
			if tx == nil {
				return errResp(ErrDecode, "transaction %d is nil", i)
			}
			p.MarkTransaction(tx.Hash())
		}
		pm.txpool.AddRemotes(txs)
	case msg.Body.Type == BftPreprepareMsg:
		bftMsg := types.PreprepareMsg{}
		if err := msg.Decode(&bftMsg); err != nil {
			log.Error("Decode bftMsg Error", "err", err)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		pm.postRecBftEvent(&bftMsg)
	case msg.Body.Type == BftPrepareMsg:
		bftMsg := types.PrepareMsg{}
		if err := msg.Decode(&bftMsg); err != nil {
			log.Error("Decode bftMsg Error", "err", err)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		pm.postRecBftEvent(&bftMsg)
	case msg.Body.Type == BftCommitMsg:
		bftMsg := types.CommitMsg{}
		if err := msg.Decode(&bftMsg); err != nil {
			log.Error("Decode bftMsg Error", "err", err)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		pm.postRecBftEvent(&bftMsg)
	default:
		return errResp(ErrInvalidMsgCode, "%v", msg.Body.Type)
	}
	return nil
}
   686  
// updatePeerHeadAndSync will update peer's head and start a sync if local fall behind of peer.
func (pm *ProtocolManager) updatePeerHeadAndSync(p *peer, parentHash common.Hash, parentTd *big.Int) {
	// Update the peers total difficulty if better than the previous
	if _, td := p.Head(); parentTd.Cmp(td) > 0 {
		p.SetHead(parentHash, parentTd)

		// Schedule a sync if above ours. Note, this will not fire a sync for a gap of
		// a single block (as the true TD is below the propagated block), however this
		// scenario should easily be covered by the fetcher.
		currentBlock := pm.blockchain.CurrentBlock()
		currentTd := pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
		if parentTd.Cmp(currentTd) > 0 {
			log.Debug("updatePeerHeadAndSync: local behind peer", "local td", currentTd.Int64(), "parent td", parentTd.Int64())
			// Synchronise in the background; handleMsg must not block.
			go pm.synchronise(p)
		}
	}
}
   704  
   705  func (pm *ProtocolManager) postRecBftEvent(msg types.ConsensusMsg) {
   706  	log.Debug("Post RecBftEvent", "type", msg.Type(),
   707  		"h", msg.GetBlockNum(), "r", msg.GetRound(), "hash", msg.Hash())
   708  	pm.eventMux.Post(core.RecBftMsgEvent{
   709  		BftMsg: types.BftMsg{
   710  			BftType: msg.Type(),
   711  			Msg:     msg,
   712  		},
   713  	})
   714  }
   715  
// BroadcastBlock will either propagate a block to a subset of it's peers, or
// will only announce it's availability (depending what's requested).
//
// NOTE(review): the visible body never reads `propagate` — it only announces
// the hash to peers that don't yet have the block. Confirm whether full-block
// propagation was removed on purpose before relying on the parameter.
func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
	hash := block.Hash()
	peers := pm.peers.PeersWithoutBlock(hash)

	// Otherwise if the block is indeed in out own chain, announce it
	parentTD := pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1)
	if pm.blockchain.HasBlock(hash, block.NumberU64()) {
		for _, peer := range peers {
			log.Debug("Broadcast announce", "high", block.NumberU64(), "hash", block.Hash(), "to peer", peer)
			peer.AsyncSendNewBlockHash(block, parentTD)
		}
		log.Trace("Announced block", "hash", hash, "recipients", len(peers), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
	}
}
   732  
   733  // BroadcastTxs will propagate a batch of transactions to all peers which are not known to
   734  // already have the given transaction.
   735  func (pm *ProtocolManager) BroadcastTxs(txs types.Transactions) {
   736  	var txset = make(map[*peer]types.Transactions)
   737  
   738  	// Broadcast transactions to a batch of peers not knowing about it
   739  	for _, tx := range txs {
   740  		peers := pm.peers.PeersWithoutTx(tx.Hash())
   741  		for _, peer := range peers {
   742  			txset[peer] = append(txset[peer], tx)
   743  		}
   744  		log.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(peers))
   745  	}
   746  	// FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
   747  	for peer, txs := range txset {
   748  		peer.AsyncSendTransactions(txs)
   749  	}
   750  }
   751  
   752  func (pm *ProtocolManager) BroadcastBftMsg(bftMsg types.BftMsg) {
   753  	peers := pm.peers.PeersForBft()
   754  	log.Trace("BroadcastBftMsg", "type", bftMsg.BftType, "hash", bftMsg.Msg.Hash(), "number of bft peer", len(peers))
   755  
   756  	for _, p := range peers {
   757  		// using goroutine for each peer for peer may connection
   758  		go func(p *peer) {
   759  			log.Trace("BroadcastBftMsg", "to peer", p.id.ToString())
   760  			err := p.SendBftMsg(bftMsg)
   761  			if err != nil {
   762  				log.Error("BroadcastBftMsg error", "to peer", p.id.ToString(), "error", err)
   763  			} else {
   764  				log.Trace("BroadcastBftMsg success", "to peer", p.id.ToString())
   765  			}
   766  		}(p)
   767  	}
   768  
   769  	log.Trace("BroadcastBftMsg exit")
   770  }
   771  
   772  // producedBroadcastLoop
   773  func (pm *ProtocolManager) producedBroadcastLoop() {
   774  	// automatically stops if unsubscribe
   775  	for obj := range pm.producedBlockSub.Chan() {
   776  		switch ev := obj.Data.(type) {
   777  		case core.NewProducedBlockEvent:
   778  			log.Debug("PM receive NewProducedBlockEvent")
   779  			pm.BroadcastBlock(ev.Block, false) // Only announce all the peer
   780  		}
   781  	}
   782  }
   783  
   784  func (pm *ProtocolManager) txBroadcastLoop() {
   785  	for {
   786  		select {
   787  		case event := <-pm.txsCh:
   788  			pm.BroadcastTxs(event.Txs)
   789  
   790  		// Err() channel will be closed when unsubscribing.
   791  		case <-pm.txsSub.Err():
   792  			return
   793  		}
   794  	}
   795  }
   796  
   797  func (pm *ProtocolManager) bftBroadcastLoop() {
   798  	for obj := range pm.bftMsgSub.Chan() {
   799  		switch ev := obj.Data.(type) {
   800  		case core.SendBftMsgEvent:
   801  			pm.BroadcastBftMsg(ev.BftMsg) // First propagate block to peers
   802  		}
   803  	}
   804  }
   805  
// bftPeerLoop forwards witness URL updates to resetBftPeerLoop via urlsCh.
// It exits when bftPeerSub is unsubscribed (its channel is then closed).
func (pm *ProtocolManager) bftPeerLoop() {
	for obj := range pm.bftPeerSub.Chan() {
		switch ev := obj.Data.(type) {
		case core.BftPeerChangeEvent:
			log.Trace("Receive BftPeerChangeEvent")
			// Hand off to resetBftPeerLoop so dialing doesn't block this loop.
			pm.urlsCh <- ev.Urls
			// pm.resetBftPeer(ev.Urls) // First propagate block to peers
		}
	}
}
   816  
// NodeInfo represents a short summary of the VNT sub-protocol metadata
// known about the host peer.
type NodeInfo struct {
	Network    uint64              `json:"network"`    // VNT network ID (1=Frontier)
	Difficulty *big.Int            `json:"difficulty"` // Total difficulty of the host's blockchain
	Genesis    common.Hash         `json:"genesis"`    // SHA3 hash of the host's genesis block
	Config     *params.ChainConfig `json:"config"`     // Chain configuration for the fork rules
	Head       common.Hash         `json:"head"`       // SHA3 hash of the host's best owned block
}
   826  
   827  // NodeInfo retrieves some protocol metadata about the running host node.
   828  func (pm *ProtocolManager) NodeInfo() *NodeInfo {
   829  	currentBlock := pm.blockchain.CurrentBlock()
   830  	return &NodeInfo{
   831  		Network:    pm.networkId,
   832  		Difficulty: pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64()),
   833  		Genesis:    pm.blockchain.Genesis().Hash(),
   834  		Config:     pm.blockchain.Config(),
   835  		Head:       currentBlock.Hash(),
   836  	}
   837  }
   838  
   839  func (pm *ProtocolManager) resetBftPeerLoop() {
   840  	log.Debug("resetBftPeerLoop start")
   841  	defer log.Debug("resetBftPeerLoop exit")
   842  
   843  	for urls := range pm.urlsCh {
   844  		pm.resetBftPeer(urls)
   845  	}
   846  }