github.com/coltonfike/e2c@v21.1.0+incompatible/eth/handler.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package eth
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"math"
    24  	"math/big"
    25  	"sync"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/ethereum/go-ethereum/common"
    30  	"github.com/ethereum/go-ethereum/consensus"
    31  	"github.com/ethereum/go-ethereum/consensus/clique"
    32  	"github.com/ethereum/go-ethereum/consensus/ethash"
    33  	"github.com/ethereum/go-ethereum/core"
    34  	"github.com/ethereum/go-ethereum/core/forkid"
    35  	"github.com/ethereum/go-ethereum/core/types"
    36  	"github.com/ethereum/go-ethereum/crypto"
    37  	"github.com/ethereum/go-ethereum/eth/downloader"
    38  	"github.com/ethereum/go-ethereum/eth/fetcher"
    39  	"github.com/ethereum/go-ethereum/ethdb"
    40  	"github.com/ethereum/go-ethereum/event"
    41  	"github.com/ethereum/go-ethereum/log"
    42  	"github.com/ethereum/go-ethereum/p2p"
    43  	"github.com/ethereum/go-ethereum/p2p/enode"
    44  	"github.com/ethereum/go-ethereum/params"
    45  	"github.com/ethereum/go-ethereum/rlp"
    46  	"github.com/ethereum/go-ethereum/trie"
    47  )
    48  
const (
	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header

	// txChanSize is the size of channel listening to NewTxsEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 4096

	// minBroadcastPeers is the minimum number of peers to broadcast new blocks to.
	minBroadcastPeers = 4
)
    60  
var (
	// syncChallengeTimeout is the time allowance for a node to reply to the
	// sync progress challenge before it is dropped.
	syncChallengeTimeout = 15 * time.Second
)
    64  
    65  func errResp(code errCode, format string, v ...interface{}) error {
    66  	return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
    67  }
    68  
// ProtocolManager is the hub of the eth wire protocol: it tracks connected
// peers, answers their chain-data queries and keeps the local chain in sync
// through the downloader (long range) and the fetcher (short range).
type ProtocolManager struct {
	networkID  uint64        // Ethereum network ID advertised during the handshake
	forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node

	fastSync  uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
	acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)

	checkpointNumber uint64      // Block number for the sync progress validator to cross reference
	checkpointHash   common.Hash // Block hash for the sync progress validator to cross reference

	txpool     txPool           // Transaction pool remote transactions are delivered into
	blockchain *core.BlockChain // Canonical chain used to serve queries and import blocks
	maxPeers   int              // Maximum number of peers accepted (trusted peers are exempt)

	downloader *downloader.Downloader // Long-range chain synchronisation
	fetcher    *fetcher.Fetcher       // Short-range retrieval of announced blocks
	peers      *peerSet               // Set of currently registered peers

	eventMux      *event.TypeMux             // Event mux used for mined-block announcements
	txsCh         chan core.NewTxsEvent      // Channel receiving new local/pool transactions
	txsSub        event.Subscription         // Subscription feeding txsCh
	minedBlockSub *event.TypeMuxSubscription // Subscription for locally mined blocks (nil in raft mode)

	whitelist map[uint64]common.Hash // Block number -> hash pairs that peers must agree on

	// channels for fetcher, syncer, txsyncLoop
	newPeerCh   chan *peer
	txsyncCh    chan *txsync
	quitSync    chan struct{}
	noMorePeers chan struct{}

	// wait group is used for graceful shutdowns during downloading
	// and processing
	wg sync.WaitGroup

	// Quorum
	raftMode bool             // Whether the node runs raft consensus (fetcher/downloader unused)
	engine   consensus.Engine // Consensus engine backing this node
}
   108  
// NewProtocolManager returns a new Ethereum sub protocol manager. The Ethereum
// sub protocol manages peers capable of interacting with the Ethereum network.
func NewProtocolManager(config *params.ChainConfig, checkpoint *params.TrustedCheckpoint, mode downloader.SyncMode, networkID uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database, cacheLimit int, whitelist map[uint64]common.Hash, raftMode bool) (*ProtocolManager, error) {
	// Create the protocol manager with the base fields
	manager := &ProtocolManager{
		networkID:   networkID,
		forkFilter:  forkid.NewFilter(blockchain),
		eventMux:    mux,
		txpool:      txpool,
		blockchain:  blockchain,
		peers:       newPeerSet(),
		whitelist:   whitelist,
		newPeerCh:   make(chan *peer),
		noMorePeers: make(chan struct{}),
		txsyncCh:    make(chan *txsync),
		quitSync:    make(chan struct{}),
		raftMode:    raftMode,
		engine:      engine,
	}

	// Quorum
	// Engines implementing consensus.Handler (e.g. BFT-style ones) get a back
	// reference so they can broadcast their own consensus messages.
	if handler, ok := manager.engine.(consensus.Handler); ok {
		handler.SetBroadcaster(manager)
	}
	// /Quorum

	if mode == downloader.FullSync {
		// The database seems empty as the current block is the genesis. Yet the fast
		// block is ahead, so fast sync was enabled for this node at a certain point.
		// The scenarios where this can happen is
		// * if the user manually (or via a bad block) rolled back a fast sync node
		//   below the sync point.
		// * the last fast sync is not finished while user specifies a full sync this
		//   time. But we don't have any recent state for full sync.
		// In these cases however it's safe to reenable fast sync.
		fullBlock, fastBlock := blockchain.CurrentBlock(), blockchain.CurrentFastBlock()
		if fullBlock.NumberU64() == 0 && fastBlock.NumberU64() > 0 {
			manager.fastSync = uint32(1)
			log.Warn("Switch sync mode from full sync to fast sync")
		}
	} else {
		if blockchain.CurrentBlock().NumberU64() > 0 {
			// Print warning log if database is not empty to run fast sync.
			log.Warn("Switch sync mode from fast sync to full sync")
		} else {
			// If fast sync was requested and our database is empty, grant it
			manager.fastSync = uint32(1)
		}
	}
	// If we have trusted checkpoints, enforce them on the chain
	if checkpoint != nil {
		manager.checkpointNumber = (checkpoint.SectionIndex+1)*params.CHTFrequency - 1
		manager.checkpointHash = checkpoint.SectionHead
	}

	// Construct the downloader (long sync) and its backing state bloom if fast
	// sync is requested. The downloader is responsible for deallocating the state
	// bloom when it's done.
	var stateBloom *trie.SyncBloom
	if atomic.LoadUint32(&manager.fastSync) == 1 {
		stateBloom = trie.NewSyncBloom(uint64(cacheLimit), chaindb)
	}
	manager.downloader = downloader.New(manager.checkpointNumber, chaindb, stateBloom, manager.eventMux, blockchain, nil, manager.removePeer)

	// Construct the fetcher (short sync)
	validator := func(header *types.Header) error {
		return engine.VerifyHeader(blockchain, header, true)
	}
	heighter := func() uint64 {
		return blockchain.CurrentBlock().NumberU64()
	}
	inserter := func(blocks types.Blocks) (int, error) {
		// If sync hasn't reached the checkpoint yet, deny importing weird blocks.
		//
		// Ideally we would also compare the head block's timestamp and similarly reject
		// the propagated block if the head is too old. Unfortunately there is a corner
		// case when starting new networks, where the genesis might be ancient (0 unix)
		// which would prevent full nodes from accepting it.
		if manager.blockchain.CurrentBlock().NumberU64() < manager.checkpointNumber {
			log.Warn("Unsynced yet, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
			return 0, nil
		}
		// If fast sync is running, deny importing weird blocks. This is a problematic
		// clause when starting up a new network, because fast-syncing miners might not
		// accept each others' blocks until a restart. Unfortunately we haven't figured
		// out a way yet where nodes can decide unilaterally whether the network is new
		// or not. This should be fixed if we figure out a solution.
		if atomic.LoadUint32(&manager.fastSync) == 1 {
			log.Warn("Fast syncing, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
			return 0, nil
		}
		n, err := manager.blockchain.InsertChain(blocks)
		if err == nil {
			atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
		}
		return n, err
	}
	manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)

	return manager, nil
}
   210  
// makeProtocol constructs the p2p.Protocol descriptor for the given protocol
// version, wiring peer lifecycle handling into the protocol manager. It
// panics if the consensus engine does not declare a message-code length for
// the requested version.
func (pm *ProtocolManager) makeProtocol(version uint) p2p.Protocol {
	// Quorum: Set p2p.Protocol info from engine.Protocol()
	length, ok := pm.engine.Protocol().Lengths[version]
	if !ok {
		panic("makeProtocol for unknown version")
	}

	return p2p.Protocol{
		Name:    pm.engine.Protocol().Name,
		Version: version,
		Length:  length,
		Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
			peer := pm.newPeer(int(version), p, rw)
			select {
			case pm.newPeerCh <- peer:
				// Track the handler goroutine so Stop can wait for it.
				pm.wg.Add(1)
				defer pm.wg.Done()
				return pm.handle(peer)
			case <-pm.quitSync:
				// Manager is shutting down; refuse the new peer.
				return p2p.DiscQuitting
			}
		},
		NodeInfo: func() interface{} {
			return pm.NodeInfo()
		},
		PeerInfo: func(id enode.ID) interface{} {
			if p := pm.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
				return p.Info()
			}
			return nil
		},
	}
}
   244  
   245  func (pm *ProtocolManager) removePeer(id string) {
   246  	// Short circuit if the peer was already removed
   247  	peer := pm.peers.Peer(id)
   248  	if peer == nil {
   249  		return
   250  	}
   251  	log.Debug("Removing Ethereum peer", "peer", id)
   252  
   253  	// Unregister the peer from the downloader and Ethereum peer set
   254  	pm.downloader.UnregisterPeer(id)
   255  	if err := pm.peers.Unregister(id); err != nil {
   256  		log.Error("Peer removal failed", "peer", id, "err", err)
   257  	}
   258  	// Hard disconnect at the networking layer
   259  	if peer != nil {
   260  		peer.Peer.Disconnect(p2p.DiscUselessPeer)
   261  	}
   262  }
   263  
// Start launches the protocol manager's broadcast and synchronisation
// goroutines, accepting at most maxPeers connected peers.
func (pm *ProtocolManager) Start(maxPeers int) {
	pm.maxPeers = maxPeers

	// broadcast transactions
	pm.txsCh = make(chan core.NewTxsEvent, txChanSize)
	pm.txsSub = pm.txpool.SubscribeNewTxsEvent(pm.txsCh)
	go pm.txBroadcastLoop()

	// Quorum
	if !pm.raftMode {
		// broadcast mined blocks
		pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{})
		go pm.minedBroadcastLoop()
	} else {
		// We set this immediately in raft mode to make sure the miner never drops
		// incoming txes. Raft mode doesn't use the fetcher or downloader, and so
		// this would never be set otherwise.
		atomic.StoreUint32(&pm.acceptTxs, 1)
	}
	// /Quorum

	// start sync handlers
	go pm.syncer()
	go pm.txsyncLoop()
}
   289  
// Stop gracefully shuts the protocol manager down: it unsubscribes the
// broadcast loops, stops accepting new peers, terminates the sync loops,
// disconnects all registered peers and finally waits for every peer handler
// goroutine to exit. The ordering of these steps is deliberate.
func (pm *ProtocolManager) Stop() {
	log.Info("Stopping Ethereum protocol")

	pm.txsSub.Unsubscribe() // quits txBroadcastLoop
	if !pm.raftMode {
		// minedBlockSub is only created outside raft mode (see Start)
		pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
	}

	// Quit the sync loop.
	// After this send has completed, no new peers will be accepted.
	pm.noMorePeers <- struct{}{}

	// Quit fetcher, txsyncLoop.
	close(pm.quitSync)

	// Disconnect existing sessions.
	// This also closes the gate for any new registrations on the peer set.
	// sessions which are already established but not added to pm.peers yet
	// will exit when they try to register.
	pm.peers.Close()

	// Wait for all peer handler goroutines and the loops to come down.
	pm.wg.Wait()

	log.Info("Ethereum protocol stopped")
}
   316  
// newPeer wraps a raw p2p peer into an eth peer, instrumenting its message
// writer with metering.
func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
	return newPeer(pv, p, newMeteredMsgWriter(rw))
}
   320  
// handle is the callback invoked to manage the life cycle of an eth peer. When
// this function terminates, the peer is disconnected. It performs the eth
// handshake, registers the peer with the peer set and downloader, kicks off
// transaction sync and checkpoint/whitelist challenges, then loops on
// handleMsg until an error tears the connection down.
func (pm *ProtocolManager) handle(p *peer) error {
	// Ignore maxPeers if this is a trusted peer
	if pm.peers.Len() >= pm.maxPeers && !p.Peer.Info().Network.Trusted {
		return p2p.DiscTooManyPeers
	}
	p.Log().Debug("Ethereum peer connected", "name", p.Name())

	// Execute the Ethereum handshake
	var (
		genesis = pm.blockchain.Genesis()
		head    = pm.blockchain.CurrentHeader()
		hash    = head.Hash()
		number  = head.Number.Uint64()
		td      = pm.blockchain.GetTd(hash, number)
	)
	if err := p.Handshake(pm.networkID, td, hash, genesis.Hash(), forkid.NewID(pm.blockchain), pm.forkFilter, pm.engine.Protocol().Name); err != nil {
		p.Log().Debug("Ethereum handshake failed", "err", err)
		return err
	}
	if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
		rw.Init(p.version)
	}
	// Register the peer locally
	if err := pm.peers.Register(p); err != nil {
		p.Log().Error("Ethereum peer registration failed", "err", err)
		return err
	}
	defer pm.removePeer(p.id)

	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
	if err := pm.downloader.RegisterPeer(p.id, p.version, p); err != nil {
		return err
	}
	// Propagate existing transactions. new transactions appearing
	// after this will be sent via broadcasts.
	pm.syncTransactions(p)

	// If we have a trusted CHT, reject all peers below that (avoid fast sync eclipse)
	if pm.checkpointHash != (common.Hash{}) {
		// Request the peer's checkpoint header for chain height/weight validation
		if err := p.RequestHeadersByNumber(pm.checkpointNumber, 1, 0, false); err != nil {
			return err
		}
		// Start a timer to disconnect if the peer doesn't reply in time
		p.syncDrop = time.AfterFunc(syncChallengeTimeout, func() {
			p.Log().Warn("Checkpoint challenge timed out, dropping", "addr", p.RemoteAddr(), "type", p.Name())
			pm.removePeer(p.id)
		})
		// Make sure it's cleaned up if the peer dies off
		defer func() {
			if p.syncDrop != nil {
				p.syncDrop.Stop()
				p.syncDrop = nil
			}
		}()
	}
	// If we have any explicit whitelist block hashes, request them
	for number := range pm.whitelist {
		if err := p.RequestHeadersByNumber(number, 1, 0, false); err != nil {
			return err
		}
	}
	// Handle incoming messages until the connection is torn down
	for {
		if err := pm.handleMsg(p); err != nil {
			p.Log().Debug("Ethereum message handling failed", "err", err)
			return err
		}
	}
}
   393  
// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
func (pm *ProtocolManager) handleMsg(p *peer) error {
	// Read the next message from the remote peer, and ensure it's fully consumed
	msg, err := p.rw.ReadMsg()
	if err != nil {
		return err
	}
	if msg.Size > protocolMaxMsgSize {
		return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, protocolMaxMsgSize)
	}
	defer msg.Discard()

	// Quorum
	if pm.raftMode {
		// In raft mode only transaction and header/body exchange messages are
		// processed; everything else is dropped with a log entry.
		if msg.Code != TxMsg &&
			msg.Code != GetBlockHeadersMsg && msg.Code != BlockHeadersMsg &&
			msg.Code != GetBlockBodiesMsg && msg.Code != BlockBodiesMsg {

			log.Info("raft: ignoring message", "code", msg.Code)

			return nil
		}
	} else if handler, ok := pm.engine.(consensus.Handler); ok {
		// Give the consensus engine first refusal on the message; if it claims
		// the message as its own, its verdict is final.
		pubKey := p.Node().Pubkey()
		addr := crypto.PubkeyToAddress(*pubKey)
		handled, err := handler.HandleMsg(addr, msg)
		if handled {
			return err
		}
	}
	// /Quorum

	// Handle the message depending on its contents
	switch {
	case msg.Code == StatusMsg:
		// Status messages should never arrive after the handshake
		return errResp(ErrExtraStatusMsg, "uncontrolled status message")

	// Block header query, collect the requested headers and reply
	case msg.Code == GetBlockHeadersMsg:
		// Decode the complex header query
		var query getBlockHeadersData
		if err := msg.Decode(&query); err != nil {
			return errResp(ErrDecode, "%v: %v", msg, err)
		}
		hashMode := query.Origin.Hash != (common.Hash{})
		first := true
		maxNonCanonical := uint64(100)

		// Gather headers until the fetch or network limits is reached
		var (
			bytes   common.StorageSize
			headers []*types.Header
			unknown bool
		)
		for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
			// Retrieve the next header satisfying the query
			var origin *types.Header
			if hashMode {
				if first {
					first = false
					origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash)
					if origin != nil {
						query.Origin.Number = origin.Number.Uint64()
					}
				} else {
					origin = pm.blockchain.GetHeader(query.Origin.Hash, query.Origin.Number)
				}
			} else {
				origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number)
			}
			if origin == nil {
				break
			}
			headers = append(headers, origin)
			bytes += estHeaderRlpSize

			// Advance to the next header of the query
			switch {
			case hashMode && query.Reverse:
				// Hash based traversal towards the genesis block
				ancestor := query.Skip + 1
				if ancestor == 0 {
					unknown = true
				} else {
					query.Origin.Hash, query.Origin.Number = pm.blockchain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)
					unknown = (query.Origin.Hash == common.Hash{})
				}
			case hashMode && !query.Reverse:
				// Hash based traversal towards the leaf block
				var (
					current = origin.Number.Uint64()
					next    = current + query.Skip + 1
				)
				if next <= current {
					// next wrapped around uint64: treat as an overflow attack
					infos, _ := json.MarshalIndent(p.Peer.Info(), "", "  ")
					p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
					unknown = true
				} else {
					if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
						nextHash := header.Hash()
						expOldHash, _ := pm.blockchain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical)
						if expOldHash == query.Origin.Hash {
							query.Origin.Hash, query.Origin.Number = nextHash, next
						} else {
							unknown = true
						}
					} else {
						unknown = true
					}
				}
			case query.Reverse:
				// Number based traversal towards the genesis block
				if query.Origin.Number >= query.Skip+1 {
					query.Origin.Number -= query.Skip + 1
				} else {
					unknown = true
				}

			case !query.Reverse:
				// Number based traversal towards the leaf block
				query.Origin.Number += query.Skip + 1
			}
		}
		return p.SendBlockHeaders(headers)

	case msg.Code == BlockHeadersMsg:
		// A batch of headers arrived to one of our previous requests
		var headers []*types.Header
		if err := msg.Decode(&headers); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		// If no headers were received, but we're expecting a checkpoint header, consider it that
		if len(headers) == 0 && p.syncDrop != nil {
			// Stop the timer either way, decide later to drop or not
			p.syncDrop.Stop()
			p.syncDrop = nil

			// If we're doing a fast sync, we must enforce the checkpoint block to avoid
			// eclipse attacks. Unsynced nodes are welcome to connect after we're done
			// joining the network
			if atomic.LoadUint32(&pm.fastSync) == 1 {
				p.Log().Warn("Dropping unsynced node during fast sync", "addr", p.RemoteAddr(), "type", p.Name())
				return errors.New("unsynced node cannot serve fast sync")
			}
		}
		// Filter out any explicitly requested headers, deliver the rest to the downloader
		filter := len(headers) == 1
		if filter {
			// If it's a potential sync progress check, validate the content and advertised chain weight
			if p.syncDrop != nil && headers[0].Number.Uint64() == pm.checkpointNumber {
				// Disable the sync drop timer
				p.syncDrop.Stop()
				p.syncDrop = nil

				// Validate the header and either drop the peer or continue
				if headers[0].Hash() != pm.checkpointHash {
					return errors.New("checkpoint hash mismatch")
				}
				return nil
			}
			// Otherwise if it's a whitelisted block, validate against the set
			if want, ok := pm.whitelist[headers[0].Number.Uint64()]; ok {
				if hash := headers[0].Hash(); want != hash {
					p.Log().Info("Whitelist mismatch, dropping peer", "number", headers[0].Number.Uint64(), "hash", hash, "want", want)
					return errors.New("whitelist block mismatch")
				}
				p.Log().Debug("Whitelist block verified", "number", headers[0].Number.Uint64(), "hash", want)
			}
			// Irrelevant of the fork checks, send the header to the fetcher just in case
			headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
		}
		if len(headers) > 0 || !filter {
			err := pm.downloader.DeliverHeaders(p.id, headers)
			if err != nil {
				log.Debug("Failed to deliver headers", "err", err)
			}
		}

	case msg.Code == GetBlockBodiesMsg:
		// Decode the retrieval message
		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
		if _, err := msgStream.List(); err != nil {
			return err
		}
		// Gather blocks until the fetch or network limits is reached
		var (
			hash   common.Hash
			bytes  int
			bodies []rlp.RawValue
		)
		for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch {
			// Retrieve the hash of the next block
			if err := msgStream.Decode(&hash); err == rlp.EOL {
				break
			} else if err != nil {
				return errResp(ErrDecode, "msg %v: %v", msg, err)
			}
			// Retrieve the requested block body, stopping if enough was found
			if data := pm.blockchain.GetBodyRLP(hash); len(data) != 0 {
				bodies = append(bodies, data)
				bytes += len(data)
			}
		}
		return p.SendBlockBodiesRLP(bodies)

	case msg.Code == BlockBodiesMsg:
		// A batch of block bodies arrived to one of our previous requests
		var request blockBodiesData
		if err := msg.Decode(&request); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		// Deliver them all to the downloader for queuing
		transactions := make([][]*types.Transaction, len(request))
		uncles := make([][]*types.Header, len(request))

		for i, body := range request {
			transactions[i] = body.Transactions
			uncles[i] = body.Uncles
		}
		// Filter out any explicitly requested bodies, deliver the rest to the downloader
		filter := len(transactions) > 0 || len(uncles) > 0
		if filter {
			transactions, uncles = pm.fetcher.FilterBodies(p.id, transactions, uncles, time.Now())
		}
		if len(transactions) > 0 || len(uncles) > 0 || !filter {
			err := pm.downloader.DeliverBodies(p.id, transactions, uncles)
			if err != nil {
				log.Debug("Failed to deliver bodies", "err", err)
			}
		}

	case p.version >= eth63 && msg.Code == GetNodeDataMsg:
		// Decode the retrieval message
		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
		if _, err := msgStream.List(); err != nil {
			return err
		}
		// Gather state data until the fetch or network limits is reached
		var (
			hash  common.Hash
			bytes int
			data  [][]byte
		)
		for bytes < softResponseLimit && len(data) < downloader.MaxStateFetch {
			// Retrieve the hash of the next state entry
			if err := msgStream.Decode(&hash); err == rlp.EOL {
				break
			} else if err != nil {
				return errResp(ErrDecode, "msg %v: %v", msg, err)
			}
			// Retrieve the requested state entry, stopping if enough was found
			if entry, err := pm.blockchain.TrieNode(hash); err == nil {
				data = append(data, entry)
				bytes += len(entry)
			}
		}
		return p.SendNodeData(data)

	case p.version >= eth63 && msg.Code == NodeDataMsg:
		// A batch of node state data arrived to one of our previous requests
		var data [][]byte
		if err := msg.Decode(&data); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		// Deliver all to the downloader
		if err := pm.downloader.DeliverNodeData(p.id, data); err != nil {
			log.Debug("Failed to deliver node state data", "err", err)
		}

	case p.version >= eth63 && msg.Code == GetReceiptsMsg:
		// Decode the retrieval message
		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
		if _, err := msgStream.List(); err != nil {
			return err
		}
		// Gather state data until the fetch or network limits is reached
		var (
			hash     common.Hash
			bytes    int
			receipts []rlp.RawValue
		)
		for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptFetch {
			// Retrieve the hash of the next block
			if err := msgStream.Decode(&hash); err == rlp.EOL {
				break
			} else if err != nil {
				return errResp(ErrDecode, "msg %v: %v", msg, err)
			}
			// Retrieve the requested block's receipts, skipping if unknown to us
			results := pm.blockchain.GetReceiptsByHash(hash)
			if results == nil {
				if header := pm.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
					continue
				}
			}
			// If known, encode and queue for response packet
			if encoded, err := rlp.EncodeToBytes(results); err != nil {
				log.Error("Failed to encode receipt", "err", err)
			} else {
				receipts = append(receipts, encoded)
				bytes += len(encoded)
			}
		}
		return p.SendReceiptsRLP(receipts)

	case p.version >= eth63 && msg.Code == ReceiptsMsg:
		// A batch of receipts arrived to one of our previous requests
		var receipts [][]*types.Receipt
		if err := msg.Decode(&receipts); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		// Deliver all to the downloader
		if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil {
			log.Debug("Failed to deliver receipts", "err", err)
		}

	case msg.Code == NewBlockHashesMsg:
		var announces newBlockHashesData
		if err := msg.Decode(&announces); err != nil {
			return errResp(ErrDecode, "%v: %v", msg, err)
		}
		// Mark the hashes as present at the remote node
		for _, block := range announces {
			p.MarkBlock(block.Hash)
		}
		// Schedule all the unknown hashes for retrieval
		unknown := make(newBlockHashesData, 0, len(announces))
		for _, block := range announces {
			if !pm.blockchain.HasBlock(block.Hash, block.Number) {
				unknown = append(unknown, block)
			}
		}
		for _, block := range unknown {
			pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
		}

	case msg.Code == NewBlockMsg:
		// Retrieve and decode the propagated block
		var request newBlockData
		if err := msg.Decode(&request); err != nil {
			return errResp(ErrDecode, "%v: %v", msg, err)
		}
		if err := request.sanityCheck(); err != nil {
			return err
		}
		request.Block.ReceivedAt = msg.ReceivedAt
		request.Block.ReceivedFrom = p

		// Mark the peer as owning the block and schedule it for import
		p.MarkBlock(request.Block.Hash())
		pm.fetcher.Enqueue(p.id, request.Block)

		// Assuming the block is importable by the peer, but possibly not yet done so,
		// calculate the head hash and TD that the peer truly must have.
		var (
			trueHead = request.Block.ParentHash()
			trueTD   = new(big.Int).Sub(request.TD, request.Block.Difficulty())
		)
		// Update the peer's total difficulty if better than the previous
		if _, td := p.Head(); trueTD.Cmp(td) > 0 {
			p.SetHead(trueHead, trueTD)

			// Schedule a sync if above ours. Note, this will not fire a sync for a gap of
			// a single block (as the true TD is below the propagated block), however this
			// scenario should easily be covered by the fetcher.
			currentBlock := pm.blockchain.CurrentBlock()
			if trueTD.Cmp(pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())) > 0 {
				go pm.synchronise(p)
			}
		}

	case msg.Code == TxMsg:
		// Transactions arrived, make sure we have a valid and fresh chain to handle them
		if atomic.LoadUint32(&pm.acceptTxs) == 0 {
			break
		}
		// Transactions can be processed, parse all of them and deliver to the pool
		var txs []*types.Transaction
		if err := msg.Decode(&txs); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		for i, tx := range txs {
			// Validate and mark the remote transaction
			if tx == nil {
				return errResp(ErrDecode, "transaction %d is nil", i)
			}
			p.MarkTransaction(tx.Hash())
		}
		pm.txpool.AddRemotes(txs)

	default:
		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
	}
	return nil
}
   791  
// Quorum
// Enqueue hands a block received outside the eth wire protocol (e.g. from a
// consensus engine) to the fetcher, which schedules it for chain import.
func (pm *ProtocolManager) Enqueue(id string, block *types.Block) {
	pm.fetcher.Enqueue(id, block)
}
   796  
   797  // BroadcastBlock will either propagate a block to a subset of it's peers, or
   798  // will only announce it's availability (depending what's requested).
   799  func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
   800  	hash := block.Hash()
   801  	peers := pm.peers.PeersWithoutBlock(hash)
   802  
   803  	// If propagation is requested, send to a subset of the peer
   804  	if propagate {
   805  		// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
   806  		var td *big.Int
   807  		if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil {
   808  			td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))
   809  		} else {
   810  			log.Error("Propagating dangling block", "number", block.Number(), "hash", hash)
   811  			return
   812  		}
   813  		// Send the block to a subset of our peers
   814  		transferLen := int(math.Sqrt(float64(len(peers))))
   815  		if transferLen < minBroadcastPeers {
   816  			transferLen = minBroadcastPeers
   817  		}
   818  		if transferLen > len(peers) {
   819  			transferLen = len(peers)
   820  		}
   821  		transfer := peers[:transferLen]
   822  		for _, peer := range transfer {
   823  			peer.AsyncSendNewBlock(block, td)
   824  		}
   825  		log.Trace("Propagated block", "hash", hash, "recipients", len(transfer), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
   826  		return
   827  	}
   828  	// Otherwise if the block is indeed in out own chain, announce it
   829  	if pm.blockchain.HasBlock(hash, block.NumberU64()) {
   830  		for _, peer := range peers {
   831  			peer.AsyncSendNewBlockHash(block)
   832  		}
   833  		log.Trace("Announced block", "hash", hash, "recipients", len(peers), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
   834  	}
   835  }
   836  
   837  // BroadcastTxs will propagate a batch of transactions to all peers which are not known to
   838  // already have the given transaction.
   839  func (pm *ProtocolManager) BroadcastTxs(txs types.Transactions) {
   840  	var txset = make(map[*peer]types.Transactions)
   841  
   842  	// Broadcast transactions to a batch of peers not knowing about it
   843  	// NOTE: Raft-based consensus currently assumes that geth broadcasts
   844  	// transactions to all peers in the network. A previous comment here
   845  	// indicated that this logic might change in the future to only send to a
   846  	// subset of peers. If this change occurs upstream, a merge conflict should
   847  	// arise here, and we should add logic to send to *all* peers in raft mode.
   848  
   849  	for _, tx := range txs {
   850  		peers := pm.peers.PeersWithoutTx(tx.Hash())
   851  		for _, peer := range peers {
   852  			txset[peer] = append(txset[peer], tx)
   853  		}
   854  		log.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(peers))
   855  	}
   856  	// FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
   857  	for peer, txs := range txset {
   858  		peer.AsyncSendTransactions(txs)
   859  	}
   860  }
   861  
   862  // Mined broadcast loop
   863  func (pm *ProtocolManager) minedBroadcastLoop() {
   864  	// automatically stops if unsubscribe
   865  	for obj := range pm.minedBlockSub.Chan() {
   866  		if ev, ok := obj.Data.(core.NewMinedBlockEvent); ok {
   867  			pm.BroadcastBlock(ev.Block, true)  // First propagate block to peers
   868  			pm.BroadcastBlock(ev.Block, false) // Only then announce to the rest
   869  		}
   870  	}
   871  }
   872  
   873  func (pm *ProtocolManager) txBroadcastLoop() {
   874  	for {
   875  		select {
   876  		case event := <-pm.txsCh:
   877  			pm.BroadcastTxs(event.Txs)
   878  
   879  		// Err() channel will be closed when unsubscribing.
   880  		case <-pm.txsSub.Err():
   881  			return
   882  		}
   883  	}
   884  }
   885  
// NodeInfo represents a short summary of the Ethereum sub-protocol metadata
// known about the host peer.
type NodeInfo struct {
	Network    uint64              `json:"network"`    // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3, Rinkeby=4)
	Difficulty *big.Int            `json:"difficulty"` // Total difficulty of the host's blockchain
	Genesis    common.Hash         `json:"genesis"`    // SHA3 hash of the host's genesis block
	Config     *params.ChainConfig `json:"config"`     // Chain configuration for the fork rules
	Head       common.Hash         `json:"head"`       // SHA3 hash of the host's best owned block
	Consensus  string              `json:"consensus"`  // Consensus mechanism in use (Quorum addition)
}
   896  
   897  // NodeInfo retrieves some protocol metadata about the running host node.
   898  func (pm *ProtocolManager) NodeInfo() *NodeInfo {
   899  	currentBlock := pm.blockchain.CurrentBlock()
   900  	// //Quorum
   901  	//
   902  	// changes done to fetch maxCodeSize dynamically based on the
   903  	// maxCodeSizeConfig changes
   904  	// /Quorum
   905  	chainConfig := pm.blockchain.Config()
   906  	chainConfig.MaxCodeSize = uint64(chainConfig.GetMaxCodeSize(pm.blockchain.CurrentBlock().Number()) / 1024)
   907  
   908  	return &NodeInfo{
   909  		Network:    pm.networkID,
   910  		Difficulty: pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64()),
   911  		Genesis:    pm.blockchain.Genesis().Hash(),
   912  		Config:     chainConfig,
   913  		Head:       currentBlock.Hash(),
   914  		Consensus:  pm.getConsensusAlgorithm(),
   915  	}
   916  }
   917  
   918  // Quorum
   919  func (pm *ProtocolManager) getConsensusAlgorithm() string {
   920  	var consensusAlgo string
   921  	if pm.raftMode { // raft does not use consensus interface
   922  		consensusAlgo = "raft"
   923  	} else {
   924  		switch pm.engine.(type) {
   925  		case consensus.Istanbul:
   926  			consensusAlgo = "istanbul"
   927  		case *clique.Clique:
   928  			consensusAlgo = "clique"
   929  		case *ethash.Ethash:
   930  			consensusAlgo = "ethash"
   931  		default:
   932  			consensusAlgo = "unknown"
   933  		}
   934  	}
   935  	return consensusAlgo
   936  }
   937  
   938  func (self *ProtocolManager) FindPeers(targets map[common.Address]bool) map[common.Address]consensus.Peer {
   939  	m := make(map[common.Address]consensus.Peer)
   940  	for _, p := range self.peers.Peers() {
   941  		pubKey := p.Node().Pubkey()
   942  		addr := crypto.PubkeyToAddress(*pubKey)
   943  		if targets[addr] {
   944  			m[addr] = p
   945  		}
   946  	}
   947  	return m
   948  }