github.com/beyonderyue/gochain@v2.2.26+incompatible/eth/handler.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package eth
    18  
    19  import (
    20  	"context"
    21  	"encoding/json"
    22  	"errors"
    23  	"fmt"
    24  	"math/big"
    25  	"sync"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"go.opencensus.io/trace"
    30  
    31  	"github.com/gochain-io/gochain/common"
    32  	"github.com/gochain-io/gochain/consensus"
    33  	"github.com/gochain-io/gochain/core"
    34  	"github.com/gochain-io/gochain/core/types"
    35  	"github.com/gochain-io/gochain/eth/downloader"
    36  	"github.com/gochain-io/gochain/eth/fetcher"
    37  	"github.com/gochain-io/gochain/event"
    38  	"github.com/gochain-io/gochain/log"
    39  	"github.com/gochain-io/gochain/p2p"
    40  	"github.com/gochain-io/gochain/p2p/discover"
    41  	"github.com/gochain-io/gochain/params"
    42  	"github.com/gochain-io/gochain/rlp"
    43  )
    44  
const (
	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header, used to budget header responses.

	// txChanSize is the size of channel listening to NewTxsEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 16384

	// The smallest subset of peers to broadcast to.
	minBroadcastPeers = 4
)
    56  
// errIncompatibleConfig is returned if the requested protocols and configs are
// not compatible (low protocol version restrictions and high requirements),
// i.e. no sub-protocol could be initialised at all.
var errIncompatibleConfig = errors.New("incompatible configuration")
    60  
    61  func errResp(code errCode, format string, v ...interface{}) error {
    62  	return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
    63  }
    64  
// ProtocolManager coordinates the eth wire protocol for the node: it tracks
// connected peers, drives block/header/body/receipt exchange through the
// downloader and fetcher, and relays new transactions and mined blocks.
type ProtocolManager struct {
	networkId uint64

	fastSync  uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
	acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)

	txpool      txPool
	blockchain  *core.BlockChain
	chainconfig *params.ChainConfig
	maxPeers    int

	downloader *downloader.Downloader
	fetcher    *fetcher.Fetcher
	peers      *peerSet

	SubProtocols []p2p.Protocol

	eventMux      *event.TypeMux
	txsCh         chan core.NewTxsEvent
	minedBlockSub *event.TypeMuxSubscription

	// channels for fetcher, syncer, txsyncLoop
	newPeerCh   chan *peer
	txsyncCh    chan *txsync
	quitSync    chan struct{}
	noMorePeers chan struct{}

	// wait group is used for graceful shutdowns during downloading
	// and processing
	wg sync.WaitGroup
}
    96  
    97  // NewProtocolManager returns a new ethereum sub protocol manager. The GoChain sub protocol manages peers capable
    98  // with the ethereum network.
    99  func NewProtocolManager(ctx context.Context, config *params.ChainConfig, mode downloader.SyncMode, networkId uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb common.Database) (*ProtocolManager, error) {
   100  	// Create the protocol manager with the base fields
   101  	manager := &ProtocolManager{
   102  		networkId:   networkId,
   103  		eventMux:    mux,
   104  		txpool:      txpool,
   105  		blockchain:  blockchain,
   106  		chainconfig: config,
   107  		peers:       newPeerSet(),
   108  		newPeerCh:   make(chan *peer),
   109  		noMorePeers: make(chan struct{}),
   110  		txsyncCh:    make(chan *txsync),
   111  		quitSync:    make(chan struct{}),
   112  	}
   113  	// Figure out whether to allow fast sync or not
   114  	if mode == downloader.FastSync && blockchain.CurrentBlock().NumberU64() > 0 {
   115  		log.Warn("Blockchain not empty, fast sync disabled")
   116  		mode = downloader.FullSync
   117  	}
   118  	if mode == downloader.FastSync {
   119  		manager.fastSync = uint32(1)
   120  	}
   121  	// Initiate a sub-protocol for every implemented version we can handle
   122  	manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions))
   123  	for i, version := range ProtocolVersions {
   124  		// Skip protocol version if incompatible with the mode of operation
   125  		if mode == downloader.FastSync && version < eth63 {
   126  			continue
   127  		}
   128  		// Compatible; initialise the sub-protocol
   129  		version := version // Closure for the run
   130  		manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
   131  			Name:    ProtocolName,
   132  			Version: version,
   133  			Length:  ProtocolLengths[i],
   134  			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
   135  				peer := manager.newPeer(int(version), p, rw)
   136  				select {
   137  				case manager.newPeerCh <- peer:
   138  					manager.wg.Add(1)
   139  					defer manager.wg.Done()
   140  					return manager.handle(peer)
   141  				case <-manager.quitSync:
   142  					return p2p.DiscQuitting
   143  				}
   144  			},
   145  			NodeInfo: func() interface{} {
   146  				return manager.NodeInfo()
   147  			},
   148  			PeerInfo: func(id discover.NodeID) interface{} {
   149  				if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
   150  					return p.Info()
   151  				}
   152  				return nil
   153  			},
   154  		})
   155  	}
   156  	if len(manager.SubProtocols) == 0 {
   157  		return nil, errIncompatibleConfig
   158  	}
   159  	// Construct the different synchronisation mechanisms
   160  	manager.downloader = downloader.New(mode, chaindb, manager.eventMux, blockchain, nil, manager.removePeer)
   161  
   162  	getBlock := func(ctx context.Context, hash common.Hash) *types.Block {
   163  		ctx, span := trace.StartSpan(ctx, "getBlock")
   164  		defer span.End()
   165  		return blockchain.GetBlockByHash(hash)
   166  	}
   167  	verifyHeader := func(ctx context.Context, header *types.Header) error {
   168  		return engine.VerifyHeader(ctx, blockchain, header)
   169  	}
   170  	heighter := func() uint64 {
   171  		return blockchain.CurrentBlock().NumberU64()
   172  	}
   173  	inserter := func(ctx context.Context, blocks types.Blocks) (int, error) {
   174  		// If fast sync is running, deny importing weird blocks
   175  		if atomic.LoadUint32(&manager.fastSync) == 1 {
   176  			log.Warn("Discarded bad propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
   177  			return 0, nil
   178  		}
   179  		atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
   180  		return manager.blockchain.InsertChain(ctx, blocks)
   181  	}
   182  	manager.fetcher = fetcher.New(getBlock, verifyHeader, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
   183  
   184  	return manager, nil
   185  }
   186  
   187  func (pm *ProtocolManager) removePeer(id string) {
   188  	// Short circuit if the peer was already removed
   189  	peer := pm.peers.Peer(id)
   190  	if peer == nil {
   191  		return
   192  	}
   193  	log.Debug("Removing GoChain peer", "peer", id)
   194  
   195  	// Unregister the peer from the downloader and GoChain peer set
   196  	if err := pm.downloader.UnregisterPeer(id); err != nil {
   197  		log.Error("Cannot unregister peer from downloader", "id", id, "err", err)
   198  	}
   199  	if err := pm.peers.Unregister(id); err != nil {
   200  		log.Error("Peer removal failed", "peer", id, "err", err)
   201  	}
   202  	// Hard disconnect at the networking layer
   203  	if peer != nil {
   204  		peer.Peer.Disconnect(p2p.DiscUselessPeer)
   205  	}
   206  }
   207  
// Start launches the broadcast and synchronisation goroutines. Subscriptions
// are established before their consumer loops are spawned so no events are
// missed. maxPeers caps the number of non-trusted peer connections accepted
// by handle.
func (pm *ProtocolManager) Start(maxPeers int) {
	pm.maxPeers = maxPeers

	// broadcast transactions
	pm.txsCh = make(chan core.NewTxsEvent, txChanSize)
	pm.txpool.SubscribeNewTxsEvent(pm.txsCh, "eth.ProtocolManager")
	go pm.txBroadcastLoop()

	// broadcast mined blocks
	pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{})
	go pm.minedBroadcastLoop()

	// start sync handlers
	go pm.syncer()
	go pm.txsyncLoop()
	go pm.txResyncLoop()
}
   225  
// Stop gracefully shuts the protocol manager down: it tears down the event
// subscriptions (stopping the broadcast loops), stops accepting new peers,
// closes the sync/fetch loops, disconnects every peer and finally waits for
// all peer-handler goroutines to exit. The ordering below is significant.
func (pm *ProtocolManager) Stop() {
	log.Info("Stopping GoChain protocol")

	pm.txpool.UnsubscribeNewTxsEvent(pm.txsCh) // quits txBroadcastLoop
	pm.minedBlockSub.Unsubscribe()             // quits blockBroadcastLoop

	// Quit the sync loop.
	// After this send has completed, no new peers will be accepted.
	pm.noMorePeers <- struct{}{}

	// Quit fetcher, txsyncLoop.
	close(pm.quitSync)

	// Disconnect existing sessions.
	// This also closes the gate for any new registrations on the peer set.
	// sessions which are already established but not added to pm.peers yet
	// will exit when they try to register.
	pm.peers.Close()

	// Wait for all peer handler goroutines and the loops to come down.
	pm.wg.Wait()

	log.Info("GoChain protocol stopped")
}
   250  
   251  func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
   252  	return newPeer(pv, p, newMeteredMsgWriter(rw))
   253  }
   254  
// handle is the callback invoked to manage the life cycle of an eth peer. When
// this function terminates, the peer is disconnected. The sequence is:
// capacity check -> eth handshake -> local registration -> downloader
// registration -> initial tx sync -> message read loop.
func (pm *ProtocolManager) handle(p *peer) error {
	// Ignore maxPeers if this is a trusted peer
	if pm.peers.Len() >= pm.maxPeers && !p.Peer.Info().Network.Trusted {
		return p2p.DiscTooManyPeers
	}
	p.Log().Debug("GoChain peer connected", "name", p.Name())

	// Execute the GoChain handshake
	var (
		genesis = pm.blockchain.Genesis()
		head    = pm.blockchain.CurrentHeader()
		hash    = head.Hash()
		number  = head.Number.Uint64()
		td      = pm.blockchain.GetTd(hash, number)
	)
	if err := p.Handshake(pm.networkId, td, hash, genesis.Hash()); err != nil {
		p.Log().Debug("GoChain handshake failed", "err", err)
		return err
	}
	// Only initialise the metered writer once the negotiated version is known.
	if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
		rw.Init(p.version)
	}
	// Register the peer locally
	if err := pm.peers.Register(p); err != nil {
		p.Log().Error("GoChain peer registration failed", "err", err)
		return err
	}
	// Ensure full teardown (downloader, peer set, disconnect) on any exit path.
	defer pm.removePeer(p.id)

	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
	if err := pm.downloader.RegisterPeer(p.id, p.version, p); err != nil {
		return err
	}
	// Propagate existing transactions. new transactions appearing
	// after this will be sent via broadcasts.
	pm.syncTransactions(context.Background(), p)

	// main loop. handle incoming messages.
	for {
		if err := pm.handleMsg(p); err != nil {
			p.Log().Error("GoChain message handling failed", "err", err)
			return err
		}
	}
}
   302  
   303  // handleMsg is invoked whenever an inbound message is received from a remote
   304  // peer. The remote connection is torn down upon returning any error.
   305  func (pm *ProtocolManager) handleMsg(p *peer) error {
   306  	// Read the next message from the remote peer, and ensure it's fully consumed
   307  	msg, err := p.rw.ReadMsg()
   308  	if err != nil {
   309  		return err
   310  	}
   311  	if msg.Size > ProtocolMaxMsgSize {
   312  		return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
   313  	}
   314  	defer msg.Discard()
   315  
   316  	ctx, span := trace.StartSpan(context.Background(), "ProtocolManager.handleMsg")
   317  	defer span.End()
   318  	span.AddAttributes(trace.StringAttribute("code", p2p.MsgCodeString(msg.Code)))
   319  
   320  	// Handle the message depending on its contents
   321  	switch {
   322  	case msg.Code == StatusMsg:
   323  		// Status messages should never arrive after the handshake
   324  		return errResp(ErrExtraStatusMsg, "uncontrolled status message")
   325  
   326  	// Block header query, collect the requested headers and reply
   327  	case msg.Code == GetBlockHeadersMsg:
   328  		// Decode the complex header query
   329  		var query getBlockHeadersData
   330  		if err := msg.Decode(&query); err != nil {
   331  			return errResp(ErrDecode, "%v: %v", msg, err)
   332  		}
   333  		hashMode := query.Origin.Hash != (common.Hash{})
   334  		first := true
   335  		maxNonCanonical := uint64(100)
   336  
   337  		// Gather headers until the fetch or network limits is reached
   338  		var (
   339  			bytes   common.StorageSize
   340  			headers []*types.Header
   341  			unknown bool
   342  		)
   343  		for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
   344  			// Retrieve the next header satisfying the query
   345  			var origin *types.Header
   346  			if hashMode {
   347  				if first {
   348  					first = false
   349  					origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash)
   350  					if origin != nil {
   351  						query.Origin.Number = origin.Number.Uint64()
   352  					}
   353  				} else {
   354  					origin = pm.blockchain.GetHeader(query.Origin.Hash, query.Origin.Number)
   355  				}
   356  			} else {
   357  				origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number)
   358  			}
   359  			if origin == nil {
   360  				break
   361  			}
   362  			headers = append(headers, origin)
   363  			bytes += estHeaderRlpSize
   364  
   365  			// Advance to the next header of the query
   366  			switch {
   367  			case hashMode && query.Reverse:
   368  				// Hash based traversal towards the genesis block
   369  				ancestor := query.Skip + 1
   370  				if ancestor == 0 {
   371  					unknown = true
   372  				} else {
   373  					query.Origin.Hash, query.Origin.Number = pm.blockchain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)
   374  					unknown = (query.Origin.Hash == common.Hash{})
   375  				}
   376  			case hashMode && !query.Reverse:
   377  				// Hash based traversal towards the leaf block
   378  				var (
   379  					current = origin.Number.Uint64()
   380  					next    = current + query.Skip + 1
   381  				)
   382  				if next <= current {
   383  					infos, _ := json.MarshalIndent(p.Peer.Info(), "", "  ")
   384  					p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
   385  					unknown = true
   386  				} else {
   387  					if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
   388  						nextHash := header.Hash()
   389  						expOldHash, _ := pm.blockchain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical)
   390  						if expOldHash == query.Origin.Hash {
   391  							query.Origin.Hash, query.Origin.Number = nextHash, next
   392  						} else {
   393  							unknown = true
   394  						}
   395  					} else {
   396  						unknown = true
   397  					}
   398  				}
   399  			case query.Reverse:
   400  				// Number based traversal towards the genesis block
   401  				if query.Origin.Number >= query.Skip+1 {
   402  					query.Origin.Number -= query.Skip + 1
   403  				} else {
   404  					unknown = true
   405  				}
   406  
   407  			case !query.Reverse:
   408  				// Number based traversal towards the leaf block
   409  				query.Origin.Number += query.Skip + 1
   410  			}
   411  		}
   412  		return p.SendBlockHeaders(ctx, headers)
   413  
   414  	case msg.Code == BlockHeadersMsg:
   415  		// A batch of headers arrived to one of our previous requests
   416  		var headers []*types.Header
   417  		if err := msg.Decode(&headers); err != nil {
   418  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   419  		}
   420  		// Filter out any explicitly requested headers, deliver the rest to the downloader
   421  		filter := len(headers) == 1
   422  		if filter {
   423  			// Irrelevant of the fork checks, send the header to the fetcher just in case
   424  			headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
   425  		}
   426  		if len(headers) > 0 || !filter {
   427  			err := pm.downloader.DeliverHeaders(p.id, headers)
   428  			if err != nil {
   429  				log.Debug("Failed to deliver headers", "err", err)
   430  			}
   431  		}
   432  
   433  	case msg.Code == GetBlockBodiesMsg:
   434  		// Decode the retrieval message
   435  		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   436  		defer rlp.Discard(msgStream)
   437  		if _, err := msgStream.List(); err != nil {
   438  			return err
   439  		}
   440  		// Gather blocks until the fetch or network limits is reached
   441  		var (
   442  			hash   common.Hash
   443  			bytes  int
   444  			bodies []rlp.RawValue
   445  		)
   446  		for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch {
   447  			// Retrieve the hash of the next block
   448  			if err := msgStream.Decode(&hash); err == rlp.EOL {
   449  				break
   450  			} else if err != nil {
   451  				return errResp(ErrDecode, "msg %v: %v", msg, err)
   452  			}
   453  			// Retrieve the requested block body, stopping if enough was found
   454  			if data := pm.blockchain.GetBodyRLP(hash); len(data) != 0 {
   455  				bodies = append(bodies, data)
   456  				bytes += len(data)
   457  			}
   458  		}
   459  		return p.SendBlockBodiesRLP(ctx, bodies)
   460  
   461  	case msg.Code == BlockBodiesMsg:
   462  		// A batch of block bodies arrived to one of our previous requests
   463  		var request blockBodiesData
   464  		if err := msg.Decode(&request); err != nil {
   465  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   466  		}
   467  		// Deliver them all to the downloader for queuing
   468  		trasactions := make([][]*types.Transaction, len(request))
   469  
   470  		for i, body := range request {
   471  			trasactions[i] = body.Transactions
   472  		}
   473  		// Filter out any explicitly requested bodies, deliver the rest to the downloader
   474  		if len(trasactions) > 0 {
   475  			trasactions = pm.fetcher.FilterBodies(p.id, trasactions, time.Now())
   476  		}
   477  		if len(trasactions) > 0 {
   478  			err := pm.downloader.DeliverBodies(p.id, trasactions)
   479  			if err != nil {
   480  				log.Debug("Failed to deliver bodies", "err", err)
   481  			}
   482  		}
   483  
   484  	case p.version >= eth63 && msg.Code == GetNodeDataMsg:
   485  		// Decode the retrieval message
   486  		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   487  		defer rlp.Discard(msgStream)
   488  		if _, err := msgStream.List(); err != nil {
   489  			return err
   490  		}
   491  		// Gather state data until the fetch or network limits is reached
   492  		var (
   493  			hash  common.Hash
   494  			bytes int
   495  			data  [][]byte
   496  		)
   497  		for bytes < softResponseLimit && len(data) < downloader.MaxStateFetch {
   498  			// Retrieve the hash of the next state entry
   499  			if err := msgStream.Decode(&hash); err == rlp.EOL {
   500  				break
   501  			} else if err != nil {
   502  				return errResp(ErrDecode, "msg %v: %v", msg, err)
   503  			}
   504  			// Retrieve the requested state entry, stopping if enough was found
   505  			if entry, err := pm.blockchain.TrieNode(hash); err == nil {
   506  				data = append(data, entry)
   507  				bytes += len(entry)
   508  			}
   509  		}
   510  		return p.SendNodeData(ctx, data)
   511  
   512  	case p.version >= eth63 && msg.Code == NodeDataMsg:
   513  		// A batch of node state data arrived to one of our previous requests
   514  		var data [][]byte
   515  		if err := msg.Decode(&data); err != nil {
   516  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   517  		}
   518  		// Deliver all to the downloader
   519  		if err := pm.downloader.DeliverNodeData(p.id, data); err != nil {
   520  			log.Debug("Failed to deliver node state data", "err", err)
   521  		}
   522  
   523  	case p.version >= eth63 && msg.Code == GetReceiptsMsg:
   524  		// Decode the retrieval message
   525  		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   526  		defer rlp.Discard(msgStream)
   527  		if _, err := msgStream.List(); err != nil {
   528  			return err
   529  		}
   530  		// Gather state data until the fetch or network limits is reached
   531  		var (
   532  			hash     common.Hash
   533  			bytes    int
   534  			receipts []rlp.RawValue
   535  		)
   536  		for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptFetch {
   537  			// Retrieve the hash of the next block
   538  			if err := msgStream.Decode(&hash); err == rlp.EOL {
   539  				break
   540  			} else if err != nil {
   541  				return errResp(ErrDecode, "msg %v: %v", msg, err)
   542  			}
   543  			// Retrieve the requested block's receipts, skipping if unknown to us
   544  			results := pm.blockchain.GetReceiptsByHash(hash)
   545  			if results == nil {
   546  				if header := pm.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
   547  					continue
   548  				}
   549  			}
   550  			// If known, encode and queue for response packet
   551  			if encoded, err := rlp.EncodeToBytes(results); err != nil {
   552  				log.Error("Failed to encode receipt", "err", err)
   553  			} else {
   554  				receipts = append(receipts, encoded)
   555  				bytes += len(encoded)
   556  			}
   557  		}
   558  		return p.SendReceiptsRLP(ctx, receipts)
   559  
   560  	case p.version >= eth63 && msg.Code == ReceiptsMsg:
   561  		// A batch of receipts arrived to one of our previous requests
   562  		var receipts [][]*types.Receipt
   563  		if err := msg.Decode(&receipts); err != nil {
   564  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   565  		}
   566  		// Deliver all to the downloader
   567  		if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil {
   568  			log.Debug("Failed to deliver receipts", "err", err)
   569  		}
   570  
   571  	case msg.Code == NewBlockHashesMsg:
   572  		var announces newBlockHashesData
   573  		if err := msg.Decode(&announces); err != nil {
   574  			span.SetStatus(trace.Status{
   575  				Code:    trace.StatusCodeInternal,
   576  				Message: err.Error(),
   577  			})
   578  			return errResp(ErrDecode, "%v: %v", msg, err)
   579  		}
   580  		span.AddAttributes(trace.Int64Attribute("cnt", int64(len(announces))))
   581  		// Mark the hashes as present at the remote node
   582  		for _, block := range announces {
   583  			p.MarkBlock(ctx, block.Hash)
   584  		}
   585  		// Schedule all the unknown hashes for retrieval
   586  		unknown := make(newBlockHashesData, 0, len(announces))
   587  		for _, block := range announces {
   588  			if !pm.blockchain.HasBlock(block.Hash, block.Number) {
   589  				unknown = append(unknown, block)
   590  			}
   591  		}
   592  		go func() {
   593  			_, ns := trace.StartSpan(context.Background(), "ProtocolManager.handleMsg-notify-unknown-blocks")
   594  			defer ns.End()
   595  			ns.AddAttributes(trace.Int64Attribute("cnt", int64(len(unknown))))
   596  			parent := span.SpanContext()
   597  			ns.AddLink(trace.Link{
   598  				TraceID: parent.TraceID,
   599  				SpanID:  parent.SpanID,
   600  				Type:    trace.LinkTypeParent,
   601  			})
   602  			for _, block := range unknown {
   603  				if err := pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies); err != nil {
   604  					log.Error("Cannot notify fetcher of new block hashes", "err", err)
   605  				}
   606  			}
   607  		}()
   608  
   609  	case msg.Code == NewBlockMsg:
   610  		// Retrieve and decode the propagated block
   611  		var request newBlockData
   612  		if err := msg.Decode(&request); err != nil {
   613  			return errResp(ErrDecode, "%v: %v", msg, err)
   614  		}
   615  		request.Block.ReceivedAt = msg.ReceivedAt
   616  		request.Block.ReceivedFrom = p
   617  
   618  		// Mark the peer as owning the block and schedule it for import
   619  		p.MarkBlock(ctx, request.Block.Hash())
   620  		if err := pm.fetcher.Enqueue(p.id, request.Block); err != nil {
   621  			log.Error("Cannot enqueue new block to fetcher", "id", p.id, "err", err)
   622  		}
   623  
   624  		// Assuming the block is importable by the peer, but possibly not yet done so,
   625  		// calculate the head hash and TD that the peer truly must have.
   626  		var (
   627  			trueHead = request.Block.ParentHash()
   628  			trueTD   = new(big.Int).Sub(request.TD, request.Block.Difficulty())
   629  		)
   630  		// Update the peer's head if better than the previous, or if same and hash has changed.
   631  		head, td := p.Head()
   632  		if cmp := trueTD.Cmp(td); cmp > 0 || cmp == 0 && head != trueHead {
   633  			p.SetHead(trueHead, trueTD)
   634  
   635  			// Schedule a sync if above ours, or same and different hash. Note, this will not fire a sync for a gap of
   636  			// a singe block (as the true TD is below the propagated block), however this
   637  			// scenario should easily be covered by the fetcher.
   638  			currentBlock := pm.blockchain.CurrentBlock()
   639  			currentHash := currentBlock.Hash()
   640  			currentTD := pm.blockchain.GetTd(currentHash, currentBlock.NumberU64())
   641  			if cmp := trueTD.Cmp(currentTD); cmp > 0 || cmp == 0 && trueHead != currentHash {
   642  				go pm.synchronise(ctx, p)
   643  			}
   644  		}
   645  
   646  	case msg.Code == TxMsg:
   647  		// Transactions arrived, make sure we have a valid and fresh chain to handle them
   648  		if atomic.LoadUint32(&pm.acceptTxs) == 0 {
   649  			break
   650  		}
   651  		// Transactions can be processed, parse all of them and deliver to the pool
   652  		var txs []*types.Transaction
   653  		_, ds := trace.StartSpan(ctx, "Msg.Decode")
   654  		if err := msg.Decode(&txs); err != nil {
   655  			err := errResp(ErrDecode, "msg %v: %v", msg, err)
   656  			ds.SetStatus(trace.Status{
   657  				Code:    trace.StatusCodeInvalidArgument,
   658  				Message: err.Error(),
   659  			})
   660  			ds.End()
   661  			return err
   662  		}
   663  		ds.End()
   664  		_, ms := trace.StartSpan(ctx, "peer.MarkTransaction")
   665  		ms.AddAttributes(trace.Int64Attribute("txs", int64(len(txs))))
   666  		for i, tx := range txs {
   667  			// Validate and mark the remote transaction
   668  			if tx == nil {
   669  				err := errResp(ErrDecode, "transaction %d is nil", i)
   670  				ms.SetStatus(trace.Status{
   671  					Code:    trace.StatusCodeInvalidArgument,
   672  					Message: err.Error(),
   673  				})
   674  				ms.End()
   675  				return err
   676  			}
   677  			p.MarkTransaction(ctx, tx.Hash())
   678  		}
   679  		ms.End()
   680  		pm.txpool.AddRemotes(ctx, txs)
   681  
   682  	default:
   683  		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
   684  	}
   685  	return nil
   686  }
   687  
   688  // BroadcastBlock will either propagate a block to a subset of it's peers, or
   689  // will only announce it's availability (depending what's requested).
   690  func (pm *ProtocolManager) BroadcastBlock(ctx context.Context, block *types.Block, propagate bool) {
   691  	ctx, span := trace.StartSpan(ctx, "ProtocolManager.BroadcastBlock")
   692  	defer span.End()
   693  
   694  	hash := block.Hash()
   695  	peers := pm.peers.PeersWithoutBlock(ctx, hash)
   696  
   697  	// If propagation is requested, send to all peers.
   698  	if propagate {
   699  		// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
   700  		var td *big.Int
   701  		if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil {
   702  			td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))
   703  		} else {
   704  			log.Error("Propagating dangling block", "number", block.Number(), "hash", hash)
   705  			return
   706  		}
   707  		for _, p := range peers {
   708  			p.SendNewBlockAsync(block, td)
   709  		}
   710  		return
   711  	}
   712  	// Otherwise if the block is indeed in our own chain, announce it
   713  	if pm.blockchain.HasBlock(hash, block.NumberU64()) {
   714  		for _, p := range peers {
   715  			p.SendNewBlockHashAsync(block)
   716  		}
   717  	}
   718  }
   719  
   720  // BroadcastTxs propagates a batch of transactions to a subset of peers which are not known to already have them.
   721  // Returns without blocking after launching each peer send in separate concurrent goroutines.
   722  func (pm *ProtocolManager) BroadcastTxs(ctx context.Context, txs types.Transactions) {
   723  	for p, txs := range pm.peers.PeersWithoutTxs(ctx, txs) {
   724  		p.SendTransactionsAsync(txs)
   725  	}
   726  }
   727  
   728  // Mined broadcast loop
   729  func (pm *ProtocolManager) minedBroadcastLoop() {
   730  	// automatically stops if unsubscribe
   731  	for obj := range pm.minedBlockSub.Chan() {
   732  		switch ev := obj.Data.(type) {
   733  		case core.NewMinedBlockEvent:
   734  			ctx, span := trace.StartSpan(context.Background(), "ProtocolManager.minedBroadcastLoop-NewMinedBlockEvent")
   735  			pm.BroadcastBlock(ctx, ev.Block, true)  // First propagate block to peers
   736  			pm.BroadcastBlock(ctx, ev.Block, false) // Only then announce to the rest
   737  			span.End()
   738  		}
   739  	}
   740  }
   741  
   742  func (pm *ProtocolManager) txBroadcastLoop() {
   743  	for event := range pm.txsCh {
   744  		ctx, span := trace.StartSpan(context.Background(), "ProtocolManager.txBroadcastLoop-txsCh")
   745  		pm.BroadcastTxs(ctx, event.Txs)
   746  		span.End()
   747  	}
   748  }
   749  
   750  // NodeInfo represents a short summary of the GoChain sub-protocol metadata
   751  // known about the host peer.
// NodeInfo represents a short summary of the GoChain sub-protocol metadata
// known about the host peer. It is exposed verbatim over the admin/RPC API.
type NodeInfo struct {
	Network    uint64              `json:"network"`    // GoChain network ID (1=Frontier, 2=Morden, Ropsten=3, Rinkeby=4)
	Difficulty *big.Int            `json:"difficulty"` // Total difficulty of the host's blockchain
	Genesis    common.Hash         `json:"genesis"`    // SHA3 hash of the host's genesis block
	Config     *params.ChainConfig `json:"config"`     // Chain configuration for the fork rules
	Head       common.Hash         `json:"head"`       // SHA3 hash of the host's best owned block
}
   759  
   760  // NodeInfo retrieves some protocol metadata about the running host node.
   761  func (pm *ProtocolManager) NodeInfo() *NodeInfo {
   762  	currentBlock := pm.blockchain.CurrentBlock()
   763  	return &NodeInfo{
   764  		Network:    pm.networkId,
   765  		Difficulty: pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64()),
   766  		Genesis:    pm.blockchain.Genesis().Hash(),
   767  		Config:     pm.blockchain.Config(),
   768  		Head:       currentBlock.Hash(),
   769  	}
   770  }