gitlab.com/aquachain/aquachain@v1.17.16-rc3.0.20221018032414-e3ddf1e1c055/aqua/handler.go (about)

     1  // Copyright 2018 The aquachain Authors
     2  // This file is part of the aquachain library.
     3  //
     4  // The aquachain library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The aquachain library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the aquachain library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package aqua
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"math"
    24  	"math/big"
    25  	"sync"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"gitlab.com/aquachain/aquachain/aqua/downloader"
    30  	"gitlab.com/aquachain/aquachain/aqua/event"
    31  	"gitlab.com/aquachain/aquachain/aqua/fetcher"
    32  	"gitlab.com/aquachain/aquachain/aquadb"
    33  	"gitlab.com/aquachain/aquachain/common"
    34  	"gitlab.com/aquachain/aquachain/common/log"
    35  	"gitlab.com/aquachain/aquachain/consensus"
    36  	"gitlab.com/aquachain/aquachain/core"
    37  	"gitlab.com/aquachain/aquachain/core/types"
    38  	"gitlab.com/aquachain/aquachain/p2p"
    39  	"gitlab.com/aquachain/aquachain/p2p/discover"
    40  	"gitlab.com/aquachain/aquachain/params"
    41  	"gitlab.com/aquachain/aquachain/rlp"
    42  )
    43  
const (
	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header

	// txChanSize is the size of channel listening to TxPreEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 4096
)
    52  
// errIncompatibleConfig is returned if the requested protocols and configs are
// not compatible (low protocol version restrictions and high requirements).
// NewProtocolManager returns it when no sub-protocol could be initialised.
var errIncompatibleConfig = errors.New("incompatible configuration")
    56  
    57  func errResp(code errCode, format string, v ...interface{}) error {
    58  	return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
    59  }
    60  
// ProtocolManager drives the aqua wire protocol: it registers sub-protocols
// with the p2p server, tracks connected peers, synchronises the local chain
// via the downloader/fetcher, and broadcasts transactions and mined blocks.
type ProtocolManager struct {
	networkId uint64 // Network ID exchanged with remote peers during the handshake

	fastSync  uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
	acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)

	txpool      txPool              // Pool remote transactions are delivered into and synced from
	blockchain  *core.BlockChain    // Canonical chain used to answer queries and import blocks
	chainconfig *params.ChainConfig // Chain configuration (block version / fork rules)
	maxPeers    int                 // Maximum number of peers accepted in handle()

	downloader *downloader.Downloader // Bulk chain synchroniser
	fetcher    *fetcher.Fetcher       // Announced-block retriever and importer
	peers      *peerSet               // Set of currently registered peers

	SubProtocols []p2p.Protocol // Protocol descriptors handed to the p2p server

	eventMux      *event.TypeMux             // Mux delivering mined-block events
	txCh          chan core.TxPreEvent       // Channel fed by the tx pool subscription
	txSub         event.Subscription         // Subscription feeding txCh
	minedBlockSub *event.TypeMuxSubscription // Subscription for locally mined blocks

	// channels for fetcher, syncer, txsyncLoop
	newPeerCh   chan *peer    // Hands newly connected peers to the syncer
	txsyncCh    chan *txsync  // Queues initial transaction syncs per peer
	quitSync    chan struct{} // Closed on Stop to terminate sync helpers
	noMorePeers chan struct{} // Tells the syncer to stop accepting new peers

	// wait group is used for graceful shutdowns during downloading
	// and processing
	wg sync.WaitGroup
}
    93  
// NewProtocolManager returns a new aquachain sub protocol manager. The Aquachain sub protocol manages peers capable
// with the aquachain network.
//
// NOTE: when mode is downloader.OfflineSync it returns (nil, nil) — a nil
// manager with a nil error — so callers must check for a nil result, not
// just a non-nil error.
func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkId uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb aquadb.Database) (*ProtocolManager, error) {
	// Create the protocol manager with the base fields
	if mode == downloader.OfflineSync {
		return nil, nil
	}
	manager := &ProtocolManager{
		networkId:   networkId,
		eventMux:    mux,
		txpool:      txpool,
		blockchain:  blockchain,
		chainconfig: config,
		peers:       newPeerSet(),
		newPeerCh:   make(chan *peer),
		noMorePeers: make(chan struct{}),
		txsyncCh:    make(chan *txsync),
		quitSync:    make(chan struct{}),
	}
	// Figure out whether to allow fast sync or not: fast sync only makes
	// sense starting from an empty chain, otherwise fall back to full sync.
	if mode == downloader.FastSync && blockchain.CurrentBlock().NumberU64() > 0 {
		log.Warn("Blockchain not empty, fast sync disabled")
		mode = downloader.FullSync
	}
	if mode == downloader.FastSync {
		manager.fastSync = uint32(1)
	}
	// Initiate a sub-protocol for every implemented version we can handle
	manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions))
	for i, version := range ProtocolVersions {
		// Skip protocol version if incompatible with the mode of operation
		// (fast sync needs protocol features introduced in version 63).
		if mode == downloader.FastSync && version < 63 {
			continue
		}
		// Compatible; initialise the sub-protocol
		version := version // Closure for the run
		manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
			Name:    ProtocolName,
			Version: version,
			Length:  ProtocolLengths[i],
			// Run blocks for the lifetime of the peer connection; the send on
			// newPeerCh hands the peer to the syncer before handle() takes over.
			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
				peer := manager.newPeer(int(version), p, rw)
				select {
				case manager.newPeerCh <- peer:
					manager.wg.Add(1)
					defer manager.wg.Done()
					return manager.handle(peer)
				case <-manager.quitSync:
					return p2p.DiscQuitting
				}
			},
			NodeInfo: func() interface{} {
				return manager.NodeInfo()
			},
			// PeerInfo looks peers up by the first 8 bytes of the node ID,
			// matching the id format used by the peer set.
			PeerInfo: func(id discover.NodeID) interface{} {
				if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
					return p.Info()
				}
				return nil
			},
		})
	}
	if len(manager.SubProtocols) == 0 {
		return nil, errIncompatibleConfig
	}
	// Construct the different synchronisation mechanisms
	manager.downloader = downloader.New(mode, chaindb, manager.eventMux, blockchain, nil, manager.removePeer)

	// validator stamps the configured block version on the header before
	// consensus verification.
	validator := func(header *types.Header) error {
		header.Version = manager.chainconfig.GetBlockVersion(header.Number)
		return engine.VerifyHeader(blockchain, header, true)
	}
	// heighter reports the current canonical chain height for the fetcher.
	heighter := func() uint64 {
		return blockchain.CurrentBlock().NumberU64()
	}
	// inserter imports fetcher-retrieved blocks into the chain.
	inserter := func(blocks types.Blocks) (int, error) {
		// If fast sync is running, deny importing weird blocks
		if atomic.LoadUint32(&manager.fastSync) == 1 {
			log.Warn("Discarded bad propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
			return 0, nil
		}
		atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
		return manager.blockchain.InsertChain(blocks)
	}
	manager.fetcher = fetcher.New(config.GetBlockVersion, blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)

	return manager, nil
}
   182  
   183  func (pm *ProtocolManager) removePeer(id string) {
   184  	// Short circuit if the peer was already removed
   185  	peer := pm.peers.Peer(id)
   186  	if peer == nil {
   187  		return
   188  	}
   189  	log.Debug("Removing Aquachain peer", "peer", id)
   190  
   191  	// Unregister the peer from the downloader and Aquachain peer set
   192  	pm.downloader.UnregisterPeer(id)
   193  	if err := pm.peers.Unregister(id); err != nil {
   194  		log.Error("Peer removal failed", "peer", id, "err", err)
   195  	}
   196  	// Hard disconnect at the networking layer
   197  	if peer != nil {
   198  		peer.Peer.Disconnect(p2p.DiscUselessPeer)
   199  	}
   200  }
   201  
// Start launches the protocol's background goroutines: transaction and mined
// block broadcasting, plus the chain and transaction sync loops. maxPeers
// caps how many peers handle() will accept.
func (pm *ProtocolManager) Start(maxPeers int) {
	pm.maxPeers = maxPeers

	// broadcast transactions
	pm.txCh = make(chan core.TxPreEvent, txChanSize)
	pm.txSub = pm.txpool.SubscribeTxPreEvent(pm.txCh)
	go pm.txBroadcastLoop()

	// broadcast mined blocks
	pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{})
	go pm.minedBroadcastLoop()

	// start sync handlers
	go pm.syncer()
	go pm.txsyncLoop()
}
   218  
// Stop gracefully shuts down the protocol manager. The teardown order is
// deliberate: unsubscribe the broadcast loops, stop the syncer, close the
// quit channel, disconnect peers, then wait for all handlers to exit.
func (pm *ProtocolManager) Stop() {
	log.Info("Stopping Aquachain protocol")

	pm.txSub.Unsubscribe()         // quits txBroadcastLoop
	pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop

	// Quit the sync loop.
	// After this send has completed, no new peers will be accepted.
	pm.noMorePeers <- struct{}{}

	// Quit fetcher, txsyncLoop.
	close(pm.quitSync)

	// Disconnect existing sessions.
	// This also closes the gate for any new registrations on the peer set.
	// sessions which are already established but not added to pm.peers yet
	// will exit when they try to register.
	pm.peers.Close()

	// Wait for all peer handler goroutines and the loops to come down.
	pm.wg.Wait()

	log.Info("Aquachain protocol stopped")
}
   243  
   244  func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
   245  	return newPeer(pv, p, newMeteredMsgWriter(rw))
   246  }
   247  
// handle is the callback invoked to manage the life cycle of an aqua peer. When
// this function terminates, the peer is disconnected.
//
// The sequence is: capacity check, protocol handshake, local registration,
// downloader registration, initial transaction sync, then the message loop.
// Any returned error tears the connection down.
func (pm *ProtocolManager) handle(p *peer) error {
	// Enforce the maxPeers cap set in Start.
	if pm.peers.Len() >= pm.maxPeers {
		return p2p.DiscTooManyPeers
	}
	p.Log().Trace("Aquachain peer connected", "name", p.Name())

	// Execute the Aquachain handshake
	var (
		genesis = pm.blockchain.Genesis()
		head    = pm.blockchain.CurrentHeader()
		hash    = head.Hash()
		number  = head.Number.Uint64()
		td      = pm.blockchain.GetTd(hash, number)
	)
	if err := p.Handshake(pm.networkId, td, hash, genesis.Hash()); err != nil {
		p.Log().Trace("Aquachain handshake failed", "err", err)
		return err
	}
	// Initialise the metered writer with the negotiated protocol version.
	if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
		rw.Init(p.version)
	}
	// Register the peer locally
	if err := pm.peers.Register(p); err != nil {
		p.Log().Error("Aquachain peer registration failed", "err", err)
		return err
	}
	// Ensure full teardown (downloader + peer set + disconnect) on any exit.
	defer pm.removePeer(p.id)

	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
	if err := pm.downloader.RegisterPeer(p.id, p.version, p); err != nil {
		return err
	}
	p.Log().Debug("Aquachain peer registered", "name", p.Name())
	// Propagate existing transactions. new transactions appearing
	// after this will be sent via broadcasts.
	pm.syncTransactions(p)

	// main loop. handle incoming messages until one fails.
	for {
		if err := pm.handleMsg(p); err != nil {
			p.Log().Debug("Aquachain message handling failed", "err", err)
			return err
		}
	}
}
   295  
// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
//
// Each case either answers a remote query (Get* messages) or routes a remote
// response/announcement into the fetcher, downloader, or transaction pool.
func (pm *ProtocolManager) handleMsg(p *peer) error {
	// Read the next message from the remote peer, and ensure it's fully consumed
	msg, err := p.rw.ReadMsg()
	if err != nil {
		return err
	}
	if msg.Size > ProtocolMaxMsgSize {
		return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
	}
	defer msg.Discard()

	// Handle the message depending on its contents
	switch {
	case msg.Code == StatusMsg:
		// Status messages should never arrive after the handshake
		return errResp(ErrExtraStatusMsg, "uncontrolled status message")

	// Block header query, collect the requested headers and reply
	case msg.Code == GetBlockHeadersMsg:
		// Decode the complex header query
		var query getBlockHeadersData
		if err := msg.Decode(&query); err != nil {
			return errResp(ErrDecode, "%v: %v", msg, err)
		}
		// A non-zero origin hash means hash-addressed lookup; otherwise the
		// query addresses headers by block number.
		hashMode := query.Origin.Hash != (common.Hash{})

		// Gather headers until the fetch or network limits is reached
		// (softResponseLimit bytes, MaxHeaderFetch count, or query.Amount).
		var (
			bytes   common.StorageSize
			headers []*types.Header
			unknown bool
		)
		for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
			// Retrieve the next header satisfying the query
			var origin *types.Header
			if hashMode {
				origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash)
			} else {
				origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number)
			}
			if origin == nil {
				break
			}
			number := origin.Number.Uint64()
			headers = append(headers, origin)
			bytes += estHeaderRlpSize

			// Advance to the next header of the query
			switch {
			case query.Origin.Hash != (common.Hash{}) && query.Reverse:
				// Hash based traversal towards the genesis block, walking
				// parent hashes Skip+1 steps per emitted header.
				// NOTE(review): number-- on a uint64 can wrap past genesis;
				// the subsequent GetHeader lookup then presumably misses and
				// sets unknown — confirm GetHeader behavior for huge numbers.
				for i := 0; i < int(query.Skip)+1; i++ {
					if header := pm.blockchain.GetHeader(query.Origin.Hash, number); header != nil {
						query.Origin.Hash = header.ParentHash
						number--
					} else {
						unknown = true
						break
					}
				}
			case query.Origin.Hash != (common.Hash{}) && !query.Reverse:
				// Hash based traversal towards the leaf block
				var (
					current = origin.Number.Uint64()
					next    = current + query.Skip + 1
				)
				// Guard against uint64 overflow of current+Skip+1, which a
				// malicious peer could use to loop the traversal.
				if next <= current {
					infos, _ := json.MarshalIndent(p.Peer.Info(), "", "  ")
					p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
					unknown = true
				} else {
					if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
						// Only follow the candidate if it is actually a
						// descendant of the origin on the canonical chain.
						if pm.blockchain.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash {
							query.Origin.Hash = header.Hash()
						} else {
							unknown = true
						}
					} else {
						unknown = true
					}
				}
			case query.Reverse:
				// Number based traversal towards the genesis block
				if query.Origin.Number >= query.Skip+1 {
					query.Origin.Number -= query.Skip + 1
				} else {
					unknown = true
				}

			case !query.Reverse:
				// Number based traversal towards the leaf block
				query.Origin.Number += query.Skip + 1
			}
		}
		return p.SendBlockHeaders(headers)

	case msg.Code == BlockHeadersMsg:
		// A batch of headers arrived to one of our previous requests
		var headers []*types.Header
		if err := msg.Decode(&headers); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		// Filter out any explicitly requested headers, deliver the rest to the downloader.
		// A single header may be an answer to a fetcher announcement, so give
		// the fetcher first pick; anything it doesn't claim goes downstream.
		filter := len(headers) == 1
		if filter {
			headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
		}
		if len(headers) > 0 || !filter {
			err := pm.downloader.DeliverHeaders(p.id, headers)
			if err != nil {
				log.Debug("Failed to deliver headers", "err", err)
			}
		}

	case msg.Code == GetBlockBodiesMsg:
		// Decode the retrieval message lazily as an RLP list of hashes.
		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
		if _, err := msgStream.List(); err != nil {
			return err
		}
		// Gather blocks until the fetch or network limits is reached
		var (
			hash   common.Hash
			bytes  int
			bodies []rlp.RawValue
		)
		for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch {
			// Retrieve the hash of the next block
			if err := msgStream.Decode(&hash); err == rlp.EOL {
				break
			} else if err != nil {
				return errResp(ErrDecode, "msg %v: %v", msg, err)
			}
			// Retrieve the requested block body, stopping if enough was found
			// (unknown hashes are silently skipped).
			if data := pm.blockchain.GetBodyRLP(hash); len(data) != 0 {
				bodies = append(bodies, data)
				bytes += len(data)
			}
		}
		return p.SendBlockBodiesRLP(bodies)

	case msg.Code == BlockBodiesMsg:
		// A batch of block bodies arrived to one of our previous requests
		var request blockBodiesData
		if err := msg.Decode(&request); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		// Deliver them all to the downloader for queuing
		// NOTE(review): "trasactions" is a typo of "transactions"; kept here
		// verbatim, worth renaming in a follow-up.
		trasactions := make([][]*types.Transaction, len(request))
		uncles := make([][]*types.Header, len(request))

		for i, body := range request {
			trasactions[i] = body.Transactions
			uncles[i] = body.Uncles
		}
		// Filter out any explicitly requested bodies, deliver the rest to the downloader
		filter := len(trasactions) > 0 || len(uncles) > 0
		if filter {
			trasactions, uncles = pm.fetcher.FilterBodies(p.id, trasactions, uncles, time.Now())
		}
		if len(trasactions) > 0 || len(uncles) > 0 || !filter {
			err := pm.downloader.DeliverBodies(p.id, trasactions, uncles)
			if err != nil {
				log.Debug("Failed to deliver bodies", "err", err)
			}
		}

	case msg.Code == GetNodeDataMsg:
		// Decode the retrieval message lazily as an RLP list of hashes.
		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
		if _, err := msgStream.List(); err != nil {
			return err
		}
		// Gather state data until the fetch or network limits is reached
		var (
			hash  common.Hash
			bytes int
			data  [][]byte
		)
		for bytes < softResponseLimit && len(data) < downloader.MaxStateFetch {
			// Retrieve the hash of the next state entry
			if err := msgStream.Decode(&hash); err == rlp.EOL {
				break
			} else if err != nil {
				return errResp(ErrDecode, "msg %v: %v", msg, err)
			}
			// Retrieve the requested state entry, stopping if enough was found
			if entry, err := pm.blockchain.TrieNode(hash); err == nil {
				data = append(data, entry)
				bytes += len(entry)
			}
		}
		return p.SendNodeData(data)

	case msg.Code == NodeDataMsg:
		// A batch of node state data arrived to one of our previous requests
		var data [][]byte
		if err := msg.Decode(&data); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		// Deliver all to the downloader
		if err := pm.downloader.DeliverNodeData(p.id, data); err != nil {
			log.Debug("Failed to deliver node state data", "err", err)
		}

	case msg.Code == GetReceiptsMsg:
		// Decode the retrieval message lazily as an RLP list of hashes.
		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
		if _, err := msgStream.List(); err != nil {
			return err
		}
		// Gather state data until the fetch or network limits is reached
		var (
			hash     common.Hash
			bytes    int
			receipts []rlp.RawValue
		)
		for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptFetch {
			// Retrieve the hash of the next block
			if err := msgStream.Decode(&hash); err == rlp.EOL {
				break
			} else if err != nil {
				return errResp(ErrDecode, "msg %v: %v", msg, err)
			}
			// Retrieve the requested block's receipts, skipping if unknown to us
			results := pm.blockchain.GetReceiptsByHash(hash)
			if results == nil {
				// Blocks with an empty receipt root legitimately have no
				// receipts; only those fall through to the encoder below.
				if header := pm.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
					continue
				}
			}
			// If known, encode and queue for response packet
			if encoded, err := rlp.EncodeToBytes(results); err != nil {
				log.Error("Failed to encode receipt", "err", err)
			} else {
				receipts = append(receipts, encoded)
				bytes += len(encoded)
			}
		}
		return p.SendReceiptsRLP(receipts)

	case msg.Code == ReceiptsMsg:
		// A batch of receipts arrived to one of our previous requests
		var receipts [][]*types.Receipt
		if err := msg.Decode(&receipts); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		// Deliver all to the downloader
		if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil {
			log.Debug("Failed to deliver receipts", "err", err)
		}

	case msg.Code == NewBlockHashesMsg:
		var announces newBlockHashesData
		if err := msg.Decode(&announces); err != nil {
			return errResp(ErrDecode, "%v: %v", msg, err)
		}
		// Mark the hashes as present at the remote node
		for _, block := range announces {
			p.MarkBlock(block.Hash)
		}
		// Schedule all the unknown hashes for retrieval
		unknown := make(newBlockHashesData, 0, len(announces))
		for _, block := range announces {
			if !pm.blockchain.HasBlock(block.Hash, block.Number) {
				unknown = append(unknown, block)
			}
		}
		for _, block := range unknown {
			pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
		}

	case msg.Code == NewBlockMsg:
		// Retrieve and decode the propagated block
		var request newBlockData
		if err := msg.Decode(&request); err != nil {
			return errResp(ErrDecode, "%v: %v", msg, err)
		}
		request.Block.ReceivedAt = msg.ReceivedAt
		request.Block.ReceivedFrom = p

		// Mark the peer as owning the block and schedule it for import.
		// SetVersion stamps the chain-configured block version and returns
		// the block's hash, which is what gets marked here.
		p.MarkBlock(request.Block.SetVersion(pm.chainconfig.GetBlockVersion(request.Block.Number())))
		pm.fetcher.Enqueue(p.id, request.Block)

		// Assuming the block is importable by the peer, but possibly not yet done so,
		// calculate the head hash and TD that the peer truly must have.
		var (
			trueHead = request.Block.ParentHash()
			trueTD   = new(big.Int).Sub(request.TD, request.Block.Difficulty())
		)
		// Update the peers total difficulty if better than the previous
		if _, td := p.Head(); trueTD.Cmp(td) > 0 {
			p.SetHead(trueHead, trueTD)

			// Schedule a sync if above ours. Note, this will not fire a sync for a gap of
			// a singe block (as the true TD is below the propagated block), however this
			// scenario should easily be covered by the fetcher.
			currentBlock := pm.blockchain.CurrentBlock()
			if trueTD.Cmp(pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())) > 0 {
				go pm.synchronise(p)
			}
		}

	case msg.Code == TxMsg:
		// Transactions arrived, make sure we have a valid and fresh chain to handle them
		if atomic.LoadUint32(&pm.acceptTxs) == 0 {
			break
		}
		// Transactions can be processed, parse all of them and deliver to the pool
		var txs []*types.Transaction
		if err := msg.Decode(&txs); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		for i, tx := range txs {
			// Validate and mark the remote transaction
			if tx == nil {
				return errResp(ErrDecode, "transaction %d is nil", i)
			}
			p.MarkTransaction(tx.Hash())
		}
		pm.txpool.AddRemotes(txs)

	default:
		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
	}
	return nil
}
   626  
// BroadcastBlock will either propagate a block to a subset of its peers, or
// will only announce its availability (depending what's requested). With
// propagate set, the full block is sent to sqrt(len(peers)) peers; otherwise
// only the hash/number pair is announced, and only for blocks already in the
// local chain.
func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
	hash := block.Hash()
	peers := pm.peers.PeersWithoutBlock(hash)

	// If propagation is requested, send to a subset of the peers
	if propagate {
		// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
		var td *big.Int
		if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil {
			td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))
		} else {
			// Without a known parent we cannot compute the TD; refuse to relay.
			log.Error("Propagating dangling block", "number", block.Number(), "hash", hash)
			return
		}
		// Send the block to a subset (square root) of our peers
		transfer := peers[:int(math.Sqrt(float64(len(peers))))]
		for _, peer := range transfer {
			peer.SendNewBlock(block, td)
		}
		log.Trace("Propagated block", "hash", hash, "recipients", len(transfer), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
		return
	}
	// Otherwise if the block is indeed in our own chain, announce it
	if pm.blockchain.HasBlock(hash, block.NumberU64()) {
		for _, peer := range peers {
			peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
		}
		log.Trace("Announced block", "hash", hash, "recipients", len(peers), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
	}
}
   659  
   660  // BroadcastTx will propagate a transaction to all peers which are not known to
   661  // already have the given transaction.
   662  func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction) {
   663  	// Broadcast transaction to a batch of peers not knowing about it
   664  	peers := pm.peers.PeersWithoutTx(hash)
   665  	//FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
   666  	for _, peer := range peers {
   667  		peer.SendTransactions(types.Transactions{tx})
   668  	}
   669  	log.Trace("Broadcast transaction", "hash", hash, "recipients", len(peers))
   670  }
   671  
   672  // Mined broadcast loop
   673  func (self *ProtocolManager) minedBroadcastLoop() {
   674  	// automatically stops if unsubscribe
   675  	for obj := range self.minedBlockSub.Chan() {
   676  		switch ev := obj.Data.(type) {
   677  		case core.NewMinedBlockEvent:
   678  			self.BroadcastBlock(ev.Block, true)  // First propagate block to peers
   679  			self.BroadcastBlock(ev.Block, false) // Only then announce to the rest
   680  		}
   681  	}
   682  }
   683  
   684  func (self *ProtocolManager) txBroadcastLoop() {
   685  	for {
   686  		select {
   687  		case event := <-self.txCh:
   688  			self.BroadcastTx(event.Tx.Hash(), event.Tx)
   689  
   690  		// Err() channel will be closed when unsubscribing.
   691  		case <-self.txSub.Err():
   692  			return
   693  		}
   694  	}
   695  }
   696  
// NodeInfo represents a short summary of the Aquachain sub-protocol metadata
// known about the host peer. It is what the p2p server's NodeInfo callback
// returns for this protocol.
type NodeInfo struct {
	Network    uint64              `json:"network"`    // Aquachain network ID
	Difficulty *big.Int            `json:"difficulty"` // Total difficulty of the host's blockchain
	Genesis    common.Hash         `json:"genesis"`    // SHA3 hash of the host's genesis block
	Config     *params.ChainConfig `json:"config"`     // Chain configuration for the fork rules
	Head       common.Hash         `json:"head"`       // SHA3 hash of the host's best owned block
}
   706  
   707  // NodeInfo retrieves some protocol metadata about the running host node.
   708  func (self *ProtocolManager) NodeInfo() *NodeInfo {
   709  	currentBlock := self.blockchain.CurrentBlock()
   710  	return &NodeInfo{
   711  		Network:    self.networkId,
   712  		Difficulty: self.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64()),
   713  		Genesis:    self.blockchain.Genesis().Hash(),
   714  		Config:     self.blockchain.Config(),
   715  		Head:       currentBlock.Hash(),
   716  	}
   717  }