github.com/aquanetwork/aquachain@v1.7.8/aqua/handler.go (about)

     1  // Copyright 2015 The aquachain Authors
     2  // This file is part of the aquachain library.
     3  //
     4  // The aquachain library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The aquachain library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the aquachain library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package aqua
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"math"
    24  	"math/big"
    25  	"sync"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"gitlab.com/aquachain/aquachain/aqua/downloader"
    30  	"gitlab.com/aquachain/aquachain/aqua/event"
    31  	"gitlab.com/aquachain/aquachain/aqua/fetcher"
    32  	"gitlab.com/aquachain/aquachain/aquadb"
    33  	"gitlab.com/aquachain/aquachain/common"
    34  	"gitlab.com/aquachain/aquachain/common/log"
    35  	"gitlab.com/aquachain/aquachain/consensus"
    36  	"gitlab.com/aquachain/aquachain/core"
    37  	"gitlab.com/aquachain/aquachain/core/types"
    38  	"gitlab.com/aquachain/aquachain/p2p"
    39  	"gitlab.com/aquachain/aquachain/p2p/discover"
    40  	"gitlab.com/aquachain/aquachain/params"
    41  	"gitlab.com/aquachain/aquachain/rlp"
    42  )
    43  
const (
	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header (used for response sizing)

	// txChanSize is the size of the channel listening to TxPreEvent.
	// The number is referenced from the size of the tx pool.
	txChanSize = 4096
)
    52  
// errIncompatibleConfig is returned if the requested protocols and configs are
// not compatible (low protocol version restrictions and high requirements),
// i.e. no sub-protocol could be initialised at all.
var errIncompatibleConfig = errors.New("incompatible configuration")
    56  
    57  func errResp(code errCode, format string, v ...interface{}) error {
    58  	return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
    59  }
    60  
// ProtocolManager implements the AquaChain wire protocol: it tracks the
// connected peer set, coordinates chain synchronisation through the
// downloader and fetcher, and broadcasts new transactions and mined blocks.
type ProtocolManager struct {
	networkId uint64

	fastSync  uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
	acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)

	txpool      txPool
	blockchain  *core.BlockChain
	chainconfig *params.ChainConfig
	maxPeers    int

	downloader *downloader.Downloader
	fetcher    *fetcher.Fetcher
	peers      *peerSet

	SubProtocols []p2p.Protocol

	eventMux      *event.TypeMux
	txCh          chan core.TxPreEvent
	txSub         event.Subscription
	minedBlockSub *event.TypeMuxSubscription

	// channels for fetcher, syncer, txsyncLoop
	newPeerCh   chan *peer
	txsyncCh    chan *txsync
	quitSync    chan struct{}
	noMorePeers chan struct{}

	// wait group is used for graceful shutdowns during downloading
	// and processing
	wg sync.WaitGroup
}
    93  
// NewProtocolManager returns a new aquachain sub-protocol manager, capable of
// communicating with peers on the aquachain network identified by networkId.
func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkId uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb aquadb.Database) (*ProtocolManager, error) {
	// Create the protocol manager with the base fields
	manager := &ProtocolManager{
		networkId:   networkId,
		eventMux:    mux,
		txpool:      txpool,
		blockchain:  blockchain,
		chainconfig: config,
		peers:       newPeerSet(),
		newPeerCh:   make(chan *peer),
		noMorePeers: make(chan struct{}),
		txsyncCh:    make(chan *txsync),
		quitSync:    make(chan struct{}),
	}
	// Figure out whether to allow fast sync or not: fast sync is only safe
	// when starting from an empty chain.
	if mode == downloader.FastSync && blockchain.CurrentBlock().NumberU64() > 0 {
		log.Warn("Blockchain not empty, fast sync disabled")
		mode = downloader.FullSync
	}
	if mode == downloader.FastSync {
		manager.fastSync = uint32(1)
	}
	// Initiate a sub-protocol for every implemented version we can handle
	manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions))
	for i, version := range ProtocolVersions {
		// Skip protocol version if incompatible with the mode of operation
		if mode == downloader.FastSync && version < aqua64 {
			continue
		}
		// Compatible; initialise the sub-protocol
		version := version // Closure for the run
		manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
			Name:    ProtocolName,
			Version: version,
			Length:  ProtocolLengths[i],
			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
				peer := manager.newPeer(int(version), p, rw)
				select {
				case manager.newPeerCh <- peer:
					manager.wg.Add(1)
					defer manager.wg.Done()
					return manager.handle(peer)
				case <-manager.quitSync:
					return p2p.DiscQuitting
				}
			},
			NodeInfo: func() interface{} {
				return manager.NodeInfo()
			},
			PeerInfo: func(id discover.NodeID) interface{} {
				// Peer IDs are keyed by the first 8 bytes, hex-encoded.
				if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
					return p.Info()
				}
				return nil
			},
		})
	}
	if len(manager.SubProtocols) == 0 {
		return nil, errIncompatibleConfig
	}
	// Construct the different synchronisation mechanisms
	manager.downloader = downloader.New(mode, chaindb, manager.eventMux, blockchain, nil, manager.removePeer)

	validator := func(header *types.Header) error {
		header.Version = manager.chainconfig.GetBlockVersion(header.Number)
		return engine.VerifyHeader(blockchain, header, true)
	}
	heighter := func() uint64 {
		return blockchain.CurrentBlock().NumberU64()
	}
	inserter := func(blocks types.Blocks) (int, error) {
		// If fast sync is running, deny importing weird blocks
		if atomic.LoadUint32(&manager.fastSync) == 1 {
			log.Warn("Discarded bad propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
			return 0, nil
		}
		atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
		return manager.blockchain.InsertChain(blocks)
	}
	manager.fetcher = fetcher.New(config.GetBlockVersion, blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)

	return manager, nil
}
   179  
   180  func (pm *ProtocolManager) removePeer(id string) {
   181  	// Short circuit if the peer was already removed
   182  	peer := pm.peers.Peer(id)
   183  	if peer == nil {
   184  		return
   185  	}
   186  	log.Debug("Removing AquaChain peer", "peer", id)
   187  
   188  	// Unregister the peer from the downloader and AquaChain peer set
   189  	pm.downloader.UnregisterPeer(id)
   190  	if err := pm.peers.Unregister(id); err != nil {
   191  		log.Error("Peer removal failed", "peer", id, "err", err)
   192  	}
   193  	// Hard disconnect at the networking layer
   194  	if peer != nil {
   195  		peer.Peer.Disconnect(p2p.DiscUselessPeer)
   196  	}
   197  }
   198  
// Start launches the protocol manager's background goroutines: the
// transaction and mined-block broadcast loops plus the sync handlers.
// maxPeers caps the number of peers accepted by handle.
func (pm *ProtocolManager) Start(maxPeers int) {
	pm.maxPeers = maxPeers

	// broadcast transactions
	pm.txCh = make(chan core.TxPreEvent, txChanSize)
	pm.txSub = pm.txpool.SubscribeTxPreEvent(pm.txCh)
	go pm.txBroadcastLoop()

	// broadcast mined blocks
	pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{})
	go pm.minedBroadcastLoop()

	// start sync handlers
	go pm.syncer()
	go pm.txsyncLoop()
}
   215  
// Stop performs an orderly shutdown of the protocol manager. The sequence
// matters: broadcast loops first, then the sync loop, then fetcher/txsync,
// and finally the peer sessions — after which we wait for all goroutines.
func (pm *ProtocolManager) Stop() {
	log.Info("Stopping AquaChain protocol")

	pm.txSub.Unsubscribe()         // quits txBroadcastLoop
	pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop

	// Quit the sync loop.
	// After this send has completed, no new peers will be accepted.
	pm.noMorePeers <- struct{}{}

	// Quit fetcher, txsyncLoop.
	close(pm.quitSync)

	// Disconnect existing sessions.
	// This also closes the gate for any new registrations on the peer set.
	// sessions which are already established but not added to pm.peers yet
	// will exit when they try to register.
	pm.peers.Close()

	// Wait for all peer handler goroutines and the loops to come down.
	pm.wg.Wait()

	log.Info("AquaChain protocol stopped")
}
   240  
   241  func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
   242  	return newPeer(pv, p, newMeteredMsgWriter(rw))
   243  }
   244  
// handle is the callback invoked to manage the life cycle of an aqua peer. When
// this function terminates, the peer is disconnected.
func (pm *ProtocolManager) handle(p *peer) error {
	// Reject the session outright if we are already at the peer cap.
	if pm.peers.Len() >= pm.maxPeers {
		return p2p.DiscTooManyPeers
	}
	p.Log().Trace("AquaChain peer connected", "name", p.Name())

	// Execute the AquaChain handshake
	var (
		genesis = pm.blockchain.Genesis()
		head    = pm.blockchain.CurrentHeader()
		hash    = head.Hash()
		number  = head.Number.Uint64()
		td      = pm.blockchain.GetTd(hash, number)
	)
	if err := p.Handshake(pm.networkId, td, hash, genesis.Hash()); err != nil {
		p.Log().Trace("AquaChain handshake failed", "err", err)
		return err
	}
	// Switch the metered connection (if any) over to the negotiated version.
	if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
		rw.Init(p.version)
	}
	// Register the peer locally
	if err := pm.peers.Register(p); err != nil {
		p.Log().Error("AquaChain peer registration failed", "err", err)
		return err
	}
	defer pm.removePeer(p.id)

	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
	if err := pm.downloader.RegisterPeer(p.id, p.version, p); err != nil {
		return err
	}
	p.Log().Debug("AquaChain peer registered", "name", p.Name())
	// Propagate existing transactions. new transactions appearing
	// after this will be sent via broadcasts.
	pm.syncTransactions(p)

	// main loop. handle incoming messages until the peer errors out.
	for {
		if err := pm.handleMsg(p); err != nil {
			p.Log().Debug("AquaChain message handling failed", "err", err)
			return err
		}
	}
}
   292  
   293  // handleMsg is invoked whenever an inbound message is received from a remote
   294  // peer. The remote connection is torn down upon returning any error.
   295  func (pm *ProtocolManager) handleMsg(p *peer) error {
   296  	// Read the next message from the remote peer, and ensure it's fully consumed
   297  	msg, err := p.rw.ReadMsg()
   298  	if err != nil {
   299  		return err
   300  	}
   301  	if msg.Size > ProtocolMaxMsgSize {
   302  		return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
   303  	}
   304  	defer msg.Discard()
   305  
   306  	// Handle the message depending on its contents
   307  	switch {
   308  	case msg.Code == StatusMsg:
   309  		// Status messages should never arrive after the handshake
   310  		return errResp(ErrExtraStatusMsg, "uncontrolled status message")
   311  
   312  	// Block header query, collect the requested headers and reply
   313  	case msg.Code == GetBlockHeadersMsg:
   314  		// Decode the complex header query
   315  		var query getBlockHeadersData
   316  		if err := msg.Decode(&query); err != nil {
   317  			return errResp(ErrDecode, "%v: %v", msg, err)
   318  		}
   319  		hashMode := query.Origin.Hash != (common.Hash{})
   320  
   321  		// Gather headers until the fetch or network limits is reached
   322  		var (
   323  			bytes   common.StorageSize
   324  			headers []*types.Header
   325  			unknown bool
   326  		)
   327  		for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
   328  			// Retrieve the next header satisfying the query
   329  			var origin *types.Header
   330  			if hashMode {
   331  				origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash)
   332  			} else {
   333  				origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number)
   334  			}
   335  			if origin == nil {
   336  				break
   337  			}
   338  			number := origin.Number.Uint64()
   339  			headers = append(headers, origin)
   340  			bytes += estHeaderRlpSize
   341  
   342  			// Advance to the next header of the query
   343  			switch {
   344  			case query.Origin.Hash != (common.Hash{}) && query.Reverse:
   345  				// Hash based traversal towards the genesis block
   346  				for i := 0; i < int(query.Skip)+1; i++ {
   347  					if header := pm.blockchain.GetHeader(query.Origin.Hash, number); header != nil {
   348  						query.Origin.Hash = header.ParentHash
   349  						number--
   350  					} else {
   351  						unknown = true
   352  						break
   353  					}
   354  				}
   355  			case query.Origin.Hash != (common.Hash{}) && !query.Reverse:
   356  				// Hash based traversal towards the leaf block
   357  				var (
   358  					current = origin.Number.Uint64()
   359  					next    = current + query.Skip + 1
   360  				)
   361  				if next <= current {
   362  					infos, _ := json.MarshalIndent(p.Peer.Info(), "", "  ")
   363  					p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
   364  					unknown = true
   365  				} else {
   366  					if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
   367  						if pm.blockchain.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash {
   368  							query.Origin.Hash = header.Hash()
   369  						} else {
   370  							unknown = true
   371  						}
   372  					} else {
   373  						unknown = true
   374  					}
   375  				}
   376  			case query.Reverse:
   377  				// Number based traversal towards the genesis block
   378  				if query.Origin.Number >= query.Skip+1 {
   379  					query.Origin.Number -= query.Skip + 1
   380  				} else {
   381  					unknown = true
   382  				}
   383  
   384  			case !query.Reverse:
   385  				// Number based traversal towards the leaf block
   386  				query.Origin.Number += query.Skip + 1
   387  			}
   388  		}
   389  		return p.SendBlockHeaders(headers)
   390  
   391  	case msg.Code == BlockHeadersMsg:
   392  		// A batch of headers arrived to one of our previous requests
   393  		var headers []*types.Header
   394  		if err := msg.Decode(&headers); err != nil {
   395  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   396  		}
   397  		// Filter out any explicitly requested headers, deliver the rest to the downloader
   398  		filter := len(headers) == 1
   399  		if filter {
   400  			headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
   401  		}
   402  		if len(headers) > 0 || !filter {
   403  			err := pm.downloader.DeliverHeaders(p.id, headers)
   404  			if err != nil {
   405  				log.Debug("Failed to deliver headers", "err", err)
   406  			}
   407  		}
   408  
   409  	case msg.Code == GetBlockBodiesMsg:
   410  		// Decode the retrieval message
   411  		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   412  		if _, err := msgStream.List(); err != nil {
   413  			return err
   414  		}
   415  		// Gather blocks until the fetch or network limits is reached
   416  		var (
   417  			hash   common.Hash
   418  			bytes  int
   419  			bodies []rlp.RawValue
   420  		)
   421  		for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch {
   422  			// Retrieve the hash of the next block
   423  			if err := msgStream.Decode(&hash); err == rlp.EOL {
   424  				break
   425  			} else if err != nil {
   426  				return errResp(ErrDecode, "msg %v: %v", msg, err)
   427  			}
   428  			// Retrieve the requested block body, stopping if enough was found
   429  			if data := pm.blockchain.GetBodyRLP(hash); len(data) != 0 {
   430  				bodies = append(bodies, data)
   431  				bytes += len(data)
   432  			}
   433  		}
   434  		return p.SendBlockBodiesRLP(bodies)
   435  
   436  	case msg.Code == BlockBodiesMsg:
   437  		// A batch of block bodies arrived to one of our previous requests
   438  		var request blockBodiesData
   439  		if err := msg.Decode(&request); err != nil {
   440  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   441  		}
   442  		// Deliver them all to the downloader for queuing
   443  		trasactions := make([][]*types.Transaction, len(request))
   444  		uncles := make([][]*types.Header, len(request))
   445  
   446  		for i, body := range request {
   447  			trasactions[i] = body.Transactions
   448  			uncles[i] = body.Uncles
   449  		}
   450  		// Filter out any explicitly requested bodies, deliver the rest to the downloader
   451  		filter := len(trasactions) > 0 || len(uncles) > 0
   452  		if filter {
   453  			trasactions, uncles = pm.fetcher.FilterBodies(p.id, trasactions, uncles, time.Now())
   454  		}
   455  		if len(trasactions) > 0 || len(uncles) > 0 || !filter {
   456  			err := pm.downloader.DeliverBodies(p.id, trasactions, uncles)
   457  			if err != nil {
   458  				log.Debug("Failed to deliver bodies", "err", err)
   459  			}
   460  		}
   461  
   462  	case msg.Code == GetNodeDataMsg:
   463  		// Decode the retrieval message
   464  		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   465  		if _, err := msgStream.List(); err != nil {
   466  			return err
   467  		}
   468  		// Gather state data until the fetch or network limits is reached
   469  		var (
   470  			hash  common.Hash
   471  			bytes int
   472  			data  [][]byte
   473  		)
   474  		for bytes < softResponseLimit && len(data) < downloader.MaxStateFetch {
   475  			// Retrieve the hash of the next state entry
   476  			if err := msgStream.Decode(&hash); err == rlp.EOL {
   477  				break
   478  			} else if err != nil {
   479  				return errResp(ErrDecode, "msg %v: %v", msg, err)
   480  			}
   481  			// Retrieve the requested state entry, stopping if enough was found
   482  			if entry, err := pm.blockchain.TrieNode(hash); err == nil {
   483  				data = append(data, entry)
   484  				bytes += len(entry)
   485  			}
   486  		}
   487  		return p.SendNodeData(data)
   488  
   489  	case msg.Code == NodeDataMsg:
   490  		// A batch of node state data arrived to one of our previous requests
   491  		var data [][]byte
   492  		if err := msg.Decode(&data); err != nil {
   493  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   494  		}
   495  		// Deliver all to the downloader
   496  		if err := pm.downloader.DeliverNodeData(p.id, data); err != nil {
   497  			log.Debug("Failed to deliver node state data", "err", err)
   498  		}
   499  
   500  	case msg.Code == GetReceiptsMsg:
   501  		// Decode the retrieval message
   502  		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   503  		if _, err := msgStream.List(); err != nil {
   504  			return err
   505  		}
   506  		// Gather state data until the fetch or network limits is reached
   507  		var (
   508  			hash     common.Hash
   509  			bytes    int
   510  			receipts []rlp.RawValue
   511  		)
   512  		for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptFetch {
   513  			// Retrieve the hash of the next block
   514  			if err := msgStream.Decode(&hash); err == rlp.EOL {
   515  				break
   516  			} else if err != nil {
   517  				return errResp(ErrDecode, "msg %v: %v", msg, err)
   518  			}
   519  			// Retrieve the requested block's receipts, skipping if unknown to us
   520  			results := pm.blockchain.GetReceiptsByHash(hash)
   521  			if results == nil {
   522  				if header := pm.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
   523  					continue
   524  				}
   525  			}
   526  			// If known, encode and queue for response packet
   527  			if encoded, err := rlp.EncodeToBytes(results); err != nil {
   528  				log.Error("Failed to encode receipt", "err", err)
   529  			} else {
   530  				receipts = append(receipts, encoded)
   531  				bytes += len(encoded)
   532  			}
   533  		}
   534  		return p.SendReceiptsRLP(receipts)
   535  
   536  	case msg.Code == ReceiptsMsg:
   537  		// A batch of receipts arrived to one of our previous requests
   538  		var receipts [][]*types.Receipt
   539  		if err := msg.Decode(&receipts); err != nil {
   540  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   541  		}
   542  		// Deliver all to the downloader
   543  		if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil {
   544  			log.Debug("Failed to deliver receipts", "err", err)
   545  		}
   546  
   547  	case msg.Code == NewBlockHashesMsg:
   548  		var announces newBlockHashesData
   549  		if err := msg.Decode(&announces); err != nil {
   550  			return errResp(ErrDecode, "%v: %v", msg, err)
   551  		}
   552  		// Mark the hashes as present at the remote node
   553  		for _, block := range announces {
   554  			p.MarkBlock(block.Hash)
   555  		}
   556  		// Schedule all the unknown hashes for retrieval
   557  		unknown := make(newBlockHashesData, 0, len(announces))
   558  		for _, block := range announces {
   559  			if !pm.blockchain.HasBlock(block.Hash, block.Number) {
   560  				unknown = append(unknown, block)
   561  			}
   562  		}
   563  		for _, block := range unknown {
   564  			pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
   565  		}
   566  
   567  	case msg.Code == NewBlockMsg:
   568  		// Retrieve and decode the propagated block
   569  		var request newBlockData
   570  		if err := msg.Decode(&request); err != nil {
   571  			return errResp(ErrDecode, "%v: %v", msg, err)
   572  		}
   573  		request.Block.ReceivedAt = msg.ReceivedAt
   574  		request.Block.ReceivedFrom = p
   575  
   576  		// Mark the peer as owning the block and schedule it for import
   577  		p.MarkBlock(request.Block.SetVersion(pm.chainconfig.GetBlockVersion(request.Block.Number())))
   578  		pm.fetcher.Enqueue(p.id, request.Block)
   579  
   580  		// Assuming the block is importable by the peer, but possibly not yet done so,
   581  		// calculate the head hash and TD that the peer truly must have.
   582  		var (
   583  			trueHead = request.Block.ParentHash()
   584  			trueTD   = new(big.Int).Sub(request.TD, request.Block.Difficulty())
   585  		)
   586  		// Update the peers total difficulty if better than the previous
   587  		if _, td := p.Head(); trueTD.Cmp(td) > 0 {
   588  			p.SetHead(trueHead, trueTD)
   589  
   590  			// Schedule a sync if above ours. Note, this will not fire a sync for a gap of
   591  			// a singe block (as the true TD is below the propagated block), however this
   592  			// scenario should easily be covered by the fetcher.
   593  			currentBlock := pm.blockchain.CurrentBlock()
   594  			if trueTD.Cmp(pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())) > 0 {
   595  				go pm.synchronise(p)
   596  			}
   597  		}
   598  
   599  	case msg.Code == TxMsg:
   600  		// Transactions arrived, make sure we have a valid and fresh chain to handle them
   601  		if atomic.LoadUint32(&pm.acceptTxs) == 0 {
   602  			break
   603  		}
   604  		// Transactions can be processed, parse all of them and deliver to the pool
   605  		var txs []*types.Transaction
   606  		if err := msg.Decode(&txs); err != nil {
   607  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   608  		}
   609  		for i, tx := range txs {
   610  			// Validate and mark the remote transaction
   611  			if tx == nil {
   612  				return errResp(ErrDecode, "transaction %d is nil", i)
   613  			}
   614  			p.MarkTransaction(tx.Hash())
   615  		}
   616  		pm.txpool.AddRemotes(txs)
   617  
   618  	default:
   619  		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
   620  	}
   621  	return nil
   622  }
   623  
// BroadcastBlock will either propagate a block to a subset of its peers, or
// will only announce its availability (depending what's requested).
func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
	hash := block.Hash()
	peers := pm.peers.PeersWithoutBlock(hash)

	// If propagation is requested, send to a subset of the peers
	if propagate {
		// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
		var td *big.Int
		if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil {
			td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))
		} else {
			log.Error("Propagating dangling block", "number", block.Number(), "hash", hash)
			return
		}
		// Send the block to a square-root sized subset of our peers to bound
		// the bandwidth used; the rest learn of it via hash announcements.
		transfer := peers[:int(math.Sqrt(float64(len(peers))))]
		for _, peer := range transfer {
			peer.SendNewBlock(block, td)
		}
		log.Trace("Propagated block", "hash", hash, "recipients", len(transfer), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
		return
	}
	// Otherwise if the block is indeed in our own chain, announce it
	if pm.blockchain.HasBlock(hash, block.NumberU64()) {
		for _, peer := range peers {
			peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
		}
		log.Trace("Announced block", "hash", hash, "recipients", len(peers), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
	}
}
   656  
   657  // BroadcastTx will propagate a transaction to all peers which are not known to
   658  // already have the given transaction.
   659  func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction) {
   660  	// Broadcast transaction to a batch of peers not knowing about it
   661  	peers := pm.peers.PeersWithoutTx(hash)
   662  	//FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
   663  	for _, peer := range peers {
   664  		peer.SendTransactions(types.Transactions{tx})
   665  	}
   666  	log.Trace("Broadcast transaction", "hash", hash, "recipients", len(peers))
   667  }
   668  
   669  // Mined broadcast loop
   670  func (self *ProtocolManager) minedBroadcastLoop() {
   671  	// automatically stops if unsubscribe
   672  	for obj := range self.minedBlockSub.Chan() {
   673  		switch ev := obj.Data.(type) {
   674  		case core.NewMinedBlockEvent:
   675  			self.BroadcastBlock(ev.Block, true)  // First propagate block to peers
   676  			self.BroadcastBlock(ev.Block, false) // Only then announce to the rest
   677  		}
   678  	}
   679  }
   680  
   681  func (self *ProtocolManager) txBroadcastLoop() {
   682  	for {
   683  		select {
   684  		case event := <-self.txCh:
   685  			self.BroadcastTx(event.Tx.Hash(), event.Tx)
   686  
   687  		// Err() channel will be closed when unsubscribing.
   688  		case <-self.txSub.Err():
   689  			return
   690  		}
   691  	}
   692  }
   693  
// NodeInfo represents a short summary of the AquaChain sub-protocol metadata
// known about the host peer. It is served through the p2p NodeInfo callback.
type NodeInfo struct {
	Network    uint64              `json:"network"`    // AquaChain network ID
	Difficulty *big.Int            `json:"difficulty"` // Total difficulty of the host's blockchain
	Genesis    common.Hash         `json:"genesis"`    // SHA3 hash of the host's genesis block
	Config     *params.ChainConfig `json:"config"`     // Chain configuration for the fork rules
	Head       common.Hash         `json:"head"`       // SHA3 hash of the host's best owned block
}
   703  
   704  // NodeInfo retrieves some protocol metadata about the running host node.
   705  func (self *ProtocolManager) NodeInfo() *NodeInfo {
   706  	currentBlock := self.blockchain.CurrentBlock()
   707  	return &NodeInfo{
   708  		Network:    self.networkId,
   709  		Difficulty: self.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64()),
   710  		Genesis:    self.blockchain.Genesis().Hash(),
   711  		Config:     self.blockchain.Config(),
   712  		Head:       currentBlock.Hash(),
   713  	}
   714  }