github.com/dominant-strategies/go-quai@v0.28.2/eth/handler.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
	"errors"
	"math"
	"math/big"
	"math/rand"
	"sync"
	"sync/atomic"
	"time"

	"github.com/dominant-strategies/go-quai/common"
	"github.com/dominant-strategies/go-quai/core"
	"github.com/dominant-strategies/go-quai/core/forkid"
	"github.com/dominant-strategies/go-quai/core/types"
	"github.com/dominant-strategies/go-quai/eth/downloader"
	"github.com/dominant-strategies/go-quai/eth/fetcher"
	"github.com/dominant-strategies/go-quai/eth/protocols/eth"
	"github.com/dominant-strategies/go-quai/ethdb"
	"github.com/dominant-strategies/go-quai/event"
	"github.com/dominant-strategies/go-quai/log"
	"github.com/dominant-strategies/go-quai/p2p"
	lru "github.com/hashicorp/golang-lru"
)

const (
	// txChanSize is the size of the channel listening to NewTxsEvent.
	// The number is referenced from the size of the tx pool.
	txChanSize = 4096

	// missingBlockChanSize is the size of the channel listening to the MissingBlockEvent.
	missingBlockChanSize = 60

	// minPeerSend is the threshold for propagating block updates. If the
	// sqrt of len(peers) is less than minPeerSend, the block is sent to
	// all peers; otherwise it is sent to sqrt of len(peers) peers.
	minPeerSend = 5

	// minPeerRequest is the maximum number of peers that a missing block
	// is requested from in missingBlockLoop.
	minPeerRequest = 3

	// minPeerSendTx is the minimum number of peers that will receive a new transaction.
	minPeerSendTx = 2

	// c_broadcastCacheSize is the max number of broadcast block hashes to be kept for logging
	c_broadcastCacheSize = 10

	// c_subSyncCacheSize is the max number of block hashes requested from peers
	c_subSyncCacheSize = 100000
)
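
// As a worked example of the thresholds above: with 100 peers,
// sqrt(100) = 10 >= minPeerSend, so a freshly mined block is propagated in
// full to 10 peers (and announced by hash to the remaining peers); with 16
// peers, sqrt(16) = 4 < minPeerSend, so the block is propagated to all 16.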

// txPool defines the methods needed from a transaction pool implementation to
// support all the operations needed by the Quai chain protocols.
type txPool interface {
	// Has returns an indicator whether txpool has a transaction
	// cached with the given hash.
	Has(hash common.Hash) bool

	// Get retrieves the transaction from the local txpool with the given
	// tx hash.
	Get(hash common.Hash) *types.Transaction

	// AddRemotes should add the given transactions to the pool.
	AddRemotes([]*types.Transaction) []error

	// TxPoolPending should return pending transactions.
	// The slice should be modifiable by the caller.
	TxPoolPending(enforceTips bool, etxSet types.EtxSet) (map[common.AddressBytes]types.Transactions, error)

	// SubscribeNewTxsEvent should return an event subscription of
	// NewTxsEvent and send events to the given channel.
	SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
}

// handlerConfig is the collection of initialization parameters to create a full
// node network handler.
type handlerConfig struct {
	Database      ethdb.Database         // Database for direct sync insertions
	Core          *core.Core             // Core to serve data from
	TxPool        txPool                 // Transaction pool to propagate from
	Network       uint64                 // Network identifier to advertise
	Sync          downloader.SyncMode    // Whether to fast sync or full sync
	BloomCache    uint64                 // Megabytes to alloc for fast sync bloom
	EventMux      *event.TypeMux         // Legacy event mux, deprecate for `feed`
	Whitelist     map[uint64]common.Hash // Hard coded whitelist for sync-challenged nodes
	SlicesRunning []common.Location      // Slices run by the node
}

type handler struct {
	networkID     uint64
	forkFilter    forkid.Filter     // Fork ID filter, constant across the lifetime of the node
	slicesRunning []common.Location // Slices running on the node

	acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)

	database ethdb.Database
	txpool   txPool
	core     *core.Core
	maxPeers int

	downloader   *downloader.Downloader
	blockFetcher *fetcher.BlockFetcher
	txFetcher    *fetcher.TxFetcher
	peers        *peerSet

	eventMux        *event.TypeMux
	txsCh           chan core.NewTxsEvent
	txsSub          event.Subscription
	minedBlockSub   *event.TypeMuxSubscription
	missingBlockCh  chan types.BlockRequest
	missingBlockSub event.Subscription
	subSyncQueue    *lru.Cache

	whitelist map[uint64]common.Hash

	// channels for fetcher, syncer, txsyncLoop
	txsyncCh chan *txsync
	quitSync chan struct{}

	chainSync *chainSyncer
	wg        sync.WaitGroup
	peerWG    sync.WaitGroup

	broadcastCache *lru.Cache
}

// newHandler returns a handler for all Quai chain management protocols.
func newHandler(config *handlerConfig) (*handler, error) {
	nodeCtx := common.NodeLocation.Context()
	// Create the protocol manager with the base fields
	if config.EventMux == nil {
		config.EventMux = new(event.TypeMux) // Nicety initialization for tests
	}

	h := &handler{
		networkID:     config.Network,
		slicesRunning: config.SlicesRunning,
		forkFilter:    forkid.NewFilter(config.Core),
		eventMux:      config.EventMux,
		database:      config.Database,
		txpool:        config.TxPool,
		core:          config.Core,
		peers:         newPeerSet(),
		whitelist:     config.Whitelist,
		txsyncCh:      make(chan *txsync),
		quitSync:      make(chan struct{}),
	}

	broadcastCache, _ := lru.New(c_broadcastCacheSize)
	h.broadcastCache = broadcastCache

	subSyncQueue, _ := lru.New(c_subSyncCacheSize)
	h.subSyncQueue = subSyncQueue

	h.downloader = downloader.New(h.eventMux, h.core, h.removePeer)

	// Construct the fetcher (short sync)
	validator := func(header *types.Header) error {
		return h.core.Engine().VerifyHeader(h.core, header)
	}
	verifySeal := func(header *types.Header) (common.Hash, error) {
		return h.core.Engine().VerifySeal(header)
	}
	heighter := func() uint64 {
		return h.core.CurrentHeader().NumberU64()
	}
	currentThresholdS := func() *big.Int {
		return h.core.Engine().IntrinsicLogS(h.core.CurrentHeader().Hash())
	}
	currentS := func() *big.Int {
		// This is the sync target entropy, which updates based on the block broadcasts
		entropy, _ := h.core.SyncTargetEntropy()
		return entropy
	}
	currentDifficulty := func() *big.Int {
		return h.core.CurrentHeader().Difficulty()
	}
	// writeBlock writes the block to the DB
	writeBlock := func(block *types.Block) {
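		// Assumption based on the acceptTxs flag's doc comment: once this
		// zone node (with state processing enabled) writes a block that is a
		// direct child of its current head, it is considered synced enough
		// to start accepting transactions from peers.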
		if nodeCtx == common.ZONE_CTX && block.NumberU64()-1 == h.core.CurrentHeader().NumberU64() && h.core.ProcessingState() {
			if atomic.LoadUint32(&h.acceptTxs) != 1 {
				atomic.StoreUint32(&h.acceptTxs, 1)
			}
		}
		h.core.WriteBlock(block)
	}
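	// The block fetcher wires together the callbacks defined above: header
	// validation, seal verification, the current height/entropy/difficulty
	// views, and writeBlock as the import action for fetched blocks.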
	h.blockFetcher = fetcher.NewBlockFetcher(h.core.GetBlockOrCandidateByHash, writeBlock, validator, verifySeal, h.BroadcastBlock, heighter, currentThresholdS, currentS, currentDifficulty, h.removePeer, h.core.IsBlockHashABadHash)

	// Only initialize the tx fetcher in zone chains that process state
	if nodeCtx == common.ZONE_CTX && h.core.ProcessingState() {
		fetchTx := func(peer string, hashes []common.Hash) error {
			p := h.peers.peer(peer)
			if p == nil {
				return errors.New("unknown peer")
			}
			return p.RequestTxs(hashes)
		}
		h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, h.txpool.AddRemotes, fetchTx)
	}
	h.chainSync = newChainSyncer(h)
	return h, nil
}

// runEthPeer registers an eth peer into the joint eth peerset, adds it to
// various subsystems and starts handling messages.
func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
	nodeCtx := common.NodeLocation.Context()
	if !h.chainSync.handlePeerEvent(peer) {
		return p2p.DiscQuitting
	}
	h.peerWG.Add(1)
	defer h.peerWG.Done()

	// Execute the Quai handshake
	var (
		genesis = h.core.Genesis()
		head    = h.core.CurrentHeader()
		hash    = head.Hash()
		entropy = h.core.CurrentLogEntropy()
	)
	forkID := forkid.NewID(h.core.Config(), h.core.Genesis().Hash(), h.core.CurrentHeader().Number().Uint64())
	if err := peer.Handshake(h.networkID, h.slicesRunning, entropy, hash, genesis.Hash(), forkID, h.forkFilter); err != nil {
		peer.Log().Debug("Quai handshake failed", "err", err)
		return err
	}
	reject := false // reserved peer slots
	// Ignore maxPeers if this is a trusted peer
	if !peer.Peer.Info().Network.Trusted {
		if reject || h.peers.len() >= h.maxPeers {
			return p2p.DiscTooManyPeers
		}
	}
	peer.Log().Debug("Quai peer connected", "name", peer.Name())

	// Register the peer locally
	if err := h.peers.registerPeer(peer); err != nil {
		peer.Log().Error("Quai peer registration failed", "err", err)
		return err
	}
	defer h.unregisterPeer(peer.ID())

	p := h.peers.peer(peer.ID())
	if p == nil {
		return errors.New("peer dropped during handling")
	}
	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
	if err := h.downloader.RegisterPeer(peer.ID(), peer.Version(), peer); err != nil {
		peer.Log().Error("Failed to register peer in eth syncer", "err", err)
		return err
	}

	h.chainSync.handlePeerEvent(peer)

	if nodeCtx == common.ZONE_CTX && h.core.ProcessingState() {
		// Propagate existing transactions. New transactions appearing
		// after this will be sent via broadcasts.
		h.syncTransactions(peer)
	}

	// If we have any explicit whitelist block hashes, request them
	for number := range h.whitelist {
		if err := peer.RequestHeadersByNumber(number, 1, 1, 0, false, false); err != nil {
			return err
		}
	}
	// Handle incoming messages until the connection is torn down
	return handler(peer)
}

// removePeer requests disconnection of a peer.
func (h *handler) removePeer(id string) {
	peer := h.peers.peer(id)
	if peer != nil {
		peer.Peer.Disconnect(p2p.DiscUselessPeer)
	}
}

// unregisterPeer removes a peer from the downloader, fetchers and main peer set.
func (h *handler) unregisterPeer(id string) {
	// Use the package logger; tests use short peer IDs, so don't assume the
	// id is long enough to truncate for display.
	logger := log.Log
	// Abort if the peer does not exist
	peer := h.peers.peer(id)
	if peer == nil {
		logger.Error("Quai peer removal failed", "err", errPeerNotRegistered)
		return
	}

	h.downloader.UnregisterPeer(id)
	nodeCtx := common.NodeLocation.Context()
	if nodeCtx == common.ZONE_CTX && h.core.ProcessingState() {
		h.txFetcher.Drop(id)
	}

	if err := h.peers.unregisterPeer(id); err != nil {
		logger.Error("Quai peer removal failed", "err", err)
	}
}

func (h *handler) Start(maxPeers int) {
	h.maxPeers = maxPeers

	nodeCtx := common.NodeLocation.Context()
	if nodeCtx == common.ZONE_CTX && h.core.ProcessingState() {
		// broadcast transactions
		h.wg.Add(1)
		h.txsCh = make(chan core.NewTxsEvent, txChanSize)
		h.txsSub = h.txpool.SubscribeNewTxsEvent(h.txsCh)
		go h.txBroadcastLoop()
	}

	h.wg.Add(1)
	h.missingBlockCh = make(chan types.BlockRequest, missingBlockChanSize)
	h.missingBlockSub = h.core.SubscribeMissingBlockEvent(h.missingBlockCh)
	go h.missingBlockLoop()

	// broadcast mined blocks
	h.wg.Add(1)
	h.minedBlockSub = h.eventMux.Subscribe(core.NewMinedBlockEvent{})
	go h.minedBroadcastLoop()

	// start sync handlers
	h.wg.Add(1)
	go h.chainSync.loop()
	if nodeCtx == common.ZONE_CTX && h.core.ProcessingState() {
		h.wg.Add(1)
		go h.txsyncLoop64() // Legacy initial tx exchange, drop with eth/64.
	}
}

func (h *handler) Stop() {
	nodeCtx := common.NodeLocation.Context()
	if nodeCtx == common.ZONE_CTX && h.core.ProcessingState() {
		h.txsSub.Unsubscribe() // quits txBroadcastLoop
	}
	h.minedBlockSub.Unsubscribe()   // quits blockBroadcastLoop
	h.missingBlockSub.Unsubscribe() // quits missingBlockLoop

	// Quit chainSync and txsync64.
	// After this is done, no new peers will be accepted.
	close(h.quitSync)
	h.wg.Wait()

	// Disconnect existing sessions.
	// This also closes the gate for any new registrations on the peer set.
	// Sessions which are already established but not added to h.peers yet
	// will exit when they try to register.
	h.peers.close()
	h.peerWG.Wait()

	log.Info("Quai protocol stopped")
}

// BroadcastBlock will either propagate a block to a subset of its peers, or
// will only announce its availability (depending on what's requested).
func (h *handler) BroadcastBlock(block *types.Block, propagate bool) {
	hash := block.Hash()
	peers := h.peers.peersWithoutBlock(hash)

	// If propagation is requested, send the block to a subset of the peers
	if propagate {
		// Send the block to a subset of our peers
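		// (propagating the full block to only ~sqrt(len(peers)) bounds
		// bandwidth, while the follow-up hash announcement lets the
		// remaining peers fetch the block on demand).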
		var peerThreshold int
		sqrtNumPeers := int(math.Sqrt(float64(len(peers))))
		if sqrtNumPeers < minPeerSend {
			peerThreshold = len(peers)
		} else {
			peerThreshold = sqrtNumPeers
		}
		transfer := peers[:peerThreshold]
		for _, peer := range transfer {
			currentHead := h.core.CurrentHeader()
			entropy := big.NewInt(0)
			if currentHead != nil {
				entropy = h.core.Engine().TotalLogS(currentHead)
			}
			peer.AsyncSendNewBlock(block, entropy)
		}
		log.Trace("Propagated block", "hash", hash, "recipients", len(transfer), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
		return
	}
	// Otherwise, if the block is indeed in our own chain, announce it
	if h.core.HasBlock(hash, block.NumberU64()) {
		for _, peer := range peers {
			peer.AsyncSendNewBlockHash(block)
		}
		log.Trace("Announced block", "hash", hash, "recipients", len(peers), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
	}
}

// BroadcastTransactions will propagate a batch of transactions
// - To a square root of all peers
// - And, separately, as announcements to all peers which are not known to
// already have the given transaction.
func (h *handler) BroadcastTransactions(txs types.Transactions) {
	var (
		annoCount   int // Count of announcements made
		annoPeers   int
		directCount int // Count of the txs sent directly to peers
		directPeers int // Count of the peers that were sent transactions directly

		txset = make(map[*ethPeer][]common.Hash) // Set peer->hash to transfer directly
		annos = make(map[*ethPeer][]common.Hash) // Set peer->hash to announce
	)
	// Broadcast transactions to a batch of peers not knowing about it
	for _, tx := range txs {
		peers := h.peers.peersWithoutTransaction(tx.Hash())
		// Send the tx unconditionally to a subset of our peers
		numDirect := int(math.Sqrt(float64(len(peers))))
		subset := peers[:numDirect]
		if len(subset) < minPeerSendTx {
			// If we have fewer peers than the minimum, send to all peers
			if len(peers) < minPeerSendTx {
				subset = peers
			} else {
				// If our subset is less than the minimum, send to the minimum
				subset = peers[:minPeerSendTx] // The high bound is exclusive
			}
		}
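		// Note that the announcement loop below always starts at numDirect,
		// so when the subset is widened to minPeerSendTx above, the peers in
		// peers[numDirect:minPeerSendTx] receive both the full transaction
		// and the hash announcement.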
		for _, peer := range subset {
			txset[peer] = append(txset[peer], tx.Hash())
		}
		// For the remaining peers, send announcement only
		for _, peer := range peers[numDirect:] {
			annos[peer] = append(annos[peer], tx.Hash())
		}
	}
	for peer, hashes := range txset {
		directPeers++
		directCount += len(hashes)
		peer.AsyncSendTransactions(hashes)
	}
	for peer, hashes := range annos {
		annoPeers++
		annoCount += len(hashes)
		peer.AsyncSendPooledTransactionHashes(hashes)
	}
	log.Debug("Transaction broadcast", "txs", len(txs),
		"announce packs", annoPeers, "announced hashes", annoCount,
		"tx packs", directPeers, "broadcast txs", directCount)
}

// minedBroadcastLoop sends mined blocks to connected peers.
func (h *handler) minedBroadcastLoop() {
	defer h.wg.Done()

	for obj := range h.minedBlockSub.Chan() {
		if ev, ok := obj.Data.(core.NewMinedBlockEvent); ok {
			h.BroadcastBlock(ev.Block, true)  // First propagate block to peers
			h.BroadcastBlock(ev.Block, false) // Only then announce to the rest
		}
	}
}

// txBroadcastLoop announces new transactions to connected peers.
func (h *handler) txBroadcastLoop() {
	defer h.wg.Done()
	for {
		select {
		case event := <-h.txsCh:
			h.BroadcastTransactions(event.Txs)
		case <-h.txsSub.Err():
			return
		}
	}
}

// missingBlockLoop requests missing blocks from connected peers whenever a
// MissingBlockEvent is received.
func (h *handler) missingBlockLoop() {
	defer h.wg.Done()
	for {
		select {
		case blockRequest := <-h.missingBlockCh:
			headerRequested := 0
			// Request the missing block from up to minPeerRequest peers
			allPeers := h.peers.allPeers()
			// Shuffle the peers so that the requests are spread across them
			rand.Shuffle(len(allPeers), func(i, j int) { allPeers[i], allPeers[j] = allPeers[j], allPeers[i] })

			for _, peer := range allPeers {
				log.Trace("Fetching the missing parent from", "peer", peer.ID(), "hash", blockRequest.Hash)
				_, _, peerEntropy, _ := peer.Head()
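				// Only ask peers whose advertised head entropy exceeds the
				// requested block's entropy; they are the ones that can be
				// expected to have the block.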
				if peerEntropy != nil {
					if peerEntropy.Cmp(blockRequest.Entropy) > 0 {
						peer.RequestBlockByHash(blockRequest.Hash)
						headerRequested++
					}
				}
				if headerRequested == minPeerRequest {
					break
				}
			}

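			// Remember the request in the sub-sync LRU (capped at
			// c_subSyncCacheSize); ContainsOrAdd avoids re-inserting a hash
			// that is already queued.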
			h.subSyncQueue.ContainsOrAdd(blockRequest.Hash, blockRequest)

		case <-h.missingBlockSub.Err():
			return
		}
	}
}