github.com/theQRL/go-zond@v0.2.1/zond/handler.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package zond

import (
	"errors"
	"math"
	"math/big"
	"sync"
	"sync/atomic"
	"time"

	"github.com/theQRL/go-zond/common"
	"github.com/theQRL/go-zond/core"
	"github.com/theQRL/go-zond/core/forkid"
	"github.com/theQRL/go-zond/core/rawdb"
	"github.com/theQRL/go-zond/core/txpool"
	"github.com/theQRL/go-zond/core/types"
	"github.com/theQRL/go-zond/crypto"
	"github.com/theQRL/go-zond/event"
	"github.com/theQRL/go-zond/log"
	"github.com/theQRL/go-zond/metrics"
	"github.com/theQRL/go-zond/p2p"
	"github.com/theQRL/go-zond/p2p/enode"
	"github.com/theQRL/go-zond/trie/triedb/pathdb"
	"github.com/theQRL/go-zond/zond/downloader"
	"github.com/theQRL/go-zond/zond/fetcher"
	"github.com/theQRL/go-zond/zond/protocols/snap"
	"github.com/theQRL/go-zond/zond/protocols/zond"
	"github.com/theQRL/go-zond/zonddb"
)

const (
	// txChanSize is the size of the channel listening for NewTxsEvent.
	// The number is referenced from the size of the tx pool.
	txChanSize = 4096

	// txMaxBroadcastSize is the maximum size of a transaction that will be
	// broadcast directly. Larger transactions are only announced and have to
	// be fetched by the peer.
	txMaxBroadcastSize = 4096
)

var syncChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the sync progress challenge

// txPool defines the methods needed from a transaction pool implementation to
// support all the operations needed by the Zond chain protocols.
type txPool interface {
	// Has reports whether the txpool contains a transaction
	// with the given hash.
	Has(hash common.Hash) bool

	// Get retrieves the transaction from the local txpool
	// with the given hash.
	Get(hash common.Hash) *types.Transaction

	// Add should add the given transactions to the pool.
	Add(txs []*types.Transaction, local bool, sync bool) []error

	// Pending should return pending transactions.
	// The slice should be modifiable by the caller.
	Pending(filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction

	// SubscribeTransactions subscribes to new transaction events. The subscriber
	// can decide whether to receive notifications only for newly seen transactions
	// or also for reorged out ones.
	SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription
}

// handlerConfig is the collection of initialization parameters to create a full
// node network handler.
type handlerConfig struct {
	NodeID         enode.ID               // P2P node ID used for tx propagation topology
	Database       zonddb.Database        // Database for direct sync insertions
	Chain          *core.BlockChain       // Blockchain to serve data from
	TxPool         txPool                 // Transaction pool to propagate from
	Network        uint64                 // Network identifier to advertise
	Sync           downloader.SyncMode    // Whether to snap or full sync
	BloomCache     uint64                 // Megabytes to alloc for snap sync bloom
	EventMux       *event.TypeMux         // Legacy event mux, deprecate for `feed`
	RequiredBlocks map[uint64]common.Hash // Hard coded map of required block hashes for sync challenges
}

type handler struct {
	nodeID     enode.ID
	networkID  uint64
	forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node

	snapSync atomic.Bool // Flag whether snap sync is enabled (gets disabled if we already have blocks)
	synced   atomic.Bool // Flag whether we're considered synchronised (enables transaction processing)

	database zonddb.Database
	txpool   txPool
	chain    *core.BlockChain
	maxPeers int

	downloader *downloader.Downloader
	txFetcher  *fetcher.TxFetcher
	peers      *peerSet

	eventMux *event.TypeMux
	txsCh    chan core.NewTxsEvent
	txsSub   event.Subscription

	requiredBlocks map[uint64]common.Hash

	// channels for fetcher, syncer, txsyncLoop
	quitSync chan struct{}

	wg sync.WaitGroup

	handlerStartCh chan struct{}
	handlerDoneCh  chan struct{}
}
// newHandler returns a handler for all Zond chain management protocols.
func newHandler(config *handlerConfig) (*handler, error) {
	// Create the protocol manager with the base fields
	if config.EventMux == nil {
		config.EventMux = new(event.TypeMux) // Nicety initialization for tests
	}
	h := &handler{
		nodeID:         config.NodeID,
		networkID:      config.Network,
		forkFilter:     forkid.NewFilter(config.Chain),
		eventMux:       config.EventMux,
		database:       config.Database,
		txpool:         config.TxPool,
		chain:          config.Chain,
		peers:          newPeerSet(),
		requiredBlocks: config.RequiredBlocks,
		quitSync:       make(chan struct{}),
		handlerDoneCh:  make(chan struct{}),
		handlerStartCh: make(chan struct{}),
	}
	if config.Sync == downloader.FullSync {
		// The database seems empty as the current block is the genesis. Yet the snap
		// block is ahead, so snap sync was enabled for this node at a certain point.
		// The scenarios where this can happen are:
		// * if the user manually (or via a bad block) rolled back a snap sync node
		//   below the sync point.
		// * the last snap sync was not finished while the user specified a full sync
		//   this time, but we don't have any recent state for full sync.
		// In these cases, however, it's safe to re-enable snap sync.
		fullBlock, snapBlock := h.chain.CurrentBlock(), h.chain.CurrentSnapBlock()
		if fullBlock.Number.Uint64() == 0 && snapBlock.Number.Uint64() > 0 {
			h.snapSync.Store(true)
			log.Warn("Switch sync mode from full sync to snap sync", "reason", "snap sync incomplete")
		} else if !h.chain.HasState(fullBlock.Root) {
			h.snapSync.Store(true)
			log.Warn("Switch sync mode from full sync to snap sync", "reason", "head state missing")
		}
	} else {
		head := h.chain.CurrentBlock()
		if head.Number.Uint64() > 0 && h.chain.HasState(head.Root) {
			// The database is not empty, so snap sync cannot be enabled; warn
			// and fall back to full sync.
			log.Warn("Switch sync mode from snap sync to full sync", "reason", "snap sync complete")
		} else {
			// If snap sync was requested and our database is empty, grant it
			h.snapSync.Store(true)
			log.Info("Enabled snap sync", "head", head.Number, "hash", head.Hash())
		}
	}
	// If snap sync is requested but snapshots are disabled, fail loudly
	if h.snapSync.Load() && config.Chain.Snapshots() == nil {
		return nil, errors.New("snap sync not supported with snapshots disabled")
	}
	// Construct the downloader (long sync)
	h.downloader = downloader.New(config.Database, h.eventMux, h.chain, h.removePeer, h.enableSyncedFeatures)

	fetchTx := func(peer string, hashes []common.Hash) error {
		p := h.peers.peer(peer)
		if p == nil {
			return errors.New("unknown peer")
		}
		return p.RequestTxs(hashes)
	}
	addTxs := func(txs []*types.Transaction) []error {
		return h.txpool.Add(txs, false, false)
	}
	h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, addTxs, fetchTx, h.removePeer)
	return h, nil
}

// protoTracker tracks the number of active protocol handlers.
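// It is the sole owner of the active-handler counter, so the counter needs no
// locking: incHandlers and decHandlers communicate with it purely via channels.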
func (h *handler) protoTracker() {
	defer h.wg.Done()
	var active int
	for {
		select {
		case <-h.handlerStartCh:
			active++
		case <-h.handlerDoneCh:
			active--
		case <-h.quitSync:
			// Wait for all active handlers to finish.
			for ; active > 0; active-- {
				<-h.handlerDoneCh
			}
			return
		}
	}
}

// incHandlers signals to increment the number of active handlers if not
// quitting.
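//
// The intended call pattern, as used by runZondPeer and runSnapExtension below:
//
//	if !h.incHandlers() {
//		return p2p.DiscQuitting
//	}
//	defer h.decHandlers()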
func (h *handler) incHandlers() bool {
	select {
	case h.handlerStartCh <- struct{}{}:
		return true
	case <-h.quitSync:
		return false
	}
}

// decHandlers signals to decrement the number of active handlers.
func (h *handler) decHandlers() {
	h.handlerDoneCh <- struct{}{}
}

// runZondPeer registers a zond peer into the joint zond/snap peerset, adds it to
// various subsystems and starts handling messages.
func (h *handler) runZondPeer(peer *zond.Peer, handler zond.Handler) error {
	if !h.incHandlers() {
		return p2p.DiscQuitting
	}
	defer h.decHandlers()

	// If the peer has a `snap` extension, wait for it to connect so we can have
	// a uniform initialization/teardown mechanism
	snap, err := h.peers.waitSnapExtension(peer)
	if err != nil {
		peer.Log().Error("Snapshot extension barrier failed", "err", err)
		return err
	}

	// Execute the Zond handshake
	var (
		genesis = h.chain.Genesis()
		head    = h.chain.CurrentHeader()
		hash    = head.Hash()
		number  = head.Number.Uint64()
	)
	forkID := forkid.NewID(h.chain.Config(), genesis, number, head.Time)
	if err := peer.Handshake(h.networkID, hash, genesis.Hash(), forkID, h.forkFilter); err != nil {
		peer.Log().Debug("Zond handshake failed", "err", err)
		return err
	}
	reject := false // reserved peer slots
	if h.snapSync.Load() {
		if snap == nil {
			// If we are running snap-sync, we want to reserve roughly half the peer
			// slots for peers supporting the snap protocol.
			// The logic here is: we only allow up to 5 more non-snap peers than snap peers.
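			// For example, with 25 already-connected peers of which 8 support snap,
			// 25-8 = 17 non-snap peers exceeds the 8+5 = 13 allowed, so this
			// non-snap peer is rejected.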
			if all, snp := h.peers.len(), h.peers.snapLen(); all-snp > snp+5 {
				reject = true
			}
		}
	}
	// Ignore maxPeers if this is a trusted peer
	if !peer.Peer.Info().Network.Trusted {
		if reject || h.peers.len() >= h.maxPeers {
			return p2p.DiscTooManyPeers
		}
	}
	peer.Log().Debug("Zond peer connected", "name", peer.Name())

	// Register the peer locally
	if err := h.peers.registerPeer(peer, snap); err != nil {
		peer.Log().Error("Zond peer registration failed", "err", err)
		return err
	}
	defer h.unregisterPeer(peer.ID())

	p := h.peers.peer(peer.ID())
	if p == nil {
		return errors.New("peer dropped during handling")
	}
	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
	if err := h.downloader.RegisterPeer(peer.ID(), peer.Version(), peer); err != nil {
		peer.Log().Error("Failed to register peer in zond syncer", "err", err)
		return err
	}
	if snap != nil {
		if err := h.downloader.SnapSyncer.Register(snap); err != nil {
			peer.Log().Error("Failed to register peer in snap syncer", "err", err)
			return err
		}
	}

	// Propagate existing transactions. New transactions appearing
	// after this will be sent via broadcasts.
	h.syncTransactions(peer)

	// Create a notification channel for pending requests if the peer goes down
	dead := make(chan struct{})
	defer close(dead)

	// If we have any explicit required block hashes, challenge the peer for them
	for number, hash := range h.requiredBlocks {
		resCh := make(chan *zond.Response)

		req, err := peer.RequestHeadersByNumber(number, 1, 0, false, resCh)
		if err != nil {
			return err
		}
		go func(number uint64, hash common.Hash, req *zond.Request) {
			// Ensure the request gets cancelled in case of error/drop
			defer req.Close()

			timeout := time.NewTimer(syncChallengeTimeout)
			defer timeout.Stop()

			select {
			case res := <-resCh:
				headers := ([]*types.Header)(*res.Res.(*zond.BlockHeadersRequest))
				if len(headers) == 0 {
					// Required blocks are allowed to be missing if the remote
					// node is not yet synced
					res.Done <- nil
					return
				}
				// Validate the header and either drop the peer or continue
				if len(headers) > 1 {
					res.Done <- errors.New("too many headers in required block response")
					return
				}
				if headers[0].Number.Uint64() != number || headers[0].Hash() != hash {
					peer.Log().Info("Required block mismatch, dropping peer", "number", number, "hash", headers[0].Hash(), "want", hash)
					res.Done <- errors.New("required block mismatch")
					return
				}
				peer.Log().Debug("Peer required block verified", "number", number, "hash", hash)
				res.Done <- nil
			case <-timeout.C:
				peer.Log().Warn("Required block challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name())
				h.removePeer(peer.ID())
			}
		}(number, hash, req)
	}
	// Handle incoming messages until the connection is torn down
	return handler(peer)
}

// runSnapExtension registers a `snap` peer into the joint zond/snap peerset and
// starts handling inbound messages. As `snap` is only a satellite protocol to
// `zond`, all subsystem registrations and lifecycle management will be done by
// the main `zond` handler to prevent strange races.
func (h *handler) runSnapExtension(peer *snap.Peer, handler snap.Handler) error {
	if !h.incHandlers() {
		return p2p.DiscQuitting
	}
	defer h.decHandlers()

	if err := h.peers.registerSnapExtension(peer); err != nil {
		if metrics.Enabled {
			if peer.Inbound() {
				snap.IngressRegistrationErrorMeter.Mark(1)
			} else {
				snap.EgressRegistrationErrorMeter.Mark(1)
			}
		}
		peer.Log().Debug("Snapshot extension registration failed", "err", err)
		return err
	}
	return handler(peer)
}

// removePeer requests disconnection of a peer.
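// Note that it does not unregister anything itself: once the disconnect tears
// down the peer's message loop, the deferred h.unregisterPeer in runZondPeer
// performs the actual cleanup.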
func (h *handler) removePeer(id string) {
	peer := h.peers.peer(id)
	if peer != nil {
		peer.Peer.Disconnect(p2p.DiscUselessPeer)
	}
}

// unregisterPeer removes a peer from the downloader, fetchers and main peer set.
func (h *handler) unregisterPeer(id string) {
	// Create a custom logger to avoid printing the entire id
	var logger log.Logger
	if len(id) < 16 {
		// Tests use short IDs, don't choke on them
		logger = log.New("peer", id)
	} else {
		logger = log.New("peer", id[:8])
	}
	// Abort if the peer does not exist
	peer := h.peers.peer(id)
	if peer == nil {
		logger.Error("Zond peer removal failed", "err", errPeerNotRegistered)
		return
	}
	// Remove the `zond` peer if it exists
	logger.Debug("Removing Zond peer", "snap", peer.snapExt != nil)

	// Remove the `snap` extension if it exists
	if peer.snapExt != nil {
		h.downloader.SnapSyncer.Unregister(id)
	}
	h.downloader.UnregisterPeer(id)
	h.txFetcher.Drop(id)

	if err := h.peers.unregisterPeer(id); err != nil {
		logger.Error("Zond peer removal failed", "err", err)
	}
}

func (h *handler) Start(maxPeers int) {
	h.maxPeers = maxPeers

	// broadcast and announce transactions (only new ones, not resurrected ones)
	h.wg.Add(1)
	h.txsCh = make(chan core.NewTxsEvent, txChanSize)
	h.txsSub = h.txpool.SubscribeTransactions(h.txsCh)
	go h.txBroadcastLoop()

	// start sync handlers
	h.txFetcher.Start()

	// start peer handler tracker
	h.wg.Add(1)
	go h.protoTracker()
}

func (h *handler) Stop() {
	h.txsSub.Unsubscribe() // quits txBroadcastLoop
	h.txFetcher.Stop()
	h.downloader.Terminate()

	// Quit chainSync and txsync64.
	// After this is done, no new peers will be accepted.
	close(h.quitSync)

	// Disconnect existing sessions.
	// This also closes the gate for any new registrations on the peer set.
	// Sessions which are already established but not added to h.peers yet
	// will exit when they try to register.
	h.peers.close()
	h.wg.Wait()

	log.Info("Zond protocol stopped")
}

// BroadcastTransactions will propagate a batch of transactions:
// - To roughly the square root of all peers, directly
// - And, separately, as announcements to all peers which are not known to
//   already have the given transaction.
func (h *handler) BroadcastTransactions(txs types.Transactions) {
	var (
		largeTxs int // Number of large transactions to announce only

		directCount int // Number of transactions sent directly to peers (duplicates included)
		annCount    int // Number of transactions announced across all peers (duplicates included)

		txset = make(map[*zondPeer][]common.Hash) // Set peer->hash to transfer directly
		annos = make(map[*zondPeer][]common.Hash) // Set peer->hash to announce
	)
	// Broadcast transactions to a batch of peers not knowing about it
	direct := big.NewInt(int64(math.Sqrt(float64(h.peers.len())))) // Approximate number of peers to broadcast to
	if direct.BitLen() == 0 {
		direct = big.NewInt(1)
	}
	total := new(big.Int).Exp(direct, big.NewInt(2), nil) // Stabilise total peer count a bit based on sqrt peers
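	// As a worked example: with 25 connected peers, direct = 5 and total = 25,
	// so each (node, peer, sender) triple hashed below lands in the direct
	// bucket with probability 5/25 = 1/sqrt(peers), i.e. a transaction is sent
	// in full to roughly sqrt(peers) peers and merely announced to the rest.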

	var (
		signer = types.LatestSignerForChainID(h.chain.Config().ChainID) // Don't care about chain status, we just need *a* sender
		hasher = crypto.NewKeccakState()
		hash   = make([]byte, 32)
	)
	for _, tx := range txs {
		var maybeDirect bool
		switch {
		case tx.Size() > txMaxBroadcastSize:
			largeTxs++
		default:
			maybeDirect = true
		}
		// Send the transaction (if it's small enough) directly to a subset of
		// the peers that have not received it yet, ensuring that the flow of
		// transactions is grouped by account to (try and) avoid nonce gaps.
		//
		// To do this, we hash the local enode ID together with the peer's
		// enode ID and the transaction sender, and broadcast if
		// `sha(self, peer, sender) mod peers < sqrt(peers)`.
		for _, peer := range h.peers.peersWithoutTransaction(tx.Hash()) {
			var broadcast bool
			if maybeDirect {
				hasher.Reset()
				hasher.Write(h.nodeID.Bytes())
				hasher.Write(peer.Node().ID().Bytes())

				from, _ := types.Sender(signer, tx) // Ignore error, we only use the addr as a propagation target splitter
				hasher.Write(from.Bytes())

				hasher.Read(hash)
				if new(big.Int).Mod(new(big.Int).SetBytes(hash), total).Cmp(direct) < 0 {
					broadcast = true
				}
			}
			if broadcast {
				txset[peer] = append(txset[peer], tx.Hash())
			} else {
				annos[peer] = append(annos[peer], tx.Hash())
			}
		}
	}
	for peer, hashes := range txset {
		directCount += len(hashes)
		peer.AsyncSendTransactions(hashes)
	}
	for peer, hashes := range annos {
		annCount += len(hashes)
		peer.AsyncSendPooledTransactionHashes(hashes)
	}
	log.Debug("Distributed transactions", "plaintxs", len(txs)-largeTxs, "largetxs", largeTxs,
		"bcastpeers", len(txset), "bcastcount", directCount, "annpeers", len(annos), "anncount", annCount)
}

// txBroadcastLoop announces new transactions to connected peers.
func (h *handler) txBroadcastLoop() {
	defer h.wg.Done()
	for {
		select {
		case event := <-h.txsCh:
			h.BroadcastTransactions(event.Txs)
		case <-h.txsSub.Err():
			return
		}
	}
}

// enableSyncedFeatures enables the post-sync functionalities when the initial
// sync is finished.
func (h *handler) enableSyncedFeatures() {
	// Mark the local node as synced.
	h.synced.Store(true)

	// If we were running snap sync and it finished, disable doing another
	// round on the next sync cycle
	if h.snapSync.Load() {
		log.Info("Snap sync complete, auto disabling")
		h.snapSync.Store(false)
	}
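	// For path-based state storage, shrink the trie database's dirty-node
	// buffer back to its default; a larger buffer is presumably configured
	// elsewhere during sync to batch writes.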
	if h.chain.TrieDB().Scheme() == rawdb.PathScheme {
		h.chain.TrieDB().SetBufferSize(pathdb.DefaultBufferSize)
	}
}