git.pirl.io/community/pirl@v0.0.0-20201111064343-9d3d31ff74be/eth/sync.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package eth
    18  
    19  import (
    20  	"math/rand"
    21  	"sync/atomic"
    22  	"time"
    23  
    24  	"git.pirl.io/community/pirl/common"
    25  	"git.pirl.io/community/pirl/core/types"
    26  	"git.pirl.io/community/pirl/eth/downloader"
    27  	"git.pirl.io/community/pirl/log"
    28  	"git.pirl.io/community/pirl/p2p/enode"
    29  )
    30  
const (
	forceSyncCycle      = 10 * time.Second // Time interval to force syncs, even if few peers are available
	minDesiredPeerCount = 5                // Amount of peers desired to start syncing

	// This is the target size for the packs of transactions sent by txsyncLoop.
	// A pack can get larger than this if a single transaction exceeds this size.
	txsyncPackSize = 100 * 1024
)
    39  
// txsync is a pending initial transaction sync: the set of pool transactions
// that still needs to be dripped to a newly connected (pre-eth/65) peer.
type txsync struct {
	p   *peer                // Target peer the transactions are destined for
	txs []*types.Transaction // Transactions remaining to be sent to the peer
}
    44  
    45  // syncTransactions starts sending all currently pending transactions to the given peer.
    46  func (pm *ProtocolManager) syncTransactions(p *peer) {
    47  	// Assemble the set of transaction to broadcast or announce to the remote
    48  	// peer. Fun fact, this is quite an expensive operation as it needs to sort
    49  	// the transactions if the sorting is not cached yet. However, with a random
    50  	// order, insertions could overflow the non-executable queues and get dropped.
    51  	//
    52  	// TODO(karalabe): Figure out if we could get away with random order somehow
    53  	var txs types.Transactions
    54  	pending, _ := pm.txpool.Pending()
    55  	for _, batch := range pending {
    56  		txs = append(txs, batch...)
    57  	}
    58  	if len(txs) == 0 {
    59  		return
    60  	}
    61  	// The eth/65 protocol introduces proper transaction announcements, so instead
    62  	// of dripping transactions across multiple peers, just send the entire list as
    63  	// an announcement and let the remote side decide what they need (likely nothing).
    64  	if p.version >= eth65 {
    65  		hashes := make([]common.Hash, len(txs))
    66  		for i, tx := range txs {
    67  			hashes[i] = tx.Hash()
    68  		}
    69  		p.AsyncSendPooledTransactionHashes(hashes)
    70  		return
    71  	}
    72  	// Out of luck, peer is running legacy protocols, drop the txs over
    73  	select {
    74  	case pm.txsyncCh <- &txsync{p: p, txs: txs}:
    75  	case <-pm.quitSync:
    76  	}
    77  }
    78  
    79  // txsyncLoop64 takes care of the initial transaction sync for each new
    80  // connection. When a new peer appears, we relay all currently pending
    81  // transactions. In order to minimise egress bandwidth usage, we send
    82  // the transactions in small packs to one peer at a time.
    83  func (pm *ProtocolManager) txsyncLoop64() {
    84  	var (
    85  		pending = make(map[enode.ID]*txsync)
    86  		sending = false               // whether a send is active
    87  		pack    = new(txsync)         // the pack that is being sent
    88  		done    = make(chan error, 1) // result of the send
    89  	)
    90  	// send starts a sending a pack of transactions from the sync.
    91  	send := func(s *txsync) {
    92  		if s.p.version >= eth65 {
    93  			panic("initial transaction syncer running on eth/65+")
    94  		}
    95  		// Fill pack with transactions up to the target size.
    96  		size := common.StorageSize(0)
    97  		pack.p = s.p
    98  		pack.txs = pack.txs[:0]
    99  		for i := 0; i < len(s.txs) && size < txsyncPackSize; i++ {
   100  			pack.txs = append(pack.txs, s.txs[i])
   101  			size += s.txs[i].Size()
   102  		}
   103  		// Remove the transactions that will be sent.
   104  		s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])]
   105  		if len(s.txs) == 0 {
   106  			delete(pending, s.p.ID())
   107  		}
   108  		// Send the pack in the background.
   109  		s.p.Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size)
   110  		sending = true
   111  		go func() { done <- pack.p.SendTransactions64(pack.txs) }()
   112  	}
   113  
   114  	// pick chooses the next pending sync.
   115  	pick := func() *txsync {
   116  		if len(pending) == 0 {
   117  			return nil
   118  		}
   119  		n := rand.Intn(len(pending)) + 1
   120  		for _, s := range pending {
   121  			if n--; n == 0 {
   122  				return s
   123  			}
   124  		}
   125  		return nil
   126  	}
   127  
   128  	for {
   129  		select {
   130  		case s := <-pm.txsyncCh:
   131  			pending[s.p.ID()] = s
   132  			if !sending {
   133  				send(s)
   134  			}
   135  		case err := <-done:
   136  			sending = false
   137  			// Stop tracking peers that cause send failures.
   138  			if err != nil {
   139  				pack.p.Log().Debug("Transaction send failed", "err", err)
   140  				delete(pending, pack.p.ID())
   141  			}
   142  			// Schedule the next send.
   143  			if s := pick(); s != nil {
   144  				send(s)
   145  			}
   146  		case <-pm.quitSync:
   147  			return
   148  		}
   149  	}
   150  }
   151  
   152  // syncer is responsible for periodically synchronising with the network, both
   153  // downloading hashes and blocks as well as handling the announcement handler.
   154  func (pm *ProtocolManager) syncer() {
   155  	// Start and ensure cleanup of sync mechanisms
   156  	pm.blockFetcher.Start()
   157  	pm.txFetcher.Start()
   158  	defer pm.blockFetcher.Stop()
   159  	defer pm.txFetcher.Stop()
   160  	defer pm.downloader.Terminate()
   161  
   162  	// Wait for different events to fire synchronisation operations
   163  	forceSync := time.NewTicker(forceSyncCycle)
   164  	defer forceSync.Stop()
   165  
   166  	for {
   167  		select {
   168  		case <-pm.newPeerCh:
   169  			// Make sure we have peers to select from, then sync
   170  			if pm.peers.Len() < minDesiredPeerCount {
   171  				break
   172  			}
   173  			go pm.synchronise(pm.peers.BestPeer())
   174  
   175  		case <-forceSync.C:
   176  			// Force a sync even if not enough peers are present
   177  			go pm.synchronise(pm.peers.BestPeer())
   178  
   179  		case <-pm.noMorePeers:
   180  			return
   181  		}
   182  	}
   183  }
   184  
   185  // synchronise tries to sync up our local block chain with a remote peer.
   186  func (pm *ProtocolManager) synchronise(peer *peer) {
   187  	// Short circuit if no peers are available
   188  	if peer == nil {
   189  		return
   190  	}
   191  	// Make sure the peer's TD is higher than our own
   192  	currentBlock := pm.blockchain.CurrentBlock()
   193  	td := pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
   194  
   195  	pHead, pTd := peer.Head()
   196  	if pTd.Cmp(td) <= 0 {
   197  		return
   198  	}
   199  	// Otherwise try to sync with the downloader
   200  	mode := downloader.FullSync
   201  	if atomic.LoadUint32(&pm.fastSync) == 1 {
   202  		// Fast sync was explicitly requested, and explicitly granted
   203  		mode = downloader.FastSync
   204  	}
   205  	if mode == downloader.FastSync {
   206  		// Make sure the peer's total difficulty we are synchronizing is higher.
   207  		if pm.blockchain.GetTdByHash(pm.blockchain.CurrentFastBlock().Hash()).Cmp(pTd) >= 0 {
   208  			return
   209  		}
   210  	}
   211  	// Run the sync cycle, and disable fast sync if we've went past the pivot block
   212  	if err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode); err != nil {
   213  		return
   214  	}
   215  	if atomic.LoadUint32(&pm.fastSync) == 1 {
   216  		log.Info("Fast sync complete, auto disabling")
   217  		atomic.StoreUint32(&pm.fastSync, 0)
   218  	}
   219  	// If we've successfully finished a sync cycle and passed any required checkpoint,
   220  	// enable accepting transactions from the network.
   221  	head := pm.blockchain.CurrentBlock()
   222  	if head.NumberU64() >= pm.checkpointNumber {
   223  		// Checkpoint passed, sanity check the timestamp to have a fallback mechanism
   224  		// for non-checkpointed (number = 0) private networks.
   225  		if head.Time() >= uint64(time.Now().AddDate(0, -1, 0).Unix()) {
   226  			atomic.StoreUint32(&pm.acceptTxs, 1)
   227  		}
   228  	}
   229  	if head.NumberU64() > 0 {
   230  		// We've completed a sync cycle, notify all peers of new state. This path is
   231  		// essential in star-topology networks where a gateway node needs to notify
   232  		// all its out-of-date peers of the availability of a new block. This failure
   233  		// scenario will most often crop up in private and hackathon networks with
   234  		// degenerate connectivity, but it should be healthy for the mainnet too to
   235  		// more reliably update peers or the local TD state.
   236  		go pm.BroadcastBlock(head, false)
   237  	}
   238  }