github.com/unicornultrafoundation/go-u2u@v1.0.0-rc1.0.20240205080301-e74a83d3fadc/gossip/sync.go

package gossip

import (
	"math/rand"
	"sync/atomic"
	"time"

	"github.com/unicornultrafoundation/go-helios/native/idx"
	"github.com/unicornultrafoundation/go-u2u/common"
	"github.com/unicornultrafoundation/go-u2u/p2p/enode"
)

type syncStage uint32

type syncStatus struct {
	stage       uint32 // current syncStage, read/written atomically
	maybeSynced uint32 // non-zero after MarkMaybeSynced is called, read/written atomically
}

const (
	ssUnknown    syncStage = iota // sync stage isn't determined yet
	ssSnaps                       // snapsync of the EVM state is in progress
	ssEvmSnapGen                  // the EVM snapshot is being generated; fullsync isn't allowed yet
	ssEvents                      // fullsync: DAG events are downloaded and processed
)

const (
	// snapsync is used while the local head is older than snapsyncMinEndAge,
	// and is finalized only at a block younger than it
	snapsyncMinEndAge = 14 * 24 * time.Hour
	// new epochs are pushed into an ongoing snapsync only while their last
	// block is younger than snapsyncMaxStartAge
	snapsyncMaxStartAge = 6 * time.Hour
)

// Is reports whether the current stage equals any of the given stages.
func (ss *syncStatus) Is(s ...syncStage) bool {
	self := &ss.stage
	for _, v := range s {
		if atomic.LoadUint32(self) == uint32(v) {
			return true
		}
	}
	return false
}

// Set atomically switches the current stage.
func (ss *syncStatus) Set(s syncStage) {
	atomic.StoreUint32(&ss.stage, uint32(s))
}

// MaybeSynced reports whether the node has been marked as probably synced up.
func (ss *syncStatus) MaybeSynced() bool {
	return atomic.LoadUint32(&ss.maybeSynced) != 0
}

// MarkMaybeSynced marks the node as probably synced up.
func (ss *syncStatus) MarkMaybeSynced() {
	atomic.StoreUint32(&ss.maybeSynced, uint32(1))
}

// AcceptEvents reports whether DAG events should be accepted (fullsync only).
func (ss *syncStatus) AcceptEvents() bool {
	return ss.Is(ssEvents)
}

// AcceptBlockRecords reports whether block records should be accepted (any stage but fullsync).
func (ss *syncStatus) AcceptBlockRecords() bool {
	return !ss.Is(ssEvents)
}

// AcceptTxs reports whether transactions should be accepted (fullsync and probably synced up).
func (ss *syncStatus) AcceptTxs() bool {
	return ss.MaybeSynced() && ss.Is(ssEvents)
}

// RequestLLR reports whether LLR data should be requested from peers.
func (ss *syncStatus) RequestLLR() bool {
	return !ss.Is(ssEvents) || ss.MaybeSynced()
}
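
// exampleSyncStatusGating is an illustrative sketch, not part of the original
// file: it shows how the predicates above are expected to combine as the node
// moves from snapsync to fullsync. Only identifiers defined in this file are
// used; the function name itself is hypothetical.
func exampleSyncStatusGating() {
	ss := new(syncStatus)
	ss.Set(ssSnaps)
	_ = ss.AcceptEvents()       // false: events are ignored while snapsyncing
	_ = ss.AcceptBlockRecords() // true: block records are accepted outside fullsync
	_ = ss.RequestLLR()         // true: LLR data is requested outside fullsync

	ss.Set(ssEvents)
	_ = ss.AcceptTxs() // still false: txs also require MaybeSynced
	ss.MarkMaybeSynced()
	_ = ss.AcceptTxs() // true: fullsync is on and the node looks synced up
}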

type txsync struct {
	p     *peer
	txids []common.Hash
}

// syncTransactions queues the hashes of all currently pending transactions
// to be sent to the given peer by txsyncLoop.
func (h *handler) syncTransactions(p *peer, txids []common.Hash) {
	if len(txids) == 0 {
		return
	}
	select {
	case h.txsyncCh <- &txsync{p, txids}:
	case <-h.quitSync:
	}
}
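
// exampleQueueOrQuit is an illustrative sketch, not part of the original file:
// it restates the hand-off pattern used by syncTransactions above. The caller
// blocks until the work is accepted by the loop or the handler is shutting
// down, so nothing is left stuck on a dead channel at shutdown.
func exampleQueueOrQuit(workCh chan<- *txsync, quit <-chan struct{}, s *txsync) bool {
	select {
	case workCh <- s:
		return true // accepted by the loop
	case <-quit:
		return false // handler is stopping; the work is dropped
	}
}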

// txsyncLoop takes care of the initial transaction sync for each new
// connection. When a new peer appears, we relay all currently pending
// transactions. In order to minimise egress bandwidth usage, we send
// the transactions in small packs to one peer at a time.
func (h *handler) txsyncLoop() {
	var (
		pending = make(map[enode.ID]*txsync)
		sending = false               // whether a send is active
		pack    = new(txsync)         // the pack that is being sent
		done    = make(chan error, 1) // result of the send
	)

	// send starts sending a pack of transactions from the sync.
	send := func(s *txsync) {
		// Fill pack with transactions up to the target size.
		pack.p = s.p
		pack.txids = pack.txids[:0]
		for i := 0; i < len(s.txids) && len(pack.txids) < softLimitItems; i++ {
			pack.txids = append(pack.txids, s.txids[i])
		}
		// Remove the transactions that will be sent.
		s.txids = s.txids[len(pack.txids):]
		if len(s.txids) == 0 {
			delete(pending, s.p.ID())
		}
		// Send the pack in the background.
		s.p.Log().Trace("Sending batch of transaction hashes", "count", len(pack.txids))
		sending = true
		go func() {
			if len(pack.txids) != 0 {
				done <- pack.p.SendTransactionHashes(pack.txids)
			} else {
				done <- nil
			}
		}()
	}

	// pick chooses a random peer's pending sync as the next one to send.
	pick := func() *txsync {
		if len(pending) == 0 {
			return nil
		}
		n := rand.Intn(len(pending)) + 1
		for _, s := range pending {
			if n--; n == 0 {
				return s
			}
		}
		return nil
	}

	for {
		select {
		case s := <-h.txsyncCh:
			pending[s.p.ID()] = s
			if !sending {
				send(s)
			}
		case err := <-done:
			sending = false
			// Stop tracking peers that cause send failures.
			if err != nil {
				pack.p.Log().Debug("Transaction send failed", "err", err)
				delete(pending, pack.p.ID())
			}
			// Schedule the next send.
			if s := pick(); s != nil {
				send(s)
			}
		case <-h.quitSync:
			return
		}
	}
}
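
// exampleTxsyncChunking is an illustrative sketch, not part of the original
// file: it mirrors how send() above cuts a peer's pending hash list into
// packs. The limit parameter stands in for softLimitItems, which send() uses
// as the per-pack cap.
func exampleTxsyncChunking(txids []common.Hash, limit int) [][]common.Hash {
	var packs [][]common.Hash
	for len(txids) > 0 {
		n := len(txids)
		if n > limit {
			n = limit
		}
		packs = append(packs, txids[:n:n])
		txids = txids[n:]
	}
	return packs
}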

func (h *handler) updateSnapsyncStage() {
	// never allow fullsync while the EVM snapshot is still being generated, as it may lead to a race condition
	snapGenOngoing, _ := h.store.evm.Snaps.Generating()
	fullsyncPossibleEver := h.store.evm.HasStateDB(h.store.GetBlockState().FinalizedStateRoot)
	fullsyncPossibleNow := fullsyncPossibleEver && !snapGenOngoing
	// never allow stopping fullsync, as the EVM snapshot could be overwritten by snapsync, leading to a race condition
	snapsyncPossible := h.config.AllowSnapsync && (h.syncStatus.Is(ssUnknown) || h.syncStatus.Is(ssSnaps))
	snapsyncNeeded := !fullsyncPossibleEver || time.Since(h.store.GetEpochState().EpochStart.Time()) > snapsyncMinEndAge

	if snapsyncPossible && snapsyncNeeded {
		h.syncStatus.Set(ssSnaps)
	} else if snapGenOngoing {
		h.syncStatus.Set(ssEvmSnapGen)
	} else if fullsyncPossibleNow {
		if !h.syncStatus.Is(ssEvents) {
			h.Log.Info("Start/Switch to fullsync mode...")
		}
		h.syncStatus.Set(ssEvents)
	}
}
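
// exampleChooseSyncStage is an illustrative sketch, not part of the original
// file: it restates the decision made by updateSnapsyncStage above as a pure
// function over the same inputs, returning the stage that would be set (or
// the current stage when nothing changes).
func exampleChooseSyncStage(cur syncStage, allowSnapsync, snapGenOngoing, fullsyncPossibleEver bool, epochAge time.Duration) syncStage {
	snapsyncPossible := allowSnapsync && (cur == ssUnknown || cur == ssSnaps)
	snapsyncNeeded := !fullsyncPossibleEver || epochAge > snapsyncMinEndAge
	switch {
	case snapsyncPossible && snapsyncNeeded:
		return ssSnaps
	case snapGenOngoing:
		return ssEvmSnapGen
	case fullsyncPossibleEver: // snapshot generation is not ongoing at this point
		return ssEvents
	default:
		return cur
	}
}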

func (h *handler) snapsyncStageTick() {
	// check whether the ongoing snapsync can be finalized
	h.updateSnapsyncStage()
	llrs := h.store.GetLlrState()
	if h.syncStatus.Is(ssSnaps) {
		for i := 0; i < 3; i++ {
			epoch := llrs.LowestEpochToFill - 1 - idx.Epoch(i)
			if epoch <= h.store.GetEpoch() {
				continue
			}
			bs, _ := h.store.GetHistoryBlockEpochState(epoch)
			if bs == nil {
				continue
			}
			if !h.store.evm.HasStateDB(bs.FinalizedStateRoot) {
				continue
			}
			if llrs.LowestBlockToFill <= bs.LastBlock.Idx {
				continue
			}
			if time.Since(bs.LastBlock.Time.Time()) > snapsyncMinEndAge {
				continue
			}
			// cancel snapsync activity to prevent a race condition
			done := make(chan struct{})
			h.snapState.updatesCh <- snapsyncStateUpd{
				snapsyncCancelCmd: &snapsyncCancelCmd{done},
			}
			<-done
			// finalize snapsync
			if err := h.process.SwitchEpochTo(epoch); err != nil {
				h.Log.Error("Failed to finalize snapsync", "epoch", epoch, "block", bs.LastBlock.Idx, "err", err)
			} else {
				h.Log.Info("Snapsync is finalized at", "epoch", epoch, "block", bs.LastBlock.Idx, "root", bs.FinalizedStateRoot)
				// switch the state to non-snapsync, so that ssSnaps is never allowed again
				h.syncStatus.Set(ssEvmSnapGen)
			}
		}
	}
	// push new data into the existing snapsync process
	if h.syncStatus.Is(ssSnaps) {
		lastEpoch := llrs.LowestEpochToFill - 1
		lastBs, _ := h.store.GetHistoryBlockEpochState(lastEpoch)
		if lastBs != nil && time.Since(lastBs.LastBlock.Time.Time()) < snapsyncMaxStartAge {
			h.snapState.updatesCh <- snapsyncStateUpd{
				snapsyncEpochUpd: &snapsyncEpochUpd{
					epoch: lastEpoch,
					root:  common.Hash(lastBs.FinalizedStateRoot),
				},
			}
		}
	}
	// resume event downloading if fullsync is enabled; otherwise download block records instead
	if h.syncStatus.Is(ssEvents) {
		h.dagLeecher.Resume()
		h.brLeecher.Pause()
	} else {
		h.dagLeecher.Pause()
		h.brLeecher.Resume()
	}
}

func (h *handler) snapsyncStageLoop() {
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()
	defer h.loopsWg.Done()
	for {
		select {
		case <-ticker.C:
			h.snapsyncStageTick()
		case <-h.snapState.quit:
			return
		}
	}
}

// mayCancel cancels the ongoing snapsync process, if any.
func (ss *snapsyncState) mayCancel() error {
	if ss.cancel != nil {
		err := ss.cancel()
		ss.cancel = nil
		return err
	}
	return nil
}

func (h *handler) snapsyncStateLoop() {
	defer h.loopsWg.Done()
	for {
		select {
		case cmd := <-h.snapState.updatesCh:
			if cmd.snapsyncEpochUpd != nil {
				upd := cmd.snapsyncEpochUpd
				// skip if the epoch hasn't advanced
				if h.snapState.epoch >= upd.epoch {
					continue
				}
				h.snapState.epoch = upd.epoch
				_ = h.snapState.mayCancel()
				// start a new snapsync state
				h.Log.Info("Update snapsync epoch", "epoch", upd.epoch, "root", upd.root)
				h.process.PauseEvmSnapshot()
				ss := h.snapLeecher.SyncState(upd.root)
				h.snapState.cancel = ss.Cancel
			}
			if cmd.snapsyncCancelCmd != nil {
				_ = h.snapState.mayCancel()
				cmd.snapsyncCancelCmd.done <- struct{}{}
			}
		case <-h.snapState.quit:
			_ = h.snapState.mayCancel()
			return
		}
	}
}