github.com/badrootd/nibiru-cometbft@v0.37.5-0.20240307173500-2a75559eee9b/mempool/v1/reactor.go

// Deprecated: Priority mempool will be removed in the next major release.
package v1

import (
	"errors"
	"fmt"
	"time"

	cfg "github.com/badrootd/nibiru-cometbft/config"
	"github.com/badrootd/nibiru-cometbft/libs/clist"
	"github.com/badrootd/nibiru-cometbft/libs/log"
	cmtsync "github.com/badrootd/nibiru-cometbft/libs/sync"
	"github.com/badrootd/nibiru-cometbft/mempool"
	"github.com/badrootd/nibiru-cometbft/p2p"
	protomem "github.com/badrootd/nibiru-cometbft/proto/tendermint/mempool"
	"github.com/badrootd/nibiru-cometbft/types"
)

// Reactor handles mempool tx broadcasting amongst peers.
// It maintains a map from peer ID to counter, to prevent gossiping a tx back
// to the peer it was received from.
type Reactor struct {
	p2p.BaseReactor
	config  *cfg.MempoolConfig
	mempool *TxMempool
	ids     *mempoolIDs
}

type mempoolIDs struct {
	mtx       cmtsync.RWMutex
	peerMap   map[p2p.ID]uint16
	nextID    uint16              // assumes that a node will never have over 65536 active peers
	activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter
}
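
// mempoolIDs hands out a compact uint16 sender ID per connected peer. The ID is
// passed to CheckTx with every tx received from that peer (TxInfo.SenderID), so
// the broadcast routine's memTx.HasPeer check can avoid sending a tx back to
// the peer it came from.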

// ReserveForPeer searches for the next unused ID and assigns it to the peer.
func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	curID := ids.nextPeerID()
	ids.peerMap[peer.ID()] = curID
	ids.activeIDs[curID] = struct{}{}
}

// nextPeerID returns the next unused peer ID to use.
// This assumes that ids's mutex is already locked.
func (ids *mempoolIDs) nextPeerID() uint16 {
	if len(ids.activeIDs) == mempool.MaxActiveIDs {
		panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", mempool.MaxActiveIDs))
	}

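	// The linear probe below always terminates: the check above guarantees at
	// least one unused ID remains, and nextID is a uint16, so it wraps around to
	// 0 (permanently reserved in newMempoolIDs) instead of overflowing.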
	_, idExists := ids.activeIDs[ids.nextID]
	for idExists {
		ids.nextID++
		_, idExists = ids.activeIDs[ids.nextID]
	}
	curID := ids.nextID
	ids.nextID++
	return curID
}

// Reclaim returns the ID reserved for the peer back to unused pool.
func (ids *mempoolIDs) Reclaim(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	removedID, ok := ids.peerMap[peer.ID()]
	if ok {
		delete(ids.activeIDs, removedID)
		delete(ids.peerMap, peer.ID())
	}
}

// GetForPeer returns an ID reserved for the peer.
func (ids *mempoolIDs) GetForPeer(peer p2p.Peer) uint16 {
	ids.mtx.RLock()
	defer ids.mtx.RUnlock()

	return ids.peerMap[peer.ID()]
}

func newMempoolIDs() *mempoolIDs {
	return &mempoolIDs{
		peerMap:   make(map[p2p.ID]uint16),
		activeIDs: map[uint16]struct{}{0: {}},
		nextID:    1, // reserve unknownPeerID(0) for mempoolReactor.BroadcastTx
	}
}

// NewReactor returns a new Reactor with the given config and mempool.
func NewReactor(config *cfg.MempoolConfig, mempool *TxMempool) *Reactor {
	memR := &Reactor{
		config:  config,
		mempool: mempool,
		ids:     newMempoolIDs(),
	}
	memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR)
	return memR
}
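
// Minimal wiring sketch (illustrative only; mempoolCfg, txmp, logger and sw are
// assumed to come from the node's setup code and are not defined in this file):
//
//	memR := NewReactor(mempoolCfg, txmp) // *cfg.MempoolConfig and *TxMempool
//	memR.SetLogger(logger.With("module", "mempool"))
//	sw.AddReactor("MEMPOOL", memR)       // sw is the node's *p2p.Switch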

// InitPeer implements Reactor by creating a state for the peer.
func (memR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
	memR.ids.ReserveForPeer(peer)
	return peer
}

// SetLogger sets the Logger on the reactor.
func (memR *Reactor) SetLogger(l log.Logger) {
	memR.Logger = l
}

// OnStart implements p2p.BaseReactor.
func (memR *Reactor) OnStart() error {
	if !memR.config.Broadcast {
		memR.Logger.Info("Tx broadcasting is disabled")
	}
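	// Even with broadcasting disabled the reactor still accepts txs from peers
	// in ReceiveEnvelope; it simply never starts broadcastTxRoutine in AddPeer.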
	return nil
}

// GetChannels implements Reactor by returning the list of channels for this
// reactor.
func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
	largestTx := make([]byte, memR.config.MaxTxBytes)
	batchMsg := protomem.Message{
		Sum: &protomem.Message_Txs{
			Txs: &protomem.Txs{Txs: [][]byte{largestTx}},
		},
	}

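	// RecvMessageCapacity is sized from batchMsg above: a Txs message carrying a
	// single transaction of MaxTxBytes, since batching is disabled (see the NOTE
	// in broadcastTxRoutine).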
	return []*p2p.ChannelDescriptor{
		{
			ID:                  mempool.MempoolChannel,
			Priority:            5,
			RecvMessageCapacity: batchMsg.Size(),
			MessageType:         &protomem.Message{},
		},
	}
}

// AddPeer implements Reactor.
// It starts a broadcast routine ensuring all txs are forwarded to the given peer.
func (memR *Reactor) AddPeer(peer p2p.Peer) {
	if memR.config.Broadcast {
		go memR.broadcastTxRoutine(peer)
	}
}

// RemovePeer implements Reactor.
func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	memR.ids.Reclaim(peer)
	// broadcast routine checks if peer is gone and returns
}

// ReceiveEnvelope implements Reactor.
// It adds any received transactions to the mempool.
func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) {
	memR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", e.Message)
	switch msg := e.Message.(type) {
	case *protomem.Txs:
		protoTxs := msg.GetTxs()
		if len(protoTxs) == 0 {
			memR.Logger.Error("received empty txs from peer", "src", e.Src)
			return
		}
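		// Record which peer sent the txs: SenderID is the reactor-local ID from
		// mempoolIDs, and the mempool keeps it with each tx so broadcastTxRoutine
		// (memTx.HasPeer below) does not echo a tx back to the peer it came from.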
		txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(e.Src)}
		if e.Src != nil {
			txInfo.SenderP2PID = e.Src.ID()
		}

		var err error
		for _, tx := range protoTxs {
			ntx := types.Tx(tx)
			err = memR.mempool.CheckTx(ntx, nil, txInfo)
			if errors.Is(err, mempool.ErrTxInCache) {
				memR.Logger.Debug("Tx already exists in cache", "tx", ntx.String())
			} else if err != nil {
				memR.Logger.Info("Could not check tx", "tx", ntx.String(), "err", err)
			}
		}
	default:
		memR.Logger.Error("unknown message type", "src", e.Src, "chId", e.ChannelID, "msg", e.Message)
		memR.Switch.StopPeerForError(e.Src, fmt.Errorf("mempool cannot handle message of type: %T", e.Message))
		return
	}

	// broadcasting happens from goroutines per peer
}

// PeerState describes the state of a peer.
type PeerState interface {
	GetHeight() int64
}
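
// PeerState is implemented by the consensus reactor's peer state, which is
// stored on the peer under types.PeerStateKey; broadcastTxRoutine uses it to
// hold a tx back until the peer is within one block of the tx's height.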

// Send new mempool txs to peer.
func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
	peerID := memR.ids.GetForPeer(peer)
	var next *clist.CElement

	for {
		// In case both next.NextWaitChan() and peer.Quit() fire at the same time
		if !memR.IsRunning() || !peer.IsRunning() {
			return
		}

		// This happens because the CElement we were looking at got garbage
		// collected (removed). That is, .NextWait() returned nil. Go ahead and
		// start from the beginning.
		if next == nil {
			select {
			case <-memR.mempool.TxsWaitChan(): // Wait until a tx is available
				if next = memR.mempool.TxsFront(); next == nil {
					continue
				}

			case <-peer.Quit():
				return

			case <-memR.Quit():
				return
			}
		}

		// Make sure the peer is up to date.
		peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
		if !ok {
			// Peer does not have a state yet. We set it in the consensus reactor, but
			// when we add peer in Switch, the order we call reactors#AddPeer is
			// different every time due to us using a map. Sometimes other reactors
			// will be initialized before the consensus reactor. We should wait a few
			// milliseconds and retry.
			time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// Allow for a lag of 1 block.
		memTx := next.Value.(*WrappedTx)
		if peerState.GetHeight() < memTx.height-1 {
			time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// NOTE: Transaction batching was disabled due to
		// https://github.com/tendermint/tendermint/issues/5796
		if !memTx.HasPeer(peerID) {
			success := peer.SendEnvelope(p2p.Envelope{
				ChannelID: mempool.MempoolChannel,
				Message:   &protomem.Txs{Txs: [][]byte{memTx.tx}},
			})
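			// If the envelope could not be queued (e.g. the peer's send queue is
			// full or the peer stopped), back off and retry the same tx; next is
			// not advanced.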
			if !success {
				time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
				continue
			}
		}

		select {
		case <-next.NextWaitChan():
			// see the start of the for loop for nil check
			next = next.Next()

		case <-peer.Quit():
			return

		case <-memR.Quit():
			return
		}
	}
}

//-----------------------------------------------------------------------------
// Messages

// TxsMessage is a Message containing transactions.
type TxsMessage struct {
	Txs []types.Tx
}

// String returns a string representation of the TxsMessage.
func (m *TxsMessage) String() string {
	return fmt.Sprintf("[TxsMessage %v]", m.Txs)
}