github.com/vipernet-xyz/tm@v0.34.24/mempool/v1/reactor.go

package v1

import (
	"errors"
	"fmt"
	"time"

	"github.com/gogo/protobuf/proto"

	cfg "github.com/vipernet-xyz/tm/config"
	"github.com/vipernet-xyz/tm/libs/clist"
	"github.com/vipernet-xyz/tm/libs/log"
	tmsync "github.com/vipernet-xyz/tm/libs/sync"
	"github.com/vipernet-xyz/tm/mempool"
	"github.com/vipernet-xyz/tm/p2p"
	protomem "github.com/vipernet-xyz/tm/proto/tendermint/mempool"
	"github.com/vipernet-xyz/tm/types"
)

// Reactor handles mempool tx broadcasting amongst peers.
// It maintains a map from peer ID to counter, to avoid gossiping a tx back to
// the peers it was received from.
type Reactor struct {
	p2p.BaseReactor
	config  *cfg.MempoolConfig
	mempool *TxMempool
	ids     *mempoolIDs
}

type mempoolIDs struct {
	mtx       tmsync.RWMutex
	peerMap   map[p2p.ID]uint16
	nextID    uint16              // assumes that a node will never have more than 65535 active peers
	activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter
}

// ReserveForPeer searches for the next unused ID and assigns it to the peer.
func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	curID := ids.nextPeerID()
	ids.peerMap[peer.ID()] = curID
	ids.activeIDs[curID] = struct{}{}
}

// nextPeerID returns the next unused peer ID to use.
// This assumes that ids's mutex is already locked.
func (ids *mempoolIDs) nextPeerID() uint16 {
	if len(ids.activeIDs) == mempool.MaxActiveIDs {
		panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", mempool.MaxActiveIDs))
	}

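	// Scan forward for a free ID, letting nextID wrap around as a uint16.
	// The check above caps the number of active IDs, so a free ID exists and
	// the scan terminates.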
	_, idExists := ids.activeIDs[ids.nextID]
	for idExists {
		ids.nextID++
		_, idExists = ids.activeIDs[ids.nextID]
	}
	curID := ids.nextID
	ids.nextID++
	return curID
}

// Reclaim returns the ID reserved for the peer back to the unused pool.
func (ids *mempoolIDs) Reclaim(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	removedID, ok := ids.peerMap[peer.ID()]
	if ok {
		delete(ids.activeIDs, removedID)
		delete(ids.peerMap, peer.ID())
	}
}

// GetForPeer returns the ID reserved for the peer.
func (ids *mempoolIDs) GetForPeer(peer p2p.Peer) uint16 {
	ids.mtx.RLock()
	defer ids.mtx.RUnlock()

	return ids.peerMap[peer.ID()]
}

func newMempoolIDs() *mempoolIDs {
	return &mempoolIDs{
		peerMap:   make(map[p2p.ID]uint16),
		activeIDs: map[uint16]struct{}{0: {}},
		nextID:    1, // reserve unknownPeerID(0) for mempoolReactor.BroadcastTx
	}
}

// NewReactor returns a new Reactor with the given config and mempool.
func NewReactor(config *cfg.MempoolConfig, mempool *TxMempool) *Reactor {
	memR := &Reactor{
		config:  config,
		mempool: mempool,
		ids:     newMempoolIDs(),
	}
	memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR)
	return memR
}
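// The snippet below is an illustrative sketch only (not part of the original
// file): it shows how a node might wire this reactor into a p2p.Switch. The
// names txmp, logger, and sw stand for an already-constructed *TxMempool,
// log.Logger, and *p2p.Switch, and are assumptions here.
//
//	memR := NewReactor(cfg.DefaultMempoolConfig(), txmp)
//	memR.SetLogger(logger.With("module", "mempool"))
//	sw.AddReactor("MEMPOOL", memR)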

// InitPeer implements Reactor by creating a state for the peer.
func (memR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
	memR.ids.ReserveForPeer(peer)
	return peer
}

// SetLogger sets the Logger on the reactor.
func (memR *Reactor) SetLogger(l log.Logger) {
	memR.Logger = l
}

// OnStart implements p2p.BaseReactor.
func (memR *Reactor) OnStart() error {
	if !memR.config.Broadcast {
		memR.Logger.Info("Tx broadcasting is disabled")
	}
	return nil
}

// GetChannels implements Reactor by returning the list of channels for this
// reactor.
func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
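	// Size the channel's receive capacity to fit a Txs message carrying a
	// single transaction of the maximum configured size.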
	largestTx := make([]byte, memR.config.MaxTxBytes)
	batchMsg := protomem.Message{
		Sum: &protomem.Message_Txs{
			Txs: &protomem.Txs{Txs: [][]byte{largestTx}},
		},
	}

	return []*p2p.ChannelDescriptor{
		{
			ID:                  mempool.MempoolChannel,
			Priority:            5,
			RecvMessageCapacity: batchMsg.Size(),
			MessageType:         &protomem.Message{},
		},
	}
}

// AddPeer implements Reactor.
// It starts a broadcast routine ensuring all txs are forwarded to the given peer.
func (memR *Reactor) AddPeer(peer p2p.Peer) {
	if memR.config.Broadcast {
		go memR.broadcastTxRoutine(peer)
	}
}

// RemovePeer implements Reactor.
func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	memR.ids.Reclaim(peer)
	// broadcast routine checks if peer is gone and returns
}

// ReceiveEnvelope implements Reactor.
// It adds any received transactions to the mempool.
func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) {
	memR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", e.Message)
	switch msg := e.Message.(type) {
	case *protomem.Txs:
		protoTxs := msg.GetTxs()
		if len(protoTxs) == 0 {
			memR.Logger.Error("received empty txs from peer", "src", e.Src)
			return
		}
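		// Record which peer sent these txs so they are not gossiped back to it
		// by the broadcast routines.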
		txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(e.Src)}
		if e.Src != nil {
			txInfo.SenderP2PID = e.Src.ID()
		}

		var err error
		for _, tx := range protoTxs {
			ntx := types.Tx(tx)
			err = memR.mempool.CheckTx(ntx, nil, txInfo)
			if errors.Is(err, mempool.ErrTxInCache) {
				memR.Logger.Debug("Tx already exists in cache", "tx", ntx.String())
			} else if err != nil {
				memR.Logger.Info("Could not check tx", "tx", ntx.String(), "err", err)
			}
		}
	default:
		memR.Logger.Error("unknown message type", "src", e.Src, "chId", e.ChannelID, "msg", e.Message)
		memR.Switch.StopPeerForError(e.Src, fmt.Errorf("mempool cannot handle message of type: %T", e.Message))
		return
	}

	// broadcasting happens from go routines per peer
}

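// Receive is the raw-bytes entrypoint: it unmarshals the wire bytes into a
// protomem.Message, unwraps the inner message, and delegates to
// ReceiveEnvelope.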
func (memR *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
	msg := &protomem.Message{}
	err := proto.Unmarshal(msgBytes, msg)
	if err != nil {
		panic(err)
	}
	uw, err := msg.Unwrap()
	if err != nil {
		panic(err)
	}
	memR.ReceiveEnvelope(p2p.Envelope{
		ChannelID: chID,
		Src:       peer,
		Message:   uw,
	})
}

// PeerState describes the state of a peer.
type PeerState interface {
	GetHeight() int64
}
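// In practice PeerState is satisfied by the peer state set by the consensus
// reactor; broadcastTxRoutine uses it to avoid sending txs to peers that are
// still catching up.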

// Send new mempool txs to peer.
func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
	peerID := memR.ids.GetForPeer(peer)
	var next *clist.CElement

	for {
		// In case next.NextWaitChan() and peer.Quit() fire at the same time,
		// make sure the reactor and the peer are still running before doing
		// any more work.
		if !memR.IsRunning() || !peer.IsRunning() {
			return
		}

		// next is nil on the first pass, or when the element we were looking
		// at was removed from the list. Wait until a tx is available and start
		// again from the front of the mempool.
		if next == nil {
			select {
			case <-memR.mempool.TxsWaitChan(): // Wait until a tx is available
				if next = memR.mempool.TxsFront(); next == nil {
					continue
				}

			case <-peer.Quit():
				return

			case <-memR.Quit():
				return
			}
		}

		// Make sure the peer is up to date.
		peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
		if !ok {
			// Peer does not have a state yet. We set it in the consensus reactor, but
			// when we add peer in Switch, the order we call reactors#AddPeer is
			// different every time due to us using a map. Sometimes other reactors
			// will be initialized before the consensus reactor. We should wait a few
			// milliseconds and retry.
			time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// Allow for a lag of 1 block.
		memTx := next.Value.(*WrappedTx)
		if peerState.GetHeight() < memTx.height-1 {
			time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// NOTE: Transaction batching was disabled due to
		// https://github.com/vipernet-xyz/tm/issues/5796
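		// Skip peers that are already known to have this tx (e.g. the peer it
		// was received from).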
		if !memTx.HasPeer(peerID) {
			success := p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
				ChannelID: mempool.MempoolChannel,
				Message:   &protomem.Txs{Txs: [][]byte{memTx.tx}},
			}, memR.Logger)
			if !success {
				time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
				continue
			}
		}

		select {
		case <-next.NextWaitChan():
			// see the start of the for loop for nil check
			next = next.Next()

		case <-peer.Quit():
			return

		case <-memR.Quit():
			return
		}
	}
}

//-----------------------------------------------------------------------------
// Messages

// TxsMessage is a Message containing transactions.
type TxsMessage struct {
	Txs []types.Tx
}

// String returns a string representation of the TxsMessage.
func (m *TxsMessage) String() string {
	return fmt.Sprintf("[TxsMessage %v]", m.Txs)
}