github.com/fiagdao/tendermint@v0.32.11-0.20220824195748-2087fcc480c1/mempool/reactor.go

package mempool

import (
	"fmt"
	"math"
	"reflect"
	"sync"
	"time"

	amino "github.com/tendermint/go-amino"

	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/clist"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

const (
	// MempoolChannel is the p2p channel ID used for mempool tx gossip.
	MempoolChannel = byte(0x30)

	// aminoOverheadForTxMessage is the amino encoding overhead of wrapping a
	// tx in a TxMessage; see calcMaxMsgSize.
	aminoOverheadForTxMessage = 8

	peerCatchupSleepIntervalMS = 100 // If a peer is behind, sleep this long before retrying

	// UnknownPeerID is the peer ID to use when running CheckTx when there is
	// no peer (e.g. RPC).
	UnknownPeerID uint16 = 0

	// maxActiveIDs is the maximum number of peer IDs that may be active at once.
	maxActiveIDs = math.MaxUint16
)

// Reactor handles mempool tx broadcasting amongst peers.
// It maintains a map from peer ID to counter, to prevent gossiping a tx
// back to the peer it was received from.
type Reactor struct {
	p2p.BaseReactor
	config  *cfg.MempoolConfig
	mempool *CListMempool
	ids     *mempoolIDs
}

type mempoolIDs struct {
	mtx       sync.RWMutex
	peerMap   map[p2p.ID]uint16
	nextID    uint16              // assumes that a node will never have over 65535 active peers
	activeIDs map[uint16]struct{} // used to check if a given peerID key is used; the value doesn't matter
}

// ReserveForPeer searches for the next unused ID and assigns it to the
// peer.
func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	curID := ids.nextPeerID()
	ids.peerMap[peer.ID()] = curID
	ids.activeIDs[curID] = struct{}{}
}

// nextPeerID returns the next unused peer ID to use.
// This assumes that ids's mutex is already locked.
func (ids *mempoolIDs) nextPeerID() uint16 {
	if len(ids.activeIDs) == maxActiveIDs {
		panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", maxActiveIDs))
	}

	_, idExists := ids.activeIDs[ids.nextID]
	for idExists {
		ids.nextID++
		_, idExists = ids.activeIDs[ids.nextID]
	}
	curID := ids.nextID
	ids.nextID++
	return curID
}

// Reclaim returns the ID reserved for the peer back to the unused pool.
func (ids *mempoolIDs) Reclaim(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	removedID, ok := ids.peerMap[peer.ID()]
	if ok {
		delete(ids.activeIDs, removedID)
		delete(ids.peerMap, peer.ID())
	}
}

// GetForPeer returns the ID reserved for the peer, or 0 (UnknownPeerID) if
// none has been reserved.
func (ids *mempoolIDs) GetForPeer(peer p2p.Peer) uint16 {
	ids.mtx.RLock()
	defer ids.mtx.RUnlock()

	return ids.peerMap[peer.ID()]
}

func newMempoolIDs() *mempoolIDs {
	return &mempoolIDs{
		peerMap:   make(map[p2p.ID]uint16),
		activeIDs: map[uint16]struct{}{0: {}},
		nextID:    1, // reserve UnknownPeerID (0) for mempoolReactor.BroadcastTx
	}
}
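
// Illustrative sketch (peerA, peerB, and peerC are hypothetical p2p.Peer
// values, not part of this file): IDs are handed out sequentially, skipping
// any that are still active:
//
//	ids := newMempoolIDs()
//	ids.ReserveForPeer(peerA) // peerA gets ID 1
//	ids.ReserveForPeer(peerB) // peerB gets ID 2
//	ids.Reclaim(peerA)        // ID 1 returns to the unused pool
//	ids.ReserveForPeer(peerC) // peerC gets ID 3; ID 1 is only reused after nextID wraps around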

// NewReactor returns a new Reactor with the given config and mempool.
func NewReactor(config *cfg.MempoolConfig, mempool *CListMempool) *Reactor {
	memR := &Reactor{
		config:  config,
		mempool: mempool,
		ids:     newMempoolIDs(),
	}
	memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR)
	return memR
}
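
// Illustrative sketch (hypothetical wiring; mp, logger, and sw are assumed to
// exist in the caller): a node typically constructs the reactor from its
// mempool config and registers it with the p2p switch, roughly:
//
//	memR := mempool.NewReactor(config.Mempool, mp)
//	memR.SetLogger(logger.With("module", "mempool"))
//	sw.AddReactor("MEMPOOL", memR)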

// InitPeer implements Reactor by creating a state for the peer.
func (memR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
	memR.ids.ReserveForPeer(peer)
	return peer
}

// SetLogger sets the Logger on the reactor and the underlying mempool.
func (memR *Reactor) SetLogger(l log.Logger) {
	memR.Logger = l
	memR.mempool.SetLogger(l)
}

// OnStart implements p2p.BaseReactor.
func (memR *Reactor) OnStart() error {
	if !memR.config.Broadcast {
		memR.Logger.Info("Tx broadcasting is disabled")
	}
	return nil
}

// GetChannels implements Reactor.
// It returns the list of channels for this reactor.
func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
	return []*p2p.ChannelDescriptor{
		{
			ID:       MempoolChannel,
			Priority: 5,
		},
	}
}

// AddPeer implements Reactor.
// It starts a broadcast routine ensuring all txs are forwarded to the given peer.
func (memR *Reactor) AddPeer(peer p2p.Peer) {
	go memR.broadcastTxRoutine(peer)
}

// RemovePeer implements Reactor.
func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	memR.ids.Reclaim(peer)
	// the broadcast routine checks if the peer is gone and returns
}

// Receive implements Reactor.
// It adds any received transactions to the mempool.
func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	msg, err := memR.decodeMsg(msgBytes)
	if err != nil {
		memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
		memR.Switch.StopPeerForError(src, err)
		return
	}
	memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)

	switch msg := msg.(type) {
	case *TxMessage:
		txInfo := TxInfo{SenderID: memR.ids.GetForPeer(src)}
		if src != nil {
			txInfo.SenderP2PID = src.ID()
		}
		err := memR.mempool.CheckTx(msg.Tx, nil, txInfo)
		if err != nil {
			memR.Logger.Info("Could not check tx", "tx", txID(msg.Tx), "err", err)
		}
		// broadcasting happens from goroutines, one per peer
	default:
		memR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
	}
}
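
// Note: the SenderID recorded in TxInfo above is what broadcastTxRoutine uses
// to avoid echoing a tx back to the peer it came from (the
// memTx.senders.Load(peerID) check below).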

// PeerState describes the state of a peer.
type PeerState interface {
	GetHeight() int64
}
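
// Illustrative sketch (hypothetical stub type, e.g. for tests): any value
// stored under types.PeerStateKey that satisfies PeerState works for the
// height check in broadcastTxRoutine below:
//
//	type stubPeerState struct{ height int64 }
//
//	func (ps stubPeerState) GetHeight() int64 { return ps.height }
//
//	peer.Set(types.PeerStateKey, stubPeerState{height: 10})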

// broadcastTxRoutine sends new mempool txs to the peer.
func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
	if !memR.config.Broadcast {
		return
	}

	peerID := memR.ids.GetForPeer(peer)
	var next *clist.CElement
	for {
		// In case both next.NextWaitChan() and peer.Quit() fire at the same
		// time, re-check that the reactor and the peer are still running.
		if !memR.IsRunning() || !peer.IsRunning() {
			return
		}
		// This happens because the CElement we were looking at got garbage
		// collected (removed). That is, .NextWait() returned nil. Go ahead and
		// start from the beginning.
		if next == nil {
			select {
			case <-memR.mempool.TxsWaitChan(): // Wait until a tx is available
				if next = memR.mempool.TxsFront(); next == nil {
					continue
				}
			case <-peer.Quit():
				return
			case <-memR.Quit():
				return
			}
		}

		memTx := next.Value.(*mempoolTx)

		// make sure the peer is up to date
		peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
		if !ok {
			// Peer does not have a state yet. We set it in the consensus reactor,
			// but when we add a peer in the Switch, the order in which we call each
			// reactor's AddPeer is different every time because we iterate over a
			// map. Sometimes other reactors will be initialized before the consensus
			// reactor. We should wait a few milliseconds and retry.
			time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}
		if peerState.GetHeight() < memTx.Height()-1 { // Allow for a lag of 1 block
			time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// ensure the peer hasn't already sent us this tx
		if _, ok := memTx.senders.Load(peerID); !ok {
			// send memTx
			msg := &TxMessage{Tx: memTx.tx}
			success := peer.Send(MempoolChannel, cdc.MustMarshalBinaryBare(msg))
			if !success {
				time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
				continue
			}
		}

		select {
		case <-next.NextWaitChan():
			// see the start of the for loop for nil check
			next = next.Next()
		case <-peer.Quit():
			return
		case <-memR.Quit():
			return
		}
	}
}

//-----------------------------------------------------------------------------
// Messages

// Message is a message sent or received by the Reactor.
type Message interface{}

// RegisterMessages registers mempool messages with the given amino codec.
func RegisterMessages(cdc *amino.Codec) {
	cdc.RegisterInterface((*Message)(nil), nil)
	cdc.RegisterConcrete(&TxMessage{}, "tendermint/mempool/TxMessage", nil)
}
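
// Illustrative sketch (assumed wiring: the package-level cdc used below is
// conventionally set up elsewhere in this package, e.g. in a codec.go, along
// these lines):
//
//	var cdc = amino.NewCodec()
//
//	func init() {
//		RegisterMessages(cdc)
//	}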

// decodeMsg enforces the maximum message size before unmarshalling the raw
// bytes into a Message.
func (memR *Reactor) decodeMsg(bz []byte) (msg Message, err error) {
	maxMsgSize := calcMaxMsgSize(memR.config.MaxTxBytes)
	if l := len(bz); l > maxMsgSize {
		return msg, ErrTxTooLarge{maxMsgSize, l}
	}
	err = cdc.UnmarshalBinaryBare(bz, &msg)
	return
}

//-------------------------------------

// TxMessage is a Message containing a transaction.
type TxMessage struct {
	Tx types.Tx
}

// String returns a string representation of the TxMessage.
func (m *TxMessage) String() string {
	return fmt.Sprintf("[TxMessage %v]", m.Tx)
}
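
// Illustrative sketch (hypothetical tx bytes): this is the shape of what
// broadcastTxRoutine puts on the wire:
//
//	msg := &TxMessage{Tx: types.Tx("some tx bytes")}
//	bz := cdc.MustMarshalBinaryBare(msg)
//	// bz is what peer.Send(MempoolChannel, bz) transmits.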

// calcMaxMsgSize returns the max size of a TxMessage, accounting for the
// amino overhead of wrapping a Tx.
func calcMaxMsgSize(maxTxSize int) int {
	return maxTxSize + aminoOverheadForTxMessage
}
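
// Worked example (hypothetical config value): with MaxTxBytes = 1048576
// (1 MiB), decodeMsg accepts messages up to
// 1048576 + aminoOverheadForTxMessage = 1048584 bytes.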