github.com/MagHErmit/tendermint@v0.282.1/mempool/v1/reactor.go

package v1

import (
	"errors"
	"fmt"
	"time"

	cfg "github.com/MagHErmit/tendermint/config"
	"github.com/MagHErmit/tendermint/libs/clist"
	"github.com/MagHErmit/tendermint/libs/log"
	tmsync "github.com/MagHErmit/tendermint/libs/sync"
	"github.com/MagHErmit/tendermint/mempool"
	"github.com/MagHErmit/tendermint/p2p"
	protomem "github.com/MagHErmit/tendermint/proto/tendermint/mempool"
	"github.com/MagHErmit/tendermint/types"
)

// Reactor handles mempool tx broadcasting amongst peers.
// It maintains a map from peer ID to a numeric ID, used to prevent gossiping
// txs back to the peers they were received from.
type Reactor struct {
	p2p.BaseReactor
	config  *cfg.MempoolConfig
	mempool *TxMempool
	ids     *mempoolIDs
}

type mempoolIDs struct {
	mtx       tmsync.RWMutex
	peerMap   map[p2p.ID]uint16
	nextID    uint16              // assumes that a node will never have over 65536 active peers
	activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter
}

// ReserveForPeer searches for the next unused ID and assigns it to the peer.
func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	curID := ids.nextPeerID()
	ids.peerMap[peer.ID()] = curID
	ids.activeIDs[curID] = struct{}{}
}

// nextPeerID returns the next unused peer ID to use.
// It assumes the caller already holds ids.mtx.
func (ids *mempoolIDs) nextPeerID() uint16 {
	if len(ids.activeIDs) == mempool.MaxActiveIDs {
		panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", mempool.MaxActiveIDs))
	}

	_, idExists := ids.activeIDs[ids.nextID]
	for idExists {
		ids.nextID++
		_, idExists = ids.activeIDs[ids.nextID]
	}
	curID := ids.nextID
	ids.nextID++
	return curID
}

// Reclaim returns the ID reserved for the peer back to the unused pool.
func (ids *mempoolIDs) Reclaim(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	removedID, ok := ids.peerMap[peer.ID()]
	if ok {
		delete(ids.activeIDs, removedID)
		delete(ids.peerMap, peer.ID())
	}
}

// GetForPeer returns an ID reserved for the peer.
func (ids *mempoolIDs) GetForPeer(peer p2p.Peer) uint16 {
	ids.mtx.RLock()
	defer ids.mtx.RUnlock()

	return ids.peerMap[peer.ID()]
}

func newMempoolIDs() *mempoolIDs {
	return &mempoolIDs{
		peerMap:   make(map[p2p.ID]uint16),
		activeIDs: map[uint16]struct{}{0: {}},
		nextID:    1, // reserve unknownPeerID(0) for mempoolReactor.BroadcastTx
	}
}
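
// exampleIDAssignment is a hypothetical sketch, not part of the upstream file,
// illustrating how mempoolIDs hands out IDs. ID 0 is pre-reserved by
// newMempoolIDs for locally submitted txs, so the first two reservations yield
// 1 and 2. It mirrors what ReserveForPeer does, minus the p2p.Peer bookkeeping.
func exampleIDAssignment() (uint16, uint16) {
	ids := newMempoolIDs()

	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	first := ids.nextPeerID() // 1, since 0 is already in activeIDs
	ids.activeIDs[first] = struct{}{}

	second := ids.nextPeerID() // 2
	ids.activeIDs[second] = struct{}{}

	return first, second
}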

// NewReactor returns a new Reactor with the given config and mempool.
func NewReactor(config *cfg.MempoolConfig, mempool *TxMempool) *Reactor {
	memR := &Reactor{
		config:  config,
		mempool: mempool,
		ids:     newMempoolIDs(),
	}
	memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR)
	return memR
}
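
// wireReactor is a hypothetical wiring sketch, not part of the upstream code.
// It assumes an already-constructed *TxMempool and a log.Logger, and shows the
// usual order of calls before the reactor is registered with a p2p switch; the
// default mempool config is only a placeholder choice here.
func wireReactor(pool *TxMempool, logger log.Logger) *Reactor {
	memR := NewReactor(cfg.DefaultMempoolConfig(), pool)
	memR.SetLogger(logger)
	return memR
}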

// InitPeer implements Reactor by reserving an ID for the peer.
func (memR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
	memR.ids.ReserveForPeer(peer)
	return peer
}

// SetLogger sets the Logger on the reactor.
func (memR *Reactor) SetLogger(l log.Logger) {
	memR.Logger = l
}

// OnStart implements p2p.BaseReactor.
func (memR *Reactor) OnStart() error {
	if !memR.config.Broadcast {
		memR.Logger.Info("Tx broadcasting is disabled")
	}
	return nil
}

// GetChannels implements Reactor by returning the list of channels for this
// reactor.
func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
	largestTx := make([]byte, memR.config.MaxTxBytes)
	batchMsg := protomem.Message{
		Sum: &protomem.Message_Txs{
			Txs: &protomem.Txs{Txs: [][]byte{largestTx}},
		},
	}

	return []*p2p.ChannelDescriptor{
		{
			ID:                  mempool.MempoolChannel,
			Priority:            5,
			RecvMessageCapacity: batchMsg.Size(),
		},
	}
}
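
// maxRecvCapacity is a hypothetical helper, not part of the upstream file,
// spelling out how RecvMessageCapacity above is derived: it is the encoded
// size of a Txs message carrying a single transaction of MaxTxBytes, i.e. the
// largest message this reactor expects to receive on the mempool channel.
func maxRecvCapacity(maxTxBytes int) int {
	msg := protomem.Message{
		Sum: &protomem.Message_Txs{
			Txs: &protomem.Txs{Txs: [][]byte{make([]byte, maxTxBytes)}},
		},
	}
	return msg.Size()
}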

// AddPeer implements Reactor.
// It starts a broadcast routine ensuring all txs are forwarded to the given peer.
func (memR *Reactor) AddPeer(peer p2p.Peer) {
	if memR.config.Broadcast {
		go memR.broadcastTxRoutine(peer)
	}
}

// RemovePeer implements Reactor.
func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	memR.ids.Reclaim(peer)
	// broadcast routine checks if peer is gone and returns
}

// Receive implements Reactor.
// It adds any received transactions to the mempool.
func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	msg, err := memR.decodeMsg(msgBytes)
	if err != nil {
		memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
		memR.Switch.StopPeerForError(src, err)
		return
	}
	memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)

	txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(src)}
	if src != nil {
		txInfo.SenderP2PID = src.ID()
	}
	for _, tx := range msg.Txs {
		err = memR.mempool.CheckTx(tx, nil, txInfo)
		if err == mempool.ErrTxInCache {
			memR.Logger.Debug("Tx already exists in cache", "tx", tx.String())
		} else if err != nil {
			memR.Logger.Info("Could not check tx", "tx", tx.String(), "err", err)
		}
	}
	// broadcasting happens from per-peer goroutines
}
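
// checkLocalTx is a hypothetical sketch, not part of the upstream reactor, of
// how a tx that did not arrive from a peer (e.g. one submitted over RPC) would
// be checked: it uses SenderID 0, the unknown-peer ID reserved by
// newMempoolIDs, so no peer is recorded as its sender and it is still eligible
// for gossip to every peer.
func checkLocalTx(pool *TxMempool, tx types.Tx) error {
	return pool.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0})
}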

// PeerState describes the state of a peer.
type PeerState interface {
	GetHeight() int64
}

// broadcastTxRoutine sends new mempool txs to the peer.
func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
	peerID := memR.ids.GetForPeer(peer)
	var next *clist.CElement

	for {
		// In case both next.NextWaitChan() and peer.Quit() are ready at the same
		// time, make sure we still exit when the reactor or the peer has stopped.
		if !memR.IsRunning() || !peer.IsRunning() {
			return
		}

		// This happens because the CElement we were looking at got garbage
		// collected (removed). That is, .NextWait() returned nil. Go ahead and
		// start from the beginning.
		if next == nil {
			select {
			case <-memR.mempool.TxsWaitChan(): // Wait until a tx is available
				if next = memR.mempool.TxsFront(); next == nil {
					continue
				}

			case <-peer.Quit():
				return

			case <-memR.Quit():
				return
			}
		}

		// Make sure the peer is up to date.
		peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
		if !ok {
			// Peer does not have a state yet. We set it in the consensus reactor, but
			// when we add peer in Switch, the order we call reactors#AddPeer is
			// different every time due to us using a map. Sometimes other reactors
			// will be initialized before the consensus reactor. We should wait a few
			// milliseconds and retry.
			time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// Allow for a lag of 1 block.
		memTx := next.Value.(*WrappedTx)
		if peerState.GetHeight() < memTx.height-1 {
			time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// NOTE: Transaction batching was disabled due to
		// https://github.com/MagHErmit/tendermint/issues/5796
		if !memTx.HasPeer(peerID) {
			msg := protomem.Message{
				Sum: &protomem.Message_Txs{
					Txs: &protomem.Txs{Txs: [][]byte{memTx.tx}},
				},
			}

			bz, err := msg.Marshal()
			if err != nil {
				panic(err)
			}

			success := peer.Send(mempool.MempoolChannel, bz)
			if !success {
				time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
				continue
			}
		}

		select {
		case <-next.NextWaitChan():
			// see the start of the for loop for nil check
			next = next.Next()

		case <-peer.Quit():
			return

		case <-memR.Quit():
			return
		}
	}
}
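
// shouldSendTo is a hypothetical helper, not present upstream, that restates
// the two gossip filters applied by broadcastTxRoutine above: skip peers
// lagging more than one block behind the tx's height, and skip peers the tx
// was already received from.
func shouldSendTo(peerID uint16, peerHeight int64, memTx *WrappedTx) bool {
	if peerHeight < memTx.height-1 {
		return false
	}
	return !memTx.HasPeer(peerID)
}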

//-----------------------------------------------------------------------------
// Messages

func (memR *Reactor) decodeMsg(bz []byte) (TxsMessage, error) {
	msg := protomem.Message{}
	err := msg.Unmarshal(bz)
	if err != nil {
		return TxsMessage{}, err
	}

	var message TxsMessage

	if i, ok := msg.Sum.(*protomem.Message_Txs); ok {
		txs := i.Txs.GetTxs()

		if len(txs) == 0 {
			return message, errors.New("empty TxsMessage")
		}

		decoded := make([]types.Tx, len(txs))
		for j, tx := range txs {
			decoded[j] = types.Tx(tx)
		}

		message = TxsMessage{
			Txs: decoded,
		}
		return message, nil
	}
	return message, fmt.Errorf("msg type: %T is not supported", msg)
}
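
// encodeTxs is a hypothetical counterpart to decodeMsg, not part of the
// upstream file: it wraps raw transactions in a protomem.Message the same way
// broadcastTxRoutine does before calling peer.Send, which is the wire format
// decodeMsg expects to receive.
func encodeTxs(txs []types.Tx) ([]byte, error) {
	raw := make([][]byte, len(txs))
	for i, tx := range txs {
		raw[i] = tx
	}

	msg := protomem.Message{
		Sum: &protomem.Message_Txs{
			Txs: &protomem.Txs{Txs: raw},
		},
	}
	return msg.Marshal()
}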

//-------------------------------------

// TxsMessage is a Message containing transactions.
type TxsMessage struct {
	Txs []types.Tx
}

// String returns a string representation of the TxsMessage.
func (m *TxsMessage) String() string {
	return fmt.Sprintf("[TxsMessage %v]", m.Txs)
}