github.com/line/ostracon@v1.0.10-0.20230328032236-7f20145f065d/mempool/reactor.go

package mempool

import (
	"errors"
	"fmt"
	"math"
	"time"

	protomem "github.com/tendermint/tendermint/proto/tendermint/mempool"

	cfg "github.com/line/ostracon/config"
	"github.com/line/ostracon/libs/clist"
	"github.com/line/ostracon/libs/log"
	tmsync "github.com/line/ostracon/libs/sync"
	"github.com/line/ostracon/p2p"
	"github.com/line/ostracon/types"
)

const (
	MempoolChannel = byte(0x30)

	peerCatchupSleepIntervalMS = 100 // If a peer is behind, sleep this many milliseconds before retrying

	// UnknownPeerID is the peer ID to use when running CheckTx with no peer
	// attached, e.g. for txs submitted via RPC.
	UnknownPeerID uint16 = 0

	// maxActiveIDs is the maximum number of peer IDs that can be reserved at
	// once; IDs are uint16, so this is 65535 (math.MaxUint16).
	maxActiveIDs = math.MaxUint16
)

// Reactor handles mempool tx broadcasting amongst peers.
// It maintains a map from peer ID to counter, to prevent gossiping txs to the
// peers you received them from.
type Reactor struct {
	p2p.BaseReactor
	config  *cfg.MempoolConfig
	mempool *CListMempool
	ids     *mempoolIDs
}

type mempoolIDs struct {
	mtx       tmsync.RWMutex
	peerMap   map[p2p.ID]uint16
	nextID    uint16              // assumes that a node will never have more than 65535 active peers (IDs are uint16)
	activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter
}

// ReserveForPeer searches for the next unused ID and assigns it to the peer.
func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	curID := ids.nextPeerID()
	ids.peerMap[peer.ID()] = curID
	ids.activeIDs[curID] = struct{}{}
}

// nextPeerID returns the next unused peer ID to use.
// This assumes that ids's mutex is already locked.
func (ids *mempoolIDs) nextPeerID() uint16 {
	if len(ids.activeIDs) == maxActiveIDs {
		panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", maxActiveIDs))
	}

	_, idExists := ids.activeIDs[ids.nextID]
	for idExists {
		ids.nextID++
		_, idExists = ids.activeIDs[ids.nextID]
	}
	curID := ids.nextID
	ids.nextID++
	return curID
}
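
// Illustrative walkthrough (not part of the original file): with
// activeIDs = {0, 1, 3} and nextID = 1, nextPeerID skips the in-use ID 1,
// returns 2, and leaves nextID at 3; the next call skips 3 and returns 4.
// When nextID overflows past 65535 it wraps to 0, which newMempoolIDs
// reserves permanently, so the scan simply moves on to 1.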

// Reclaim returns the ID reserved for the peer back to the unused pool.
func (ids *mempoolIDs) Reclaim(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	removedID, ok := ids.peerMap[peer.ID()]
	if ok {
		delete(ids.activeIDs, removedID)
		delete(ids.peerMap, peer.ID())
	}
}

// GetForPeer returns an ID reserved for the peer.
func (ids *mempoolIDs) GetForPeer(peer p2p.Peer) uint16 {
	ids.mtx.RLock()
	defer ids.mtx.RUnlock()

	return ids.peerMap[peer.ID()]
}

func newMempoolIDs() *mempoolIDs {
	return &mempoolIDs{
		peerMap:   make(map[p2p.ID]uint16),
		activeIDs: map[uint16]struct{}{0: {}},
		nextID:    1, // reserve UnknownPeerID (0) for txs submitted without a peer, e.g. via RPC
	}
}
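
// Typical lifecycle, for illustration only (peer stands in for any connected
// p2p.Peer; this sketch is not part of the original file):
//
//	ids := newMempoolIDs()
//	ids.ReserveForPeer(peer)   // InitPeer: assign the next free uint16 ID
//	id := ids.GetForPeer(peer) // Receive: tag incoming txs with the sender ID
//	ids.Reclaim(peer)          // RemovePeer: return the ID to the unused pool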

// NewReactor returns a new Reactor with the given config and mempool.
// The async flag and recvBufSize are passed through to the underlying
// p2p.BaseReactor.
func NewReactor(config *cfg.MempoolConfig, async bool, recvBufSize int, mempool *CListMempool) *Reactor {
	memR := &Reactor{
		config:  config,
		mempool: mempool,
		ids:     newMempoolIDs(),
	}
	memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR, async, recvBufSize)
	return memR
}
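
// Construction sketch, for illustration only (DefaultMempoolConfig, the buffer
// size, mempool, and logger are assumptions, not values mandated by this file):
//
//	config := cfg.DefaultMempoolConfig()
//	memR := NewReactor(config, true, 1000, mempool) // mempool: an existing *CListMempool
//	memR.SetLogger(logger)                          // logger: any log.Logger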

// InitPeer implements Reactor by creating a state for the peer.
func (memR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
	memR.ids.ReserveForPeer(peer)
	return peer
}

// SetLogger sets the Logger on the reactor and the underlying mempool.
func (memR *Reactor) SetLogger(l log.Logger) {
	memR.Logger = l
	memR.mempool.SetLogger(l)
}

// OnStart implements p2p.BaseReactor.
func (memR *Reactor) OnStart() error {
	// call BaseReactor's OnStart()
	err := memR.BaseReactor.OnStart()
	if err != nil {
		return err
	}

	if !memR.config.Broadcast {
		memR.Logger.Info("Tx broadcasting is disabled")
	}
	return nil
}

// GetChannels implements Reactor by returning the list of channels for this
// reactor.
func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
	largestTx := make([]byte, memR.config.MaxTxBytes)
	batchMsg := protomem.Message{
		Sum: &protomem.Message_Txs{
			Txs: &protomem.Txs{Txs: [][]byte{largestTx}},
		},
	}

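	// RecvMessageCapacity below is the serialized size of the largest
	// legitimate message: a batch carrying a single tx of MaxTxBytes plus a
	// few bytes of protobuf framing; larger messages are rejected by the
	// p2p layer.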
	return []*p2p.ChannelDescriptor{
		{
			ID:                  MempoolChannel,
			Priority:            5,
			RecvMessageCapacity: batchMsg.Size(),
		},
	}
}

// AddPeer implements Reactor.
// It starts a broadcast routine ensuring all txs are forwarded to the given peer.
func (memR *Reactor) AddPeer(peer p2p.Peer) {
	if memR.config.Broadcast {
		go memR.broadcastTxRoutine(peer)
	}
}

// RemovePeer implements Reactor.
func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	memR.ids.Reclaim(peer)
	// broadcast routine checks if peer is gone and returns
}

// Receive implements Reactor.
// It adds any received transactions to the mempool.
func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	msg, err := memR.decodeMsg(msgBytes)
	if err != nil {
		memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
		memR.Switch.StopPeerForError(src, err)
		return
	}
	memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)

	txInfo := TxInfo{SenderID: memR.ids.GetForPeer(src)}
	if src != nil {
		txInfo.SenderP2PID = src.ID()
	}
	for _, tx := range msg.Txs {
		tx := tx // pin! workaround for `scopelint` error
		memR.mempool.CheckTxAsync(tx, txInfo, func(err error) {
			if err == ErrTxInCache {
				memR.Logger.Debug("Tx already exists in cache", "tx", txID(tx))
			} else if err != nil {
				memR.Logger.Info("Could not check tx", "tx", txID(tx), "err", err)
			}
		}, nil)
	}
	// broadcasting happens from goroutines per peer
}

// PeerState describes the state of a peer.
type PeerState interface {
	GetHeight() int64
}

// broadcastTxRoutine sends new mempool txs to the peer.
func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
	peerID := memR.ids.GetForPeer(peer)
	var next *clist.CElement

	for {
		// Guard against the case where next.NextWaitChan() and peer.Quit()
		// become ready at the same time.
		if !memR.IsRunning() || !peer.IsRunning() {
			return
		}
		// This happens because the CElement we were looking at got garbage
		// collected (removed). That is, .NextWait() returned nil. Go ahead and
		// start from the beginning.
		if next == nil {
			select {
			case <-memR.mempool.TxsWaitChan(): // Wait until a tx is available
				if next = memR.mempool.TxsFront(); next == nil {
					continue
				}
			case <-peer.Quit():
				return
			case <-memR.Quit():
				return
			}
		}

		// Make sure the peer is up to date.
		peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
		if !ok {
			// Peer does not have a state yet. We set it in the consensus reactor,
			// but when we add the peer in the Switch, the order in which we call
			// each reactor's AddPeer is different every time because we range over
			// a map. Sometimes other reactors are initialized before the consensus
			// reactor, so we wait a few milliseconds and retry.
			time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// Allow for a lag of 1 block.
		memTx := next.Value.(*mempoolTx)
		if peerState.GetHeight() < memTx.Height()-1 {
			time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// NOTE: Transaction batching was disabled due to
		// https://github.com/tendermint/tendermint/issues/5796

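		// memTx.senders records the IDs of the peers a tx was received from;
		// skip peers that already know this tx rather than echoing it back.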
		if _, ok := memTx.senders.Load(peerID); !ok {
			msg := protomem.Message{
				Sum: &protomem.Message_Txs{
					Txs: &protomem.Txs{Txs: [][]byte{memTx.tx}},
				},
			}
			bz, err := msg.Marshal()
			if err != nil {
				panic(err)
			}
			success := peer.Send(MempoolChannel, bz)
			if !success {
				time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
				continue
			}
		}

		select {
		case <-next.NextWaitChan():
			// see the start of the for loop for nil check
			next = next.Next()
		case <-peer.Quit():
			return
		case <-memR.Quit():
			return
		}
	}
}

//-----------------------------------------------------------------------------
// Messages

func (memR *Reactor) decodeMsg(bz []byte) (TxsMessage, error) {
	msg := protomem.Message{}
	err := msg.Unmarshal(bz)
	if err != nil {
		return TxsMessage{}, err
	}

	var message TxsMessage

	if i, ok := msg.Sum.(*protomem.Message_Txs); ok {
		txs := i.Txs.GetTxs()

		if len(txs) == 0 {
			return message, errors.New("empty TxsMessage")
		}

		decoded := make([]types.Tx, len(txs))
		for j, tx := range txs {
			decoded[j] = types.Tx(tx)
		}

		message = TxsMessage{
			Txs: decoded,
		}
		return message, nil
	}
	return message, fmt.Errorf("msg type: %T is not supported", msg.Sum)
}
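
// Round-trip sketch, for illustration only: the encode side below mirrors what
// broadcastTxRoutine sends over MempoolChannel (tx is any raw tx bytes; this
// is not part of the original file):
//
//	msg := protomem.Message{
//		Sum: &protomem.Message_Txs{Txs: &protomem.Txs{Txs: [][]byte{tx}}},
//	}
//	bz, _ := msg.Marshal()
//	decoded, err := memR.decodeMsg(bz) // decoded.Txs == [tx], err == nil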

//-------------------------------------

// TxsMessage is a Message containing transactions.
type TxsMessage struct {
	Txs []types.Tx
}

// String returns a string representation of the TxsMessage.
func (m *TxsMessage) String() string {
	return fmt.Sprintf("[TxsMessage %v]", m.Txs)
}