github.com/noirx94/tendermintmp@v0.0.1/mempool/reactor.go

package mempool

import (
	"errors"
	"fmt"
	"math"
	"time"

	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/clist"
	"github.com/tendermint/tendermint/libs/log"
	tmsync "github.com/tendermint/tendermint/libs/sync"
	"github.com/tendermint/tendermint/p2p"
	protomem "github.com/tendermint/tendermint/proto/tendermint/mempool"
	"github.com/tendermint/tendermint/types"
)

const (
	// MempoolChannel is the p2p channel ID used for mempool transaction gossip.
	MempoolChannel = byte(0x30)

	peerCatchupSleepIntervalMS = 100 // If a peer is behind, sleep this long (in ms) before retrying

	// UnknownPeerID is the peer ID used when CheckTx is called without a peer
	// (e.g. via RPC).
	UnknownPeerID uint16 = 0

	// maxActiveIDs caps how many peer IDs can be reserved at once.
	maxActiveIDs = math.MaxUint16
)
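
// Illustrative usage of UnknownPeerID (a sketch, not part of the original file):
// when a tx arrives without a peer, e.g. via RPC, CheckTx can be called with
// UnknownPeerID as the sender. The variable names mp and tx are assumed here.
//
//	err := mp.CheckTx(tx, nil, TxInfo{SenderID: UnknownPeerID})
//	if err != nil {
//		// tx was rejected, or is already in the cache (ErrTxInCache)
//	}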

// Reactor handles mempool tx broadcasting amongst peers.
// It maintains a map from peer ID to a numeric sender ID, used to avoid
// gossiping txs back to the peers they were received from.
type Reactor struct {
	p2p.BaseReactor
	config  *cfg.MempoolConfig
	mempool *CListMempool
	ids     *mempoolIDs
}

type mempoolIDs struct {
	mtx       tmsync.RWMutex
	peerMap   map[p2p.ID]uint16
	nextID    uint16              // assumes that a node will never have more than 65535 active peers
	activeIDs map[uint16]struct{} // used to check if a given peerID key is used; the value doesn't matter
}

// ReserveForPeer searches for the next unused ID and assigns it to the peer.
func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	curID := ids.nextPeerID()
	ids.peerMap[peer.ID()] = curID
	ids.activeIDs[curID] = struct{}{}
}

// nextPeerID returns the next unused peer ID to use.
// It assumes the ids mutex is already locked.
func (ids *mempoolIDs) nextPeerID() uint16 {
	if len(ids.activeIDs) == maxActiveIDs {
		panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", maxActiveIDs))
	}

	_, idExists := ids.activeIDs[ids.nextID]
	for idExists {
		ids.nextID++
		_, idExists = ids.activeIDs[ids.nextID]
	}
	curID := ids.nextID
	ids.nextID++
	return curID
}

// Reclaim returns the ID reserved for the peer back to the unused pool.
func (ids *mempoolIDs) Reclaim(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	removedID, ok := ids.peerMap[peer.ID()]
	if ok {
		delete(ids.activeIDs, removedID)
		delete(ids.peerMap, peer.ID())
	}
}

// GetForPeer returns the ID reserved for the peer.
func (ids *mempoolIDs) GetForPeer(peer p2p.Peer) uint16 {
	ids.mtx.RLock()
	defer ids.mtx.RUnlock()

	return ids.peerMap[peer.ID()]
}

func newMempoolIDs() *mempoolIDs {
	return &mempoolIDs{
		peerMap:   make(map[p2p.ID]uint16),
		activeIDs: map[uint16]struct{}{0: {}},
		nextID:    1, // reserve UnknownPeerID (0) for txs submitted without a peer (e.g. via RPC)
	}
}
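
// A minimal lifecycle sketch of mempoolIDs (illustrative only; it mirrors how the
// reactor hooks below use it: ReserveForPeer in InitPeer, GetForPeer in Receive
// and broadcastTxRoutine, Reclaim in RemovePeer). The peer variable is assumed.
//
//	ids := newMempoolIDs()
//	ids.ReserveForPeer(peer)         // on InitPeer
//	senderID := ids.GetForPeer(peer) // tag txs received from this peer
//	_ = senderID
//	ids.Reclaim(peer)                // on RemovePeer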

// NewReactor returns a new Reactor with the given config and mempool.
func NewReactor(config *cfg.MempoolConfig, mempool *CListMempool) *Reactor {
	memR := &Reactor{
		config:  config,
		mempool: mempool,
		ids:     newMempoolIDs(),
	}
	memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR)
	return memR
}

// InitPeer implements Reactor by creating a state for the peer.
func (memR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
	memR.ids.ReserveForPeer(peer)
	return peer
}

// SetLogger sets the Logger on the reactor and the underlying mempool.
func (memR *Reactor) SetLogger(l log.Logger) {
	memR.Logger = l
	memR.mempool.SetLogger(l)
}

// OnStart implements p2p.BaseReactor.
func (memR *Reactor) OnStart() error {
	if !memR.config.Broadcast {
		memR.Logger.Info("Tx broadcasting is disabled")
	}
	return nil
}
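
// A minimal wiring sketch (assumptions: a *CListMempool mp, a log.Logger logger,
// and a p2p.Switch sw are constructed elsewhere; none of them are created in
// this file):
//
//	memR := NewReactor(cfg.DefaultMempoolConfig(), mp)
//	memR.SetLogger(logger.With("module", "mempool"))
//	sw.AddReactor("MEMPOOL", memR)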

// GetChannels implements Reactor by returning the list of channels for this
// reactor.
func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
	largestTx := make([]byte, memR.config.MaxTxBytes)
	batchMsg := protomem.Message{
		Sum: &protomem.Message_Txs{
			Txs: &protomem.Txs{Txs: [][]byte{largestTx}},
		},
	}

	return []*p2p.ChannelDescriptor{
		{
			ID:                  MempoolChannel,
			Priority:            5,
			RecvMessageCapacity: batchMsg.Size(),
		},
	}
}

// AddPeer implements Reactor.
// It starts a broadcast routine ensuring all txs are forwarded to the given peer.
func (memR *Reactor) AddPeer(peer p2p.Peer) {
	if memR.config.Broadcast {
		go memR.broadcastTxRoutine(peer)
	}
}

// RemovePeer implements Reactor.
func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	memR.ids.Reclaim(peer)
	// broadcast routine checks if peer is gone and returns
}

// Receive implements Reactor.
// It adds any received transactions to the mempool.
func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	msg, err := memR.decodeMsg(msgBytes)
	if err != nil {
		memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
		memR.Switch.StopPeerForError(src, err)
		return
	}
	memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)

	txInfo := TxInfo{SenderID: memR.ids.GetForPeer(src)}
	if src != nil {
		txInfo.SenderP2PID = src.ID()
	}
	for _, tx := range msg.Txs {
		err = memR.mempool.CheckTx(tx, nil, txInfo)
		if err == ErrTxInCache {
			memR.Logger.Debug("Tx already exists in cache", "tx", txID(tx))
		} else if err != nil {
			memR.Logger.Info("Could not check tx", "tx", txID(tx), "err", err)
		}
	}
	// broadcasting happens from go routines per peer
}

// PeerState describes the state of a peer.
type PeerState interface {
	GetHeight() int64
}
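
// Any value with a GetHeight() int64 method satisfies PeerState; in production it
// is the consensus reactor's peer state. For illustration, a hypothetical stub
// could be attached to a peer like this (peerStateStub and peer are assumed):
//
//	type peerStateStub struct{ height int64 }
//	func (s peerStateStub) GetHeight() int64 { return s.height }
//
//	peer.Set(types.PeerStateKey, peerStateStub{height: 10})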

// broadcastTxRoutine sends new mempool txs to the peer.
func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
	peerID := memR.ids.GetForPeer(peer)
	var next *clist.CElement

	for {
		// In case both next.NextWaitChan() and peer.Quit() are selectable at the
		// same time, check the running state explicitly.
		if !memR.IsRunning() || !peer.IsRunning() {
			return
		}
		// next is nil when this routine first starts, or when the CElement we
		// were looking at got garbage collected (removed). In either case, start
		// again from the front of the mempool.
		if next == nil {
			select {
			case <-memR.mempool.TxsWaitChan(): // Wait until a tx is available
				if next = memR.mempool.TxsFront(); next == nil {
					continue
				}
			case <-peer.Quit():
				return
			case <-memR.Quit():
				return
			}
		}

		// Make sure the peer is up to date.
		peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
		if !ok {
			// Peer does not have a state yet. We set it in the consensus reactor, but
			// when the peer is added in Switch, the order in which reactors' AddPeer is
			// called differs every run because a map is used. Sometimes other reactors
			// will be initialized before the consensus reactor. Wait a few
			// milliseconds and retry.
			time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// Allow for a lag of 1 block.
		memTx := next.Value.(*mempoolTx)
		if peerState.GetHeight() < memTx.Height()-1 {
			time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// NOTE: Transaction batching was disabled due to
		// https://github.com/tendermint/tendermint/issues/5796

		// Only send the tx if this peer is not already known to have it.
		if _, ok := memTx.senders.Load(peerID); !ok {
			msg := protomem.Message{
				Sum: &protomem.Message_Txs{
					Txs: &protomem.Txs{Txs: [][]byte{memTx.tx}},
				},
			}
			bz, err := msg.Marshal()
			if err != nil {
				panic(err)
			}
			success := peer.Send(MempoolChannel, bz)
			if !success {
				time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
				continue
			}
		}

		select {
		case <-next.NextWaitChan():
			// see the start of the for loop for the nil check
			next = next.Next()
		case <-peer.Quit():
			return
		case <-memR.Quit():
			return
		}
	}
}

//-----------------------------------------------------------------------------
// Messages

func (memR *Reactor) decodeMsg(bz []byte) (TxsMessage, error) {
	msg := protomem.Message{}
	err := msg.Unmarshal(bz)
	if err != nil {
		return TxsMessage{}, err
	}

	var message TxsMessage

	if i, ok := msg.Sum.(*protomem.Message_Txs); ok {
		txs := i.Txs.GetTxs()

		if len(txs) == 0 {
			return message, errors.New("empty TxsMessage")
		}

		decoded := make([]types.Tx, len(txs))
		for j, tx := range txs {
			decoded[j] = types.Tx(tx)
		}

		message = TxsMessage{
			Txs: decoded,
		}
		return message, nil
	}
	return message, fmt.Errorf("msg type: %T is not supported", msg.Sum)
}
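
// Round-trip sketch (illustrative only): the sending side wraps raw txs in a
// protomem.Message_Txs and marshals it, and decodeMsg reverses that. The tx
// variable is assumed.
//
//	msg := protomem.Message{
//		Sum: &protomem.Message_Txs{Txs: &protomem.Txs{Txs: [][]byte{tx}}},
//	}
//	bz, _ := msg.Marshal()
//	txsMsg, err := memR.decodeMsg(bz) // txsMsg.Txs[0] equals types.Tx(tx)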

//-------------------------------------

// TxsMessage is a Message containing transactions.
type TxsMessage struct {
	Txs []types.Tx
}

// String returns a string representation of the TxsMessage.
func (m *TxsMessage) String() string {
	return fmt.Sprintf("[TxsMessage %v]", m.Txs)
}