github.com/Finschia/ostracon@v1.1.5/mempool/v1/reactor.go

     1  //go:build deprecated
     2  
     3  package v1
     4  
     5  import (
     6  	"errors"
     7  	"fmt"
     8  	"time"
     9  
    10  	"github.com/gogo/protobuf/proto"
    11  
    12  	cfg "github.com/tendermint/tendermint/config"
    13  	"github.com/tendermint/tendermint/libs/clist"
    14  	"github.com/tendermint/tendermint/libs/log"
    15  	tmsync "github.com/tendermint/tendermint/libs/sync"
    16  	"github.com/tendermint/tendermint/mempool"
    17  	"github.com/tendermint/tendermint/p2p"
    18  	protomem "github.com/tendermint/tendermint/proto/tendermint/mempool"
    19  	"github.com/tendermint/tendermint/types"
    20  )
    21  
    22  // Reactor handles mempool tx broadcasting amongst peers.
     23  // It maintains a map from peer ID to a per-peer numeric ID, used to avoid
     24  // gossiping a tx back to the peer it was received from.
    25  type Reactor struct {
    26  	p2p.BaseReactor
    27  	config  *cfg.MempoolConfig
    28  	mempool *TxMempool
    29  	ids     *mempoolIDs
    30  }
    31  
    32  type mempoolIDs struct {
    33  	mtx       tmsync.RWMutex
    34  	peerMap   map[p2p.ID]uint16
    35  	nextID    uint16              // assumes that a node will never have over 65536 active peers
    36  	activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter
    37  }
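// Note on the scheme above: the uint16 reserved for each peer is recorded on every
// transaction that peer sends us (as TxInfo.SenderID in ReceiveEnvelope below), which is
// what lets broadcastTxRoutine skip gossiping a tx back to the peer it came from. ID 0 is
// held back as unknownPeerID for txs submitted locally via BroadcastTx rather than
// received from a peer (see newMempoolIDs).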
    38  
     39  // ReserveForPeer searches for the next unused ID and assigns it to the
     40  // given peer.
    41  func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
    42  	ids.mtx.Lock()
    43  	defer ids.mtx.Unlock()
    44  
    45  	curID := ids.nextPeerID()
    46  	ids.peerMap[peer.ID()] = curID
    47  	ids.activeIDs[curID] = struct{}{}
    48  }
    49  
    50  // nextPeerID returns the next unused peer ID to use.
    51  // This assumes that ids's mutex is already locked.
    52  func (ids *mempoolIDs) nextPeerID() uint16 {
    53  	if len(ids.activeIDs) == mempool.MaxActiveIDs {
    54  		panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", mempool.MaxActiveIDs))
    55  	}
    56  
    57  	_, idExists := ids.activeIDs[ids.nextID]
    58  	for idExists {
    59  		ids.nextID++
    60  		_, idExists = ids.activeIDs[ids.nextID]
    61  	}
    62  	curID := ids.nextID
    63  	ids.nextID++
    64  	return curID
    65  }
    66  
     67  // Reclaim returns the ID reserved for the peer back to the unused pool.
    68  func (ids *mempoolIDs) Reclaim(peer p2p.Peer) {
    69  	ids.mtx.Lock()
    70  	defer ids.mtx.Unlock()
    71  
    72  	removedID, ok := ids.peerMap[peer.ID()]
    73  	if ok {
    74  		delete(ids.activeIDs, removedID)
    75  		delete(ids.peerMap, peer.ID())
    76  	}
    77  }
    78  
     79  // GetForPeer returns the ID reserved for the peer (0 if none has been reserved).
    80  func (ids *mempoolIDs) GetForPeer(peer p2p.Peer) uint16 {
    81  	ids.mtx.RLock()
    82  	defer ids.mtx.RUnlock()
    83  
    84  	return ids.peerMap[peer.ID()]
    85  }
    86  
    87  func newMempoolIDs() *mempoolIDs {
    88  	return &mempoolIDs{
    89  		peerMap:   make(map[p2p.ID]uint16),
    90  		activeIDs: map[uint16]struct{}{0: {}},
    91  		nextID:    1, // reserve unknownPeerID(0) for mempoolReactor.BroadcastTx
    92  	}
    93  }
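// A minimal sketch of the ID lifecycle managed above (illustrative only; "peer" stands in
// for a connected p2p.Peer and the surrounding reactor wiring is omitted):
//
//	ids := newMempoolIDs()
//	ids.ReserveForPeer(peer)         // InitPeer: allocate a uint16 for the peer
//	senderID := ids.GetForPeer(peer) // ReceiveEnvelope: tag incoming txs with their sender
//	_ = senderID
//	ids.Reclaim(peer)                // RemovePeer: return the ID to the unused pool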
    94  
    95  // NewReactor returns a new Reactor with the given config and mempool.
    96  func NewReactor(config *cfg.MempoolConfig, mempool *TxMempool) *Reactor {
    97  	memR := &Reactor{
    98  		config:  config,
    99  		mempool: mempool,
   100  		ids:     newMempoolIDs(),
   101  	}
   102  	memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR)
   103  	return memR
   104  }
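// How the reactor gets hooked up by the node is outside this file; a rough sketch,
// assuming the standard p2p switch API, looks like:
//
//	memR := NewReactor(conf.Mempool, txmp) // conf is a *cfg.Config, txmp a *TxMempool
//	memR.SetLogger(logger.With("module", "mempool"))
//	sw.AddReactor("MEMPOOL", memR) // the switch then drives InitPeer/AddPeer/Receive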
   105  
   106  // InitPeer implements Reactor by creating a state for the peer.
   107  func (memR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
   108  	memR.ids.ReserveForPeer(peer)
   109  	return peer
   110  }
   111  
    112  // SetLogger sets the Logger on the reactor.
   113  func (memR *Reactor) SetLogger(l log.Logger) {
   114  	memR.Logger = l
   115  }
   116  
   117  // OnStart implements p2p.BaseReactor.
   118  func (memR *Reactor) OnStart() error {
   119  	if !memR.config.Broadcast {
   120  		memR.Logger.Info("Tx broadcasting is disabled")
   121  	}
   122  	return nil
   123  }
   124  
   125  // GetChannels implements Reactor by returning the list of channels for this
   126  // reactor.
   127  func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
   128  	largestTx := make([]byte, memR.config.MaxTxBytes)
   129  	batchMsg := protomem.Message{
   130  		Sum: &protomem.Message_Txs{
   131  			Txs: &protomem.Txs{Txs: [][]byte{largestTx}},
   132  		},
   133  	}
   134  
   135  	return []*p2p.ChannelDescriptor{
   136  		{
   137  			ID:                  mempool.MempoolChannel,
   138  			Priority:            5,
   139  			RecvMessageCapacity: batchMsg.Size(),
   140  			MessageType:         &protomem.Message{},
   141  		},
   142  	}
   143  }
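// RecvMessageCapacity above is derived by proto-encoding a Txs message carrying a single
// transaction of MaxTxBytes, so the largest transaction the config allows still fits in
// one message received on the mempool channel.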
   144  
   145  // AddPeer implements Reactor.
   146  // It starts a broadcast routine ensuring all txs are forwarded to the given peer.
   147  func (memR *Reactor) AddPeer(peer p2p.Peer) {
   148  	if memR.config.Broadcast {
   149  		go memR.broadcastTxRoutine(peer)
   150  	}
   151  }
   152  
   153  // RemovePeer implements Reactor.
   154  func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
   155  	memR.ids.Reclaim(peer)
   156  	// broadcast routine checks if peer is gone and returns
   157  }
   158  
    159  // ReceiveEnvelope implements Reactor.
    160  // It adds any received transactions to the mempool.
   161  func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) {
   162  	memR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", e.Message)
   163  	switch msg := e.Message.(type) {
   164  	case *protomem.Txs:
   165  		protoTxs := msg.GetTxs()
   166  		if len(protoTxs) == 0 {
    167  			memR.Logger.Error("received empty txs from peer", "src", e.Src)
   168  			return
   169  		}
   170  		txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(e.Src)}
   171  		if e.Src != nil {
   172  			txInfo.SenderP2PID = e.Src.ID()
   173  		}
   174  
   175  		var err error
   176  		for _, tx := range protoTxs {
   177  			ntx := types.Tx(tx)
   178  			err = memR.mempool.CheckTx(ntx, nil, txInfo)
   179  			if errors.Is(err, mempool.ErrTxInCache) {
   180  				memR.Logger.Debug("Tx already exists in cache", "tx", ntx.String())
   181  			} else if err != nil {
   182  				memR.Logger.Info("Could not check tx", "tx", ntx.String(), "err", err)
   183  			}
   184  		}
   185  	default:
   186  		memR.Logger.Error("unknown message type", "src", e.Src, "chId", e.ChannelID, "msg", e.Message)
   187  		memR.Switch.StopPeerForError(e.Src, fmt.Errorf("mempool cannot handle message of type: %T", e.Message))
   188  		return
   189  	}
   190  
    191  	// broadcasting happens from per-peer goroutines
   192  }
   193  
   194  func (memR *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
   195  	msg := &protomem.Message{}
   196  	err := proto.Unmarshal(msgBytes, msg)
   197  	if err != nil {
   198  		panic(err)
   199  	}
   200  	uw, err := msg.Unwrap()
   201  	if err != nil {
   202  		panic(err)
   203  	}
   204  	memR.ReceiveEnvelope(p2p.Envelope{
   205  		ChannelID: chID,
   206  		Src:       peer,
   207  		Message:   uw,
   208  	})
   209  }
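// On the wire, mempool traffic is a proto-encoded protomem.Message envelope whose Sum
// holds a Txs payload; Receive unmarshals and unwraps it before dispatching the concrete
// message to ReceiveEnvelope. A minimal send-side sketch of the same framing (illustrative
// only, roughly what p2p.SendEnvelopeShim does on our behalf in broadcastTxRoutine):
//
//	msg := protomem.Message{Sum: &protomem.Message_Txs{Txs: &protomem.Txs{Txs: [][]byte{tx}}}}
//	bz, err := proto.Marshal(&msg)
//	if err != nil {
//		return err
//	}
//	peer.Send(mempool.MempoolChannel, bz)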
   210  
   211  // PeerState describes the state of a peer.
   212  type PeerState interface {
   213  	GetHeight() int64
   214  }
   215  
    216  // broadcastTxRoutine sends new mempool txs to the given peer.
   217  func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
   218  	peerID := memR.ids.GetForPeer(peer)
   219  	var next *clist.CElement
   220  
   221  	for {
    222  		// In case both next.NextWaitChan() and peer.Quit() fire at the same time
   223  		if !memR.IsRunning() || !peer.IsRunning() {
   224  			return
   225  		}
   226  
   227  		// This happens because the CElement we were looking at got garbage
   228  		// collected (removed). That is, .NextWait() returned nil. Go ahead and
   229  		// start from the beginning.
   230  		if next == nil {
   231  			select {
   232  			case <-memR.mempool.TxsWaitChan(): // Wait until a tx is available
   233  				if next = memR.mempool.TxsFront(); next == nil {
   234  					continue
   235  				}
   236  
   237  			case <-peer.Quit():
   238  				return
   239  
   240  			case <-memR.Quit():
   241  				return
   242  			}
   243  		}
   244  
   245  		// Make sure the peer is up to date.
   246  		peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
   247  		if !ok {
    248  			// Peer does not have a state yet. We set it in the consensus reactor, but
    249  			// when we add a peer to the Switch, the order in which each reactor's AddPeer
    250  			// is called differs every time because we iterate over a map. Sometimes other
    251  			// reactors will be initialized before the consensus reactor. We should wait a
    252  			// few milliseconds and retry.
   253  			time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
   254  			continue
   255  		}
   256  
   257  		// Allow for a lag of 1 block.
   258  		memTx := next.Value.(*WrappedTx)
   259  		if peerState.GetHeight() < memTx.height-1 {
   260  			time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
   261  			continue
   262  		}
   263  
   264  		// NOTE: Transaction batching was disabled due to
   265  		// https://github.com/tendermint/tendermint/issues/5796
   266  		if !memTx.HasPeer(peerID) {
   267  			success := p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
   268  				ChannelID: mempool.MempoolChannel,
   269  				Message:   &protomem.Txs{Txs: [][]byte{memTx.tx}},
   270  			}, memR.Logger)
   271  			if !success {
   272  				time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
   273  				continue
   274  			}
   275  		}
   276  
   277  		select {
   278  		case <-next.NextWaitChan():
   279  			// see the start of the for loop for nil check
   280  			next = next.Next()
   281  
   282  		case <-peer.Quit():
   283  			return
   284  
   285  		case <-memR.Quit():
   286  			return
   287  		}
   288  	}
   289  }
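// To summarize the loop above: each connected peer gets its own broadcastTxRoutine. It
// walks the mempool's concurrent list from the front, blocks on TxsWaitChan when the list
// is empty, skips transactions the peer is already known to have (HasPeer), tolerates the
// peer lagging by one block, and sleeps briefly before retrying whenever the peer has no
// state yet or a send does not succeed.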
   290  
   291  //-----------------------------------------------------------------------------
   292  // Messages
   293  
   294  // TxsMessage is a Message containing transactions.
   295  type TxsMessage struct {
   296  	Txs []types.Tx
   297  }
   298  
   299  // String returns a string representation of the TxsMessage.
   300  func (m *TxsMessage) String() string {
   301  	return fmt.Sprintf("[TxsMessage %v]", m.Txs)
   302  }