github.com/Finschia/ostracon@v1.1.5/mempool/v0/reactor.go

package v0

import (
	"errors"
	"fmt"
	"time"

	"github.com/gogo/protobuf/proto"

	protomem "github.com/tendermint/tendermint/proto/tendermint/mempool"

	cfg "github.com/Finschia/ostracon/config"
	"github.com/Finschia/ostracon/libs/clist"
	"github.com/Finschia/ostracon/libs/log"
	tmsync "github.com/Finschia/ostracon/libs/sync"
	"github.com/Finschia/ostracon/mempool"
	"github.com/Finschia/ostracon/p2p"
	"github.com/Finschia/ostracon/types"
)

// Reactor handles mempool tx broadcasting amongst peers.
// It maintains a map from peer ID to counter, to prevent gossiping txs back to
// the peers they were received from.
type Reactor struct {
	p2p.BaseReactor
	config  *cfg.MempoolConfig
	mempool *CListMempool
	ids     *mempoolIDs
}

type mempoolIDs struct {
	mtx       tmsync.RWMutex
	peerMap   map[p2p.ID]uint16
	nextID    uint16              // assumes that a node will never have over 65536 active peers
	activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter
}

// ReserveForPeer searches for the next unused ID and assigns it to the
// peer.
func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	curID := ids.nextPeerID()
	ids.peerMap[peer.ID()] = curID
	ids.activeIDs[curID] = struct{}{}
}

// nextPeerID returns the next unused peer ID to use.
// This assumes that ids's mutex is already locked.
func (ids *mempoolIDs) nextPeerID() uint16 {
	if len(ids.activeIDs) == mempool.MaxActiveIDs {
		panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", mempool.MaxActiveIDs))
	}

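	// nextID is a uint16, so it simply wraps around on overflow; the guard above
	// ensures at least one ID is still free, so this search always terminates.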
	_, idExists := ids.activeIDs[ids.nextID]
	for idExists {
		ids.nextID++
		_, idExists = ids.activeIDs[ids.nextID]
	}
	curID := ids.nextID
	ids.nextID++
	return curID
}

// Reclaim returns the ID reserved for the peer back to the unused pool.
func (ids *mempoolIDs) Reclaim(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	removedID, ok := ids.peerMap[peer.ID()]
	if ok {
		delete(ids.activeIDs, removedID)
		delete(ids.peerMap, peer.ID())
	}
}

// GetForPeer returns an ID reserved for the peer.
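// If no ID has been reserved for the peer, the zero value is returned, which is
// the unknownPeerID reserved in newMempoolIDs.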
func (ids *mempoolIDs) GetForPeer(peer p2p.Peer) uint16 {
	ids.mtx.RLock()
	defer ids.mtx.RUnlock()

	return ids.peerMap[peer.ID()]
}

func newMempoolIDs() *mempoolIDs {
	return &mempoolIDs{
		peerMap:   make(map[p2p.ID]uint16),
		activeIDs: map[uint16]struct{}{0: {}},
		nextID:    1, // reserve unknownPeerID(0) for mempoolReactor.BroadcastTx
	}
}

// NewReactor returns a new Reactor with the given config and mempool.
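//
// A minimal wiring sketch (illustrative only; the async and recvBufSize values
// below are assumptions, and mp, logger, and sw stand for an already initialized
// *CListMempool, a log.Logger, and the node's *p2p.Switch):
//
//	memR := NewReactor(config, true, 1000, mp)
//	memR.SetLogger(logger.With("module", "mempool"))
//	sw.AddReactor("MEMPOOL", memR)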
func NewReactor(config *cfg.MempoolConfig, async bool, recvBufSize int, mempool *CListMempool) *Reactor {
	memR := &Reactor{
		config:  config,
		mempool: mempool,
		ids:     newMempoolIDs(),
	}
	memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR, async, recvBufSize)
	return memR
}

// InitPeer implements Reactor by creating a state for the peer.
func (memR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
	memR.ids.ReserveForPeer(peer)
	return peer
}

// SetLogger sets the Logger on the reactor and the underlying mempool.
func (memR *Reactor) SetLogger(l log.Logger) {
	memR.Logger = l
	memR.mempool.SetLogger(l)
}

// OnStart implements p2p.BaseReactor.
func (memR *Reactor) OnStart() error {
	// call BaseReactor's OnStart()
	err := memR.BaseReactor.OnStart()
	if err != nil {
		return err
	}

	if !memR.config.Broadcast {
		memR.Logger.Info("Tx broadcasting is disabled")
	}
	return nil
}

// GetChannels implements Reactor by returning the list of channels for this
// reactor.
func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
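	// Size the channel's receive capacity to fit a Txs message carrying a single
	// transaction of the maximum configured size.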
	largestTx := make([]byte, memR.config.MaxTxBytes)
	batchMsg := protomem.Message{
		Sum: &protomem.Message_Txs{
			Txs: &protomem.Txs{Txs: [][]byte{largestTx}},
		},
	}

	return []*p2p.ChannelDescriptor{
		{
			ID:                  mempool.MempoolChannel,
			Priority:            5,
			RecvMessageCapacity: batchMsg.Size(),
			MessageType:         &protomem.Message{},
		},
	}
}

// AddPeer implements Reactor.
// It starts a broadcast routine ensuring all txs are forwarded to the given peer.
func (memR *Reactor) AddPeer(peer p2p.Peer) {
	if memR.config.Broadcast {
		go memR.broadcastTxRoutine(peer)
	}
}

// RemovePeer implements Reactor.
func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	memR.ids.Reclaim(peer)
	// broadcast routine checks if peer is gone and returns
}

// ReceiveEnvelope implements Reactor.
// It adds any received transactions to the mempool.
func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) {
	memR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", e.Message)
	switch msg := e.Message.(type) {
	case *protomem.Txs:
		protoTxs := msg.GetTxs()
		if len(protoTxs) == 0 {
			memR.Logger.Error("received empty txs from peer", "src", e.Src)
			return
		}
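		// Record the sending peer so broadcastTxRoutine skips gossiping these
		// txs back to it.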
		txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(e.Src)}
		if e.Src != nil {
			txInfo.SenderP2PID = e.Src.ID()
		}

		for _, tx := range protoTxs {
			ntx := types.Tx(tx)
			memR.mempool.CheckTxAsync(tx, txInfo, func(err error) {
				if errors.Is(err, mempool.ErrTxInMap) {
					memR.Logger.Debug("Tx already exists in Map", "tx", ntx.String())
				} else if errors.Is(err, mempool.ErrTxInCache) {
					memR.Logger.Debug("Tx already exists in cache", "tx", ntx.String())
				} else if err != nil {
					memR.Logger.Info("Could not check tx", "tx", ntx.String(), "err", err)
				}
			}, nil)
		}
	default:
		memR.Logger.Error("unknown message type", "src", e.Src, "chId", e.ChannelID, "msg", e.Message)
		memR.Switch.StopPeerForError(e.Src, fmt.Errorf("mempool cannot handle message of type: %T", e.Message))
		return
	}

	// broadcasting happens from go routines per peer
}

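// Receive implements Reactor using the legacy byte-based API: it unmarshals the
// raw bytes into a protomem.Message and forwards the unwrapped payload to
// ReceiveEnvelope.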
func (memR *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
	msg := &protomem.Message{}
	err := proto.Unmarshal(msgBytes, msg)
	if err != nil {
		panic(err)
	}
	uw, err := msg.Unwrap()
	if err != nil {
		panic(err)
	}
	memR.ReceiveEnvelope(p2p.Envelope{
		ChannelID: chID,
		Src:       peer,
		Message:   uw,
	})
}

// PeerState describes the state of a peer.
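// The consensus reactor stores an implementation under types.PeerStateKey in
// the peer's metadata; broadcastTxRoutine reads it to check the peer's height.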
type PeerState interface {
	GetHeight() int64
}

// Send new mempool txs to peer.
func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
	peerID := memR.ids.GetForPeer(peer)
	var next *clist.CElement

	for {
		// In case both next.NextWaitChan() and peer.Quit() become ready at the same
		// time, check explicitly that the reactor and the peer are still running.
		if !memR.IsRunning() || !peer.IsRunning() {
			return
		}
		// This happens because the CElement we were looking at got garbage
		// collected (removed). That is, .NextWait() returned nil. Go ahead and
		// start from the beginning.
		if next == nil {
			select {
			case <-memR.mempool.TxsWaitChan(): // Wait until a tx is available
				if next = memR.mempool.TxsFront(); next == nil {
					continue
				}
			case <-peer.Quit():
				return
			case <-memR.Quit():
				return
			}
		}

		// Make sure the peer is up to date.
		peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
		if !ok {
			// Peer does not have a state yet. We set it in the consensus reactor, but
			// when we add peer in Switch, the order we call reactors#AddPeer is
			// different every time due to us using a map. Sometimes other reactors
			// will be initialized before the consensus reactor. We should wait a few
			// milliseconds and retry.
			time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// Allow for a lag of 1 block.
		memTx := next.Value.(*mempoolTx)
		if peerState.GetHeight() < memTx.Height()-1 {
			time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// NOTE: Transaction batching was disabled due to
		// https://github.com/tendermint/tendermint/issues/5796

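		// Only gossip the tx to peers that are not among its original senders.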
		if _, ok := memTx.senders.Load(peerID); !ok {
			success := p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
				ChannelID: mempool.MempoolChannel,
				Message:   &protomem.Txs{Txs: [][]byte{memTx.tx}},
			}, memR.Logger)
			if !success {
				time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
				continue
			}
		}

		select {
		case <-next.NextWaitChan():
			// see the start of the for loop for nil check
			next = next.Next()
		case <-peer.Quit():
			return
		case <-memR.Quit():
			return
		}
	}
}

// TxsMessage is a Message containing transactions.
type TxsMessage struct {
	Txs []types.Tx
}

// String returns a string representation of the TxsMessage.
func (m *TxsMessage) String() string {
	return fmt.Sprintf("[TxsMessage %v]", m.Txs)
}