github.com/number571/tendermint@v0.34.11-gost/internal/mempool/v0/reactor.go

     1  package v0
     2  
     3  import (
     4  	"context"
     5  	"errors"
     6  	"fmt"
     7  	"runtime/debug"
     8  	"sync"
     9  	"time"
    10  
    11  	cfg "github.com/number571/tendermint/config"
    12  	"github.com/number571/tendermint/internal/libs/clist"
    13  	tmsync "github.com/number571/tendermint/internal/libs/sync"
    14  	"github.com/number571/tendermint/internal/mempool"
    15  	"github.com/number571/tendermint/internal/p2p"
    16  	"github.com/number571/tendermint/libs/log"
    17  	"github.com/number571/tendermint/libs/service"
    18  	protomem "github.com/number571/tendermint/proto/tendermint/mempool"
    19  	"github.com/number571/tendermint/types"
    20  )
    21  
    22  var (
    23  	_ service.Service = (*Reactor)(nil)
    24  	_ p2p.Wrapper     = (*protomem.Message)(nil)
    25  )
    26  
    27  // PeerManager defines the interface contract required for getting necessary
    28  // peer information. This should eventually be replaced with a message-oriented
    29  // approach utilizing the p2p stack.
    30  type PeerManager interface {
    31  	GetHeight(types.NodeID) int64
    32  }
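
// The type below is an illustrative sketch (not part of the original reactor) of a
// minimal, hypothetical PeerManager backed by a static map. Any real implementation
// should return 0 (or a non-positive value) when a peer's height is unknown, since
// broadcastTxRoutine only throttles gossip when GetHeight reports a height > 0.
type staticPeerManager struct {
	heights map[types.NodeID]int64
}

func (m staticPeerManager) GetHeight(id types.NodeID) int64 {
	// The zero value is returned for unknown peers, meaning "no height information".
	return m.heights[id]
}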
    33  
    34  // Reactor implements a service that contains a mempool of txs that are broadcast
    35  // amongst peers. It maintains a map from peer ID to counter, to avoid gossiping
    36  // a tx back to the peer it was received from.
    37  type Reactor struct {
    38  	service.BaseService
    39  
    40  	config  *cfg.MempoolConfig
    41  	mempool *CListMempool
    42  	ids     *mempool.MempoolIDs
    43  
    44  	// XXX: Currently, this is the only way to get information about a peer. Ideally,
    45  	// we rely on message-oriented communication to get necessary peer data.
    46  	// ref: https://github.com/number571/tendermint/issues/5670
    47  	peerMgr PeerManager
    48  
    49  	mempoolCh   *p2p.Channel
    50  	peerUpdates *p2p.PeerUpdates
    51  	closeCh     chan struct{}
    52  
    53  	// peerWG is used to coordinate graceful termination of all peer broadcasting
    54  	// goroutines.
    55  	peerWG sync.WaitGroup
    56  
    57  	mtx          tmsync.Mutex
    58  	peerRoutines map[types.NodeID]*tmsync.Closer
    59  }
    60  
    61  // NewReactor returns a reference to a new reactor.
    62  func NewReactor(
    63  	logger log.Logger,
    64  	config *cfg.MempoolConfig,
    65  	peerMgr PeerManager,
    66  	mp *CListMempool,
    67  	mempoolCh *p2p.Channel,
    68  	peerUpdates *p2p.PeerUpdates,
    69  ) *Reactor {
    70  
    71  	r := &Reactor{
    72  		config:       config,
    73  		peerMgr:      peerMgr,
    74  		mempool:      mp,
    75  		ids:          mempool.NewMempoolIDs(),
    76  		mempoolCh:    mempoolCh,
    77  		peerUpdates:  peerUpdates,
    78  		closeCh:      make(chan struct{}),
    79  		peerRoutines: make(map[types.NodeID]*tmsync.Closer),
    80  	}
    81  
    82  	r.BaseService = *service.NewBaseService(logger, "Mempool", r)
    83  	return r
    84  }
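
// newStartedReactorSketch is an illustrative, hypothetical helper (not part of the
// original file) showing how a caller might wire the reactor together and run it,
// assuming the usual service.Service Start lifecycle provided by BaseService. The
// p2p Channel and PeerUpdates plumbing is assumed to come from the node's router setup.
func newStartedReactorSketch(
	logger log.Logger,
	config *cfg.MempoolConfig,
	peerMgr PeerManager,
	mp *CListMempool,
	mempoolCh *p2p.Channel,
	peerUpdates *p2p.PeerUpdates,
) (*Reactor, error) {
	r := NewReactor(logger, config, peerMgr, mp, mempoolCh, peerUpdates)

	// Start calls OnStart, which spawns processMempoolCh and processPeerUpdates.
	if err := r.Start(); err != nil {
		return nil, err
	}
	return r, nil
}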
    85  
    86  // GetChannelShims returns a map of ChannelDescriptorShim objects, where each
    87  // object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding
    88  // p2p proto.Message the new p2p Channel is responsible for handling.
    89  //
    90  //
    91  // TODO: Remove once p2p refactor is complete.
    92  // ref: https://github.com/number571/tendermint/issues/5670
    93  func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDescriptorShim {
    94  	largestTx := make([]byte, config.MaxTxBytes)
    95  	batchMsg := protomem.Message{
    96  		Sum: &protomem.Message_Txs{
    97  			Txs: &protomem.Txs{Txs: [][]byte{largestTx}},
    98  		},
    99  	}
   100  
   101  	return map[p2p.ChannelID]*p2p.ChannelDescriptorShim{
   102  		mempool.MempoolChannel: {
   103  			MsgType: new(protomem.Message),
   104  			Descriptor: &p2p.ChannelDescriptor{
   105  				ID:                  byte(mempool.MempoolChannel),
   106  				Priority:            5,
   107  				RecvMessageCapacity: batchMsg.Size(),
   108  				RecvBufferCapacity:  128,
   109  				MaxSendBytes:        5000,
   110  			},
   111  		},
   112  	}
   113  }
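
// mempoolChannelDescriptorSketch is an illustrative, hypothetical helper (not part
// of the original file) showing how the shim map above is meant to be consumed: the
// caller looks up the mempool entry by channel ID and uses its legacy Descriptor,
// whose RecvMessageCapacity is sized to fit a Txs message carrying one MaxTxBytes tx.
func mempoolChannelDescriptorSketch(config *cfg.MempoolConfig) *p2p.ChannelDescriptor {
	return GetChannelShims(config)[mempool.MempoolChannel].Descriptor
}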
   114  
   115  // OnStart starts separate goroutines for each p2p Channel and listens for
   116  // envelopes on each. In addition, it listens for peer updates and handles
   117  // messages on that p2p channel accordingly. The caller must be sure to execute
   118  // OnStop to ensure the outbound p2p Channels are closed.
   119  func (r *Reactor) OnStart() error {
   120  	if !r.config.Broadcast {
   121  		r.Logger.Info("tx broadcasting is disabled")
   122  	}
   123  
   124  	go r.processMempoolCh()
   125  	go r.processPeerUpdates()
   126  
   127  	return nil
   128  }
   129  
   130  // OnStop stops the reactor by signaling to all spawned goroutines to exit and
   131  // blocking until they all exit.
   132  func (r *Reactor) OnStop() {
   133  	r.mtx.Lock()
   134  	for _, c := range r.peerRoutines {
   135  		c.Close()
   136  	}
   137  	r.mtx.Unlock()
   138  
   139  	// wait for all spawned peer tx broadcasting goroutines to gracefully exit
   140  	r.peerWG.Wait()
   141  
   142  	// Close closeCh to signal to all spawned goroutines to gracefully exit. All
   143  	// p2p Channels should execute Close().
   144  	close(r.closeCh)
   145  
   146  	// Wait for all p2p Channels to be closed before returning. This ensures we
   147  	// can easily reason about synchronization of all p2p Channels and ensure no
   148  	// panics will occur.
   149  	<-r.mempoolCh.Done()
   150  	<-r.peerUpdates.Done()
   151  }
   152  
   153  // handleMempoolMessage handles envelopes sent from peers on the MempoolChannel.
   154  // For every tx in the message, we execute CheckTx. It returns an error if an
   155  // empty set of txs is sent in an envelope or if we receive an unexpected
   156  // message type.
   157  func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error {
   158  	logger := r.Logger.With("peer", envelope.From)
   159  
   160  	switch msg := envelope.Message.(type) {
   161  	case *protomem.Txs:
   162  		protoTxs := msg.GetTxs()
   163  		if len(protoTxs) == 0 {
   164  			return errors.New("empty txs received from peer")
   165  		}
   166  
   167  		txInfo := mempool.TxInfo{SenderID: r.ids.GetForPeer(envelope.From)}
   168  		if len(envelope.From) != 0 {
   169  			txInfo.SenderNodeID = envelope.From
   170  		}
   171  
   172  		for _, tx := range protoTxs {
   173  			if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil {
   174  				logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(tx)), "err", err)
   175  			}
   176  		}
   177  
   178  	default:
   179  		return fmt.Errorf("received unknown message: %T", msg)
   180  	}
   181  
   182  	return nil
   183  }
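
// exampleTxsEnvelope is an illustrative, hypothetical constructor (not part of the
// original file) showing the shape of an inbound envelope that handleMempoolMessage
// accepts: a *protomem.Txs message carrying at least one raw tx, with From set to
// the sending peer so the txs are attributed to it via TxInfo.
func exampleTxsEnvelope(from types.NodeID, txs ...[]byte) p2p.Envelope {
	return p2p.Envelope{
		From:    from,
		Message: &protomem.Txs{Txs: txs},
	}
}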
   184  
   185  // handleMessage handles an Envelope sent from a peer on a specific p2p Channel.
   186  // It will handle errors and any possible panics gracefully. A caller can handle
   187  // any error returned by sending a PeerError on the respective channel.
   188  func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) {
   189  	defer func() {
   190  		if e := recover(); e != nil {
   191  			err = fmt.Errorf("panic in processing message: %v", e)
   192  			r.Logger.Error(
   193  				"recovering from processing message panic",
   194  				"err", err,
   195  				"stack", string(debug.Stack()),
   196  			)
   197  		}
   198  	}()
   199  
   200  	r.Logger.Debug("received message", "peer", envelope.From)
   201  
   202  	switch chID {
   203  	case mempool.MempoolChannel:
   204  		err = r.handleMempoolMessage(envelope)
   205  
   206  	default:
   207  		err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope)
   208  	}
   209  
   210  	return err
   211  }
   212  
   213  // processMempoolCh implements a blocking event loop where we listen for p2p
   214  // Envelope messages from the mempoolCh.
   215  func (r *Reactor) processMempoolCh() {
   216  	defer r.mempoolCh.Close()
   217  
   218  	for {
   219  		select {
   220  		case envelope := <-r.mempoolCh.In:
   221  			if err := r.handleMessage(r.mempoolCh.ID, envelope); err != nil {
   222  				r.Logger.Error("failed to process message", "ch_id", r.mempoolCh.ID, "envelope", envelope, "err", err)
   223  				r.mempoolCh.Error <- p2p.PeerError{
   224  					NodeID: envelope.From,
   225  					Err:    err,
   226  				}
   227  			}
   228  
   229  		case <-r.closeCh:
   230  			r.Logger.Debug("stopped listening on mempool channel; closing...")
   231  			return
   232  		}
   233  	}
   234  }
   235  
   236  // processPeerUpdate processes a PeerUpdate. For added peers (PeerStatusUp), we
   237  // check whether the reactor is running and whether a tx broadcasting goroutine
   238  // has already been started; if not, we start one for the newly added peer. For
   239  // down or removed peers, we remove the peer from the mempool peer ID set and
   240  // signal the tx broadcasting goroutine to stop.
   241  func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
   242  	r.Logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status)
   243  
   244  	r.mtx.Lock()
   245  	defer r.mtx.Unlock()
   246  
   247  	switch peerUpdate.Status {
   248  	case p2p.PeerStatusUp:
   249  		// Do not allow starting new tx broadcast loops after reactor shutdown
   250  		// has been initiated. This can happen after we've manually closed all
   251  		// peer broadcast loops and closed r.closeCh, but the router still sends
   252  		// in-flight peer updates.
   253  		if !r.IsRunning() {
   254  			return
   255  		}
   256  
   257  		if r.config.Broadcast {
   258  			// Check whether we've already started a goroutine for this peer. If not,
   259  			// create a new closer so the goroutine can be stopped explicitly if the
   260  			// peer is later removed, increment the waitgroup so the reactor can stop
   261  			// safely, and finally start the goroutine to broadcast txs to that peer.
   262  			_, ok := r.peerRoutines[peerUpdate.NodeID]
   263  			if !ok {
   264  				closer := tmsync.NewCloser()
   265  
   266  				r.peerRoutines[peerUpdate.NodeID] = closer
   267  				r.peerWG.Add(1)
   268  
   269  				r.ids.ReserveForPeer(peerUpdate.NodeID)
   270  
   271  				// start a broadcast routine ensuring all txs are forwarded to the peer
   272  				go r.broadcastTxRoutine(peerUpdate.NodeID, closer)
   273  			}
   274  		}
   275  
   276  	case p2p.PeerStatusDown:
   277  		r.ids.Reclaim(peerUpdate.NodeID)
   278  
   279  		// Check if we've started a tx broadcasting goroutine for this peer.
   280  		// If we have, we signal to terminate the goroutine via the channel's closure.
   281  		// This will internally decrement the peer waitgroup and remove the peer
   282  		// from the map of peer tx broadcasting goroutines.
   283  		closer, ok := r.peerRoutines[peerUpdate.NodeID]
   284  		if ok {
   285  			closer.Close()
   286  		}
   287  	}
   288  }
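
// examplePeerUpdate is an illustrative, hypothetical helper (not part of the original
// file) showing the two statuses processPeerUpdate acts on: PeerStatusUp starts a tx
// broadcasting goroutine (when broadcasting is enabled), while PeerStatusDown reclaims
// the peer's mempool ID and signals that goroutine to stop.
func examplePeerUpdate(id types.NodeID, up bool) p2p.PeerUpdate {
	status := p2p.PeerStatusDown
	if up {
		status = p2p.PeerStatusUp
	}
	return p2p.PeerUpdate{NodeID: id, Status: status}
}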
   289  
   290  // processPeerUpdates initiates a blocking process where we listen for and handle
   291  // PeerUpdate messages. When the reactor is stopped, we will catch the signal and
   292  // close the p2p PeerUpdates subscription gracefully.
   293  func (r *Reactor) processPeerUpdates() {
   294  	defer r.peerUpdates.Close()
   295  
   296  	for {
   297  		select {
   298  		case peerUpdate := <-r.peerUpdates.Updates():
   299  			r.processPeerUpdate(peerUpdate)
   300  
   301  		case <-r.closeCh:
   302  			r.Logger.Debug("stopped listening on peer updates channel; closing...")
   303  			return
   304  		}
   305  	}
   306  }
   307  
   308  func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) {
   309  	peerMempoolID := r.ids.GetForPeer(peerID)
   310  	var next *clist.CElement
   311  
   312  	// remove the peer ID from the map of routines and mark the waitgroup as done
   313  	defer func() {
   314  		r.mtx.Lock()
   315  		delete(r.peerRoutines, peerID)
   316  		r.mtx.Unlock()
   317  
   318  		r.peerWG.Done()
   319  
   320  		if e := recover(); e != nil {
   321  			r.Logger.Error(
   322  				"recovering from broadcasting mempool loop",
   323  				"err", e,
   324  				"stack", string(debug.Stack()),
   325  			)
   326  		}
   327  	}()
   328  
   329  	for {
   330  		if !r.IsRunning() {
   331  			return
   332  		}
   333  
   334  		// next is nil either on the first iteration or because the CElement we
   335  		// were looking at was removed (garbage collected), i.e. next.Next()
   336  		// returned nil. Go ahead and start from the beginning of the mempool.
   337  		if next == nil {
   338  			select {
   339  			case <-r.mempool.TxsWaitChan(): // wait until a tx is available
   340  				if next = r.mempool.TxsFront(); next == nil {
   341  					continue
   342  				}
   343  
   344  			case <-closer.Done():
   345  			// The peer is marked for removal via a PeerUpdate: the closer was
   346  			// explicitly closed to signal that we should exit.
   347  				return
   348  
   349  			case <-r.closeCh:
   350  				// The reactor has signaled that we are stopped and thus we should
   351  				// implicitly exit this peer's goroutine.
   352  				return
   353  			}
   354  		}
   355  
   356  		memTx := next.Value.(*mempoolTx)
   357  
   358  		if r.peerMgr != nil {
   359  			height := r.peerMgr.GetHeight(peerID)
   360  			if height > 0 && height < memTx.Height()-1 {
   361  				// allow for a lag of one block
   362  				time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
   363  				continue
   364  			}
   365  		}
   366  
   367  		// NOTE: Transaction batching was disabled due to:
   368  		// https://github.com/number571/tendermint/issues/5796
   369  
   370  		if _, ok := memTx.senders.Load(peerMempoolID); !ok {
   371  			// Send the mempool tx to the corresponding peer. Note, the peer may be
   372  			// behind and thus would not be able to process the mempool tx correctly.
   373  			r.mempoolCh.Out <- p2p.Envelope{
   374  				To: peerID,
   375  				Message: &protomem.Txs{
   376  					Txs: [][]byte{memTx.tx},
   377  				},
   378  			}
   379  			r.Logger.Debug(
   380  				"gossiped tx to peer",
   381  				"tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(memTx.tx)),
   382  				"peer", peerID,
   383  			)
   384  		}
   385  
   386  		select {
   387  		case <-next.NextWaitChan():
   388  			// see the start of the for loop for nil check
   389  			next = next.Next()
   390  
   391  		case <-closer.Done():
   392  			// The peer is marked for removal via a PeerUpdate: the closer was
   393  			// explicitly closed to signal that we should exit.
   394  			return
   395  
   396  		case <-r.closeCh:
   397  			// The reactor has signaled that we are stopped and thus we should
   398  			// implicitly exit this peer's goroutine.
   399  			return
   400  		}
   401  	}
   402  }
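
// peerIsCatchingUpSketch is an illustrative, hypothetical restatement (not part of
// the original file) of the catch-up check used in broadcastTxRoutine above: gossip
// of a tx is paused while the peer's reported height is known (> 0) and more than
// one block behind the height at which the tx entered the mempool.
func peerIsCatchingUpSketch(peerHeight, txHeight int64) bool {
	return peerHeight > 0 && peerHeight < txHeight-1
}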