github.com/evdatsion/aphelion-dpos-bft@v0.32.1/mempool/reactor.go

package mempool

import (
	"fmt"
	"math"
	"reflect"
	"sync"
	"time"

	amino "github.com/evdatsion/go-amino"

	cfg "github.com/evdatsion/aphelion-dpos-bft/config"
	"github.com/evdatsion/aphelion-dpos-bft/libs/clist"
	"github.com/evdatsion/aphelion-dpos-bft/libs/log"
	"github.com/evdatsion/aphelion-dpos-bft/p2p"
	"github.com/evdatsion/aphelion-dpos-bft/types"
)

const (
	MempoolChannel = byte(0x30)

	maxMsgSize = 1048576        // 1MB TODO make it configurable
	maxTxSize  = maxMsgSize - 8 // account for amino overhead of TxMessage

	peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount

	// UnknownPeerID is the peer ID to use when running CheckTx when there is
	// no peer (e.g. RPC)
	UnknownPeerID uint16 = 0

	maxActiveIDs = math.MaxUint16
)
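
// exampleFitsInMessage is a hypothetical helper, not part of the reactor's
// API: a minimal sketch of the size budget implied by the constants above. A
// tx may be at most maxTxSize bytes so that, together with the ~8 bytes of
// amino framing assumed for TxMessage, the encoded message stays within
// maxMsgSize.
func exampleFitsInMessage(tx types.Tx) bool {
	return len(tx) <= maxTxSize
}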

// Reactor handles mempool tx broadcasting amongst peers.
// It maintains a map from peer ID to counter, to avoid gossiping a tx back to
// the peers it was received from.
type Reactor struct {
	p2p.BaseReactor
	config  *cfg.MempoolConfig
	mempool *CListMempool
	ids     *mempoolIDs
}

type mempoolIDs struct {
	mtx       sync.RWMutex
	peerMap   map[p2p.ID]uint16
	nextID    uint16              // assumes that a node will never have over 65536 active peers
	activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter
}

// ReserveForPeer searches for the next unused ID and assigns it to the peer.
func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	curID := ids.nextPeerID()
	ids.peerMap[peer.ID()] = curID
	ids.activeIDs[curID] = struct{}{}
}

// nextPeerID returns the next unused peer ID to use.
// This assumes that ids's mutex is already locked.
func (ids *mempoolIDs) nextPeerID() uint16 {
	if len(ids.activeIDs) == maxActiveIDs {
		panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", maxActiveIDs))
	}

	_, idExists := ids.activeIDs[ids.nextID]
	for idExists {
		ids.nextID++
		_, idExists = ids.activeIDs[ids.nextID]
	}
	curID := ids.nextID
	ids.nextID++
	return curID
}

// Reclaim returns the ID reserved for the peer back to the unused pool.
func (ids *mempoolIDs) Reclaim(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	removedID, ok := ids.peerMap[peer.ID()]
	if ok {
		delete(ids.activeIDs, removedID)
		delete(ids.peerMap, peer.ID())
	}
}

// GetForPeer returns an ID reserved for the peer.
func (ids *mempoolIDs) GetForPeer(peer p2p.Peer) uint16 {
	ids.mtx.RLock()
	defer ids.mtx.RUnlock()

	return ids.peerMap[peer.ID()]
}

func newMempoolIDs() *mempoolIDs {
	return &mempoolIDs{
		peerMap:   make(map[p2p.ID]uint16),
		activeIDs: map[uint16]struct{}{0: {}},
		nextID:    1, // reserve UnknownPeerID (0) for mempoolReactor.BroadcastTx
	}
}
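
// exampleIDsLifecycle is a hypothetical helper, not part of the reactor's API:
// a minimal sketch of the intended peer-ID lifecycle. An ID is reserved when a
// peer is added, looked up to tag txs received from that peer, and reclaimed
// when the peer is removed. The peer argument is assumed to be a connected
// p2p.Peer.
func exampleIDsLifecycle(peer p2p.Peer) uint16 {
	ids := newMempoolIDs()
	ids.ReserveForPeer(peer)         // normally done in Reactor.AddPeer
	senderID := ids.GetForPeer(peer) // used to tag CheckTx with the sender
	ids.Reclaim(peer)                // normally done in Reactor.RemovePeer
	return senderID
}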

// NewReactor returns a new Reactor with the given config and mempool.
func NewReactor(config *cfg.MempoolConfig, mempool *CListMempool) *Reactor {
	memR := &Reactor{
		config:  config,
		mempool: mempool,
		ids:     newMempoolIDs(),
	}
	memR.BaseReactor = *p2p.NewBaseReactor("Reactor", memR)
	return memR
}
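
// exampleWireReactor is a hypothetical helper, not part of the reactor's API:
// a minimal sketch of how the reactor might be wired up, assuming a
// *CListMempool has already been constructed elsewhere. The reactor is built
// from the mempool config, given a logger, and registered on the p2p switch;
// the switch then calls AddPeer for every connected peer.
func exampleWireReactor(config *cfg.MempoolConfig, mp *CListMempool, sw *p2p.Switch, logger log.Logger) *Reactor {
	memR := NewReactor(config, mp)
	memR.SetLogger(logger.With("module", "mempool"))
	sw.AddReactor("MEMPOOL", memR)
	return memR
}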

// SetLogger sets the Logger on the reactor and the underlying mempool.
func (memR *Reactor) SetLogger(l log.Logger) {
	memR.Logger = l
	memR.mempool.SetLogger(l)
}

// OnStart implements p2p.BaseReactor.
func (memR *Reactor) OnStart() error {
	if !memR.config.Broadcast {
		memR.Logger.Info("Tx broadcasting is disabled")
	}
	return nil
}

// GetChannels implements Reactor.
// It returns the list of channels for this reactor.
func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
	return []*p2p.ChannelDescriptor{
		{
			ID:       MempoolChannel,
			Priority: 5,
		},
	}
}

// AddPeer implements Reactor.
// It starts a broadcast routine ensuring all txs are forwarded to the given peer.
func (memR *Reactor) AddPeer(peer p2p.Peer) {
	memR.ids.ReserveForPeer(peer)
	go memR.broadcastTxRoutine(peer)
}

// RemovePeer implements Reactor.
func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	memR.ids.Reclaim(peer)
	// broadcast routine checks if peer is gone and returns
}

// Receive implements Reactor.
// It adds any received transactions to the mempool.
func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	msg, err := decodeMsg(msgBytes)
	if err != nil {
		memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
		memR.Switch.StopPeerForError(src, err)
		return
	}
	memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)

	switch msg := msg.(type) {
	case *TxMessage:
		peerID := memR.ids.GetForPeer(src)
		err := memR.mempool.CheckTxWithInfo(msg.Tx, nil, TxInfo{SenderID: peerID})
		if err != nil {
			memR.Logger.Info("Could not check tx", "tx", txID(msg.Tx), "err", err)
		}
		// broadcasting happens from go routines per peer
	default:
		memR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
	}
}
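
// exampleDeliverTx is a hypothetical helper, not part of the reactor's API: a
// minimal sketch of the path a gossiped tx takes through Receive. The sender
// amino-encodes a TxMessage; the receiving reactor decodes it and runs the tx
// through CheckTxWithInfo tagged with the sender's mempool ID. The src peer is
// assumed to have been registered via AddPeer already.
func exampleDeliverTx(memR *Reactor, src p2p.Peer, tx types.Tx) {
	bz := cdc.MustMarshalBinaryBare(&TxMessage{Tx: tx})
	memR.Receive(MempoolChannel, src, bz)
}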

// PeerState describes the state of a peer.
type PeerState interface {
	GetHeight() int64
}
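
// examplePeerState is a hypothetical type, not part of the reactor's API: a
// minimal sketch of what it takes to satisfy PeerState. broadcastTxRoutine
// below only needs the peer's latest known height, which it uses to avoid
// sending txs the peer cannot process yet. In practice the consensus reactor
// stores its own, richer peer state under types.PeerStateKey.
type examplePeerState struct {
	height int64
}

// GetHeight returns the peer's latest known block height.
func (ps examplePeerState) GetHeight() int64 { return ps.height }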

// Send new mempool txs to peer.
func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
	if !memR.config.Broadcast {
		return
	}

	peerID := memR.ids.GetForPeer(peer)
	var next *clist.CElement
	for {
		// In case both next.NextWaitChan() and peer.Quit() become selectable at
		// the same time, make sure the reactor and the peer are still running.
		if !memR.IsRunning() || !peer.IsRunning() {
			return
		}
		// This happens because the CElement we were looking at got garbage
		// collected (removed). That is, .NextWait() returned nil. Go ahead and
		// start from the beginning.
		if next == nil {
			select {
			case <-memR.mempool.TxsWaitChan(): // Wait until a tx is available
				if next = memR.mempool.TxsFront(); next == nil {
					continue
				}
			case <-peer.Quit():
				return
			case <-memR.Quit():
				return
			}
		}

		memTx := next.Value.(*mempoolTx)

		// make sure the peer is up to date
		peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
		if !ok {
			// Peer does not have a state yet. We set it in the consensus reactor, but
			// when we add peer in Switch, the order we call reactors#AddPeer is
			// different every time due to us using a map. Sometimes other reactors
			// will be initialized before the consensus reactor. We should wait a few
			// milliseconds and retry.
			time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}
		if peerState.GetHeight() < memTx.Height()-1 { // Allow for a lag of 1 block
			time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// ensure peer hasn't already sent us this tx
		if _, ok := memTx.senders.Load(peerID); !ok {
			// send memTx
			msg := &TxMessage{Tx: memTx.tx}
			success := peer.Send(MempoolChannel, cdc.MustMarshalBinaryBare(msg))
			if !success {
				time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
				continue
			}
		}

		select {
		case <-next.NextWaitChan():
			// see the start of the for loop for nil check
			next = next.Next()
		case <-peer.Quit():
			return
		case <-memR.Quit():
			return
		}
	}
}

//-----------------------------------------------------------------------------
// Messages

// MempoolMessage is a message sent or received by the Reactor.
type MempoolMessage interface{}

// RegisterMempoolMessages registers mempool messages with the given amino codec.
func RegisterMempoolMessages(cdc *amino.Codec) {
	cdc.RegisterInterface((*MempoolMessage)(nil), nil)
	cdc.RegisterConcrete(&TxMessage{}, "tendermint/mempool/TxMessage", nil)
}

func decodeMsg(bz []byte) (msg MempoolMessage, err error) {
	if len(bz) > maxMsgSize {
		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
	}
	err = cdc.UnmarshalBinaryBare(bz, &msg)
	return
}
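
// exampleRoundTrip is a hypothetical helper, not part of the reactor's API: a
// minimal sketch of the wire format used on MempoolChannel. A TxMessage is
// amino-encoded with the package codec and decoded back via decodeMsg, which
// rejects anything larger than maxMsgSize.
func exampleRoundTrip(tx types.Tx) (MempoolMessage, error) {
	bz := cdc.MustMarshalBinaryBare(&TxMessage{Tx: tx})
	return decodeMsg(bz)
}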

//-------------------------------------

// TxMessage is a MempoolMessage containing a transaction.
type TxMessage struct {
	Tx types.Tx
}

// String returns a string representation of the TxMessage.
func (m *TxMessage) String() string {
	return fmt.Sprintf("[TxMessage %v]", m.Tx)
}