github.com/fibonacci-chain/fbc@v0.0.0-20231124064014-c7636198c1e9/libs/tendermint/mempool/reactor.go

package mempool

import (
	"bytes"
	"fmt"
	"math"
	"reflect"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"

	abci "github.com/fibonacci-chain/fbc/libs/tendermint/abci/types"
	cfg "github.com/fibonacci-chain/fbc/libs/tendermint/config"
	"github.com/fibonacci-chain/fbc/libs/tendermint/libs/clist"
	"github.com/fibonacci-chain/fbc/libs/tendermint/libs/log"
	"github.com/fibonacci-chain/fbc/libs/tendermint/p2p"
	"github.com/fibonacci-chain/fbc/libs/tendermint/types"
	"github.com/tendermint/go-amino"
)

const (
	MempoolChannel = byte(0x30)

	aminoOverheadForTxMessage = 8

	peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount

	// UnknownPeerID is the peer ID to use when running CheckTx when there is
	// no peer (e.g. RPC)
	UnknownPeerID uint16 = 0

	maxActiveIDs = math.MaxUint16
)

// Reactor handles mempool tx broadcasting amongst peers.
// It maintains a map from peer ID to counter, to prevent gossiping txs back to
// the peers they were received from.
type Reactor struct {
	p2p.BaseReactor
	config           *cfg.MempoolConfig
	mempool          *CListMempool
	ids              *mempoolIDs
	nodeKey          *p2p.NodeKey
	nodeKeyWhitelist map[string]struct{}
	enableWtx        bool
}

func (memR *Reactor) SetNodeKey(key *p2p.NodeKey) {
	memR.nodeKey = key
}

type mempoolIDs struct {
	mtx       sync.RWMutex
	peerMap   map[p2p.ID]uint16
	nextID    uint16              // assumes that a node will never have over 65536 active peers
	activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter
}

// ReserveForPeer searches for the next unused ID and assigns it to the
// peer.
func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	curID := ids.nextPeerID()
	ids.peerMap[peer.ID()] = curID
	ids.activeIDs[curID] = struct{}{}
}

// nextPeerID returns the next unused peer ID to use.
// This assumes that ids's mutex is already locked.
func (ids *mempoolIDs) nextPeerID() uint16 {
	if len(ids.activeIDs) == maxActiveIDs {
		panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", maxActiveIDs))
	}

	_, idExists := ids.activeIDs[ids.nextID]
	for idExists {
		ids.nextID++
		_, idExists = ids.activeIDs[ids.nextID]
	}
	curID := ids.nextID
	ids.nextID++
	return curID
}

// Reclaim returns the ID reserved for the peer back to the unused pool.
func (ids *mempoolIDs) Reclaim(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	removedID, ok := ids.peerMap[peer.ID()]
	if ok {
		delete(ids.activeIDs, removedID)
		delete(ids.peerMap, peer.ID())
	}
}

// GetForPeer returns the ID reserved for the peer.
func (ids *mempoolIDs) GetForPeer(peer p2p.Peer) uint16 {
	ids.mtx.RLock()
	defer ids.mtx.RUnlock()

	return ids.peerMap[peer.ID()]
}

func newMempoolIDs() *mempoolIDs {
	return &mempoolIDs{
		peerMap:   make(map[p2p.ID]uint16),
		activeIDs: map[uint16]struct{}{0: {}},
		nextID:    1, // reserve UnknownPeerID (0) for txs submitted without a peer (e.g. via RPC broadcast)
	}
}

// NewReactor returns a new Reactor with the given config and mempool.
func NewReactor(config *cfg.MempoolConfig, mempool *CListMempool) *Reactor {
	memR := &Reactor{
		config:           config,
		mempool:          mempool,
		ids:              newMempoolIDs(),
		nodeKeyWhitelist: make(map[string]struct{}),
		enableWtx:        cfg.DynamicConfig.GetEnableWtx(),
	}
	for _, nodeKey := range config.GetNodeKeyWhitelist() {
		memR.nodeKeyWhitelist[nodeKey] = struct{}{}
	}
	memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR)
	memR.press()
	return memR
}

// InitPeer implements Reactor by creating a state for the peer.
func (memR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
	memR.ids.ReserveForPeer(peer)
	return peer
}

// SetLogger sets the Logger on the reactor and the underlying mempool.
func (memR *Reactor) SetLogger(l log.Logger) {
	memR.Logger = l
	memR.mempool.SetLogger(l)
}

// OnStart implements p2p.BaseReactor.
func (memR *Reactor) OnStart() error {
	if !memR.config.Broadcast {
		memR.Logger.Info("Tx broadcasting is disabled")
	}
	return nil
}

// GetChannels implements Reactor.
// It returns the list of channels for this reactor.
func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
	return []*p2p.ChannelDescriptor{
		{
			ID:       MempoolChannel,
			Priority: 5,
		},
	}
}

// AddPeer implements Reactor.
// It starts a broadcast routine ensuring all txs are forwarded to the given peer.
func (memR *Reactor) AddPeer(peer p2p.Peer) {
	go memR.broadcastTxRoutine(peer)
}

// RemovePeer implements Reactor.
func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	memR.ids.Reclaim(peer)
	// broadcast routine checks if peer is gone and returns
}

// txMessageDecodePool is a sync.Pool of *TxMessage.
// memR.decodeMsg calls txMessageDecodePool.Get, and memR.Receive resets the
// message after use, then calls txMessageDecodePool.Put.
var txMessageDecodePool = &sync.Pool{
	New: func() interface{} {
		return &TxMessage{}
	},
}

var logParamsPool = &sync.Pool{
	New: func() interface{} {
		return &[6]interface{}{}
	},
}

func (memR *Reactor) logReceive(peer p2p.Peer, chID byte, msg Message) {
	logParams := logParamsPool.Get().(*[6]interface{})

	logParams[0] = "src"
	logParams[1] = peer
	logParams[2] = "chId"
	logParams[3] = chID
	logParams[4] = "msg"
	logParams[5] = msg

	memR.Logger.Debug("Receive", logParams[:]...)

	logParamsPool.Put(logParams)
}

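// logParamsPool above and txIDStringerPool below keep these hot logging paths
// allocation-light: a plain variadic call such as
//
//	memR.Logger.Debug("Receive", "src", peer, "chId", chID, "msg", msg)
//
// allocates a fresh []interface{} on every received message, whereas the
// pooled [6]interface{} arrays are reused across calls. (Illustrative
// comparison, not a measured claim.)
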
var txIDStringerPool = &sync.Pool{
	New: func() interface{} {
		return &txIDStringer{}
	},
}

func (memR *Reactor) logCheckTxError(tx []byte, height int64, err error) {
	logParams := logParamsPool.Get().(*[6]interface{})
	txStr := txIDStringerPool.Get().(*txIDStringer)
	txStr.tx = tx
	txStr.height = height

	logParams[0] = "tx"
	logParams[1] = txStr
	logParams[2] = "err"
	logParams[3] = err

	memR.Logger.Info("Could not check tx", logParams[:4]...)

	txIDStringerPool.Put(txStr)
	logParamsPool.Put(logParams)
}

// Receive implements Reactor.
// It adds any received transactions to the mempool.
func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	if memR.mempool.config.Sealed {
		return
	}
	msg, err := memR.decodeMsg(msgBytes)
	if err != nil {
		memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
		memR.Switch.StopPeerForError(src, err)
		return
	}
	memR.logReceive(src, chID, msg)

	txInfo := TxInfo{SenderID: memR.ids.GetForPeer(src)}
	if src != nil {
		txInfo.SenderP2PID = src.ID()
	}
	var tx types.Tx

	switch msg := msg.(type) {
	case *TxMessage:
		tx = msg.Tx
		if _, isInWhiteList := memR.nodeKeyWhitelist[string(src.ID())]; isInWhiteList && msg.From != "" {
			txInfo.from = msg.From
		}
		*msg = TxMessage{}
		txMessageDecodePool.Put(msg)
	case *WtxMessage:
		tx = msg.Wtx.Payload
		if err := msg.Wtx.verify(memR.nodeKeyWhitelist); err != nil {
			memR.Logger.Error("wtx.verify", "error", err, "txhash",
				common.BytesToHash(types.Tx(msg.Wtx.Payload).Hash(memR.mempool.Height())),
			)
		} else {
			txInfo.wtx = msg.Wtx
			txInfo.checkType = abci.CheckTxType_WrappedCheck
		}
		// broadcasting happens from go routines per peer
	default:
		memR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
		return
	}

	err = memR.mempool.CheckTx(tx, nil, txInfo)
	if err != nil {
		memR.logCheckTxError(tx, memR.mempool.height, err)
	}
}

// PeerState describes the state of a peer.
type PeerState interface {
	GetHeight() int64
}

// Send new mempool txs to peer.
func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
	if !memR.config.Broadcast {
		return
	}
	_, isInWhiteList := memR.nodeKeyWhitelist[string(peer.ID())]

	peerID := memR.ids.GetForPeer(peer)
	var next *clist.CElement
	for {
		// In case both next.NextWaitChan() and peer.Quit() fire at the same
		// time, re-check the running state on every iteration.
		if !memR.IsRunning() || !peer.IsRunning() {
			return
		}
		// This happens because the CElement we were looking at got garbage
		// collected (removed). That is, .NextWait() returned nil. Go ahead and
		// start from the beginning.
		if next == nil {
			select {
			case <-memR.mempool.TxsWaitChan(): // Wait until a tx is available
				if next = memR.mempool.BroadcastTxsFront(); next == nil {
					continue
				}
			case <-peer.Quit():
				return
			case <-memR.Quit():
				return
			}
		}

		memTx := next.Value.(*mempoolTx)

		// make sure the peer is up to date
		peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
		if !ok {
			// Peer does not have a state yet. We set it in the consensus reactor, but
			// when we add peer in Switch, the order we call reactors#AddPeer is
			// different every time due to us using a map. Sometimes other reactors
			// will be initialized before the consensus reactor. We should wait a few
			// milliseconds and retry.
			time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}
		if peerState.GetHeight() < memTx.Height()-1 { // Allow for a lag of 1 block
			time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// ensure peer hasn't already sent us this tx
		memTx.senderMtx.RLock()
		_, ok = memTx.senders[peerID]
		memTx.senderMtx.RUnlock()
		if !ok {
			var getFromPool bool
			// send memTx
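			// Wire-format choice, summarized: a tx that arrived already
			// wrapped and signed (memTx.nodeKey and memTx.signature set) is
			// re-broadcast as the same WtxMessage; with enableWtx this node
			// signs and wraps the tx itself via wrapTx; otherwise a plain
			// TxMessage is taken from the pool, with From revealed only to
			// whitelisted peers.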
			var msg Message
			if memTx.nodeKey != nil && memTx.signature != nil {
				msg = &WtxMessage{
					Wtx: &WrappedTx{
						Payload:   memTx.tx,
						From:      memTx.from,
						Signature: memTx.signature,
						NodeKey:   memTx.nodeKey,
					},
				}
			} else if memR.enableWtx {
				if wtx, err := memR.wrapTx(memTx.tx, memTx.from); err == nil {
					msg = &WtxMessage{
						Wtx: wtx,
					}
				}
			} else {
				txMsg := txMessageDecodePool.Get().(*TxMessage)
				txMsg.Tx = memTx.tx
				if isInWhiteList {
					txMsg.From = memTx.from
				} else {
					txMsg.From = ""
				}
				msg = txMsg
				getFromPool = true
			}

			// msg stays nil if wrapTx failed above; skip the send in that
			// case rather than encode a nil message.
			if msg != nil {
				msgBz := memR.encodeMsg(msg)
				if getFromPool {
					txMessageDecodePool.Put(msg)
				}

				success := peer.Send(MempoolChannel, msgBz)
				if !success {
					time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
					continue
				}
			}
		}

		select {
		case <-next.NextWaitChan():
			// see the start of the for loop for nil check
			next = next.Next()
		case <-peer.Quit():
			return
		case <-memR.Quit():
			return
		}
	}
}

//-----------------------------------------------------------------------------
// Messages

// Message is a message sent or received by the Reactor.
type Message interface{}

func RegisterMessages(cdc *amino.Codec) {
	cdc.RegisterInterface((*Message)(nil), nil)
	cdc.RegisterConcrete(&TxMessage{}, "tendermint/mempool/TxMessage", nil)
	cdc.RegisterConcrete(&WtxMessage{}, "tendermint/mempool/WtxMessage", nil)

	cdc.RegisterConcreteMarshaller("tendermint/mempool/TxMessage", func(codec *amino.Codec, i interface{}) ([]byte, error) {
		txmp, ok := i.(*TxMessage)
		if ok {
			return txmp.MarshalToAmino(codec)
		}
		txm, ok := i.(TxMessage)
		if ok {
			return txm.MarshalToAmino(codec)
		}
		return nil, fmt.Errorf("%T is not a TxMessage", i)
	})
	cdc.RegisterConcreteUnmarshaller("tendermint/mempool/TxMessage", func(cdc *amino.Codec, bz []byte) (interface{}, int, error) {
		m := &TxMessage{}
		err := m.UnmarshalFromAmino(cdc, bz)
		if err != nil {
			return nil, 0, err
		}
		return m, len(bz), nil
	})
}

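// The custom (un)marshallers registered above bypass amino reflection for
// TxMessage, the dominant message type on the wire. The bytes they produce
// follow the standard amino layout: the registered type prefix for
// "tendermint/mempool/TxMessage" followed by proto3-style length-prefixed
// fields. A minimal sketch of the layout (field keys taken from
// MarshalAminoTo below):
//
//	<type prefix> | 0x0A <len> <Tx bytes> | 0x12 <len> <From bytes>
//
// where 0x0A is field 1 (Tx) and 0x12 is field 2 (From); empty fields are
// omitted entirely.
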
// decodeMsg decodes the bz bytes into a Message.
// If err is nil and the Message is a TxMessage, the caller must return it to
// txMessageDecodePool after use.
func (memR *Reactor) decodeMsg(bz []byte) (Message, error) {
	maxMsgSize := calcMaxMsgSize(memR.config.MaxTxBytes)
	l := len(bz)
	if l > maxMsgSize {
		return nil, ErrTxTooLarge{maxMsgSize, l}
	}

	tp := getTxMessageAminoTypePrefix()
	if l >= len(tp) && bytes.Equal(bz[:len(tp)], tp) {
		txmsg := txMessageDecodePool.Get().(*TxMessage)
		err := txmsg.UnmarshalFromAmino(cdc, bz[len(tp):])
		if err == nil {
			return txmsg, nil
		}
		txmsg.Tx = nil
		txMessageDecodePool.Put(txmsg)
	}
	var msg Message
	err := cdc.UnmarshalBinaryBare(bz, &msg)
	return msg, err
}

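// A minimal usage sketch of the pooling contract above, mirroring what
// Receive does (handle is a hypothetical consumer, not part of this package):
//
//	msg, err := memR.decodeMsg(bz)
//	if err == nil {
//		if tm, ok := msg.(*TxMessage); ok {
//			handle(tm.Tx)     // use the tx...
//			*tm = TxMessage{} // ...then reset and return it to the pool
//			txMessageDecodePool.Put(tm)
//		}
//	}
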
func (memR *Reactor) encodeMsg(msg Message) []byte {
	var txmp *TxMessage
	switch m := msg.(type) {
	case *TxMessage:
		txmp = m
	case TxMessage:
		txmp = &m
	}
	if txmp != nil {
		buf := &bytes.Buffer{}
		tp := getTxMessageAminoTypePrefix()
		buf.Grow(len(tp) + txmp.AminoSize(cdc))
		// we manually assemble the encoded bytes for performance
		buf.Write(tp)
		err := txmp.MarshalAminoTo(cdc, buf)
		if err == nil {
			return buf.Bytes()
		}
	}
	return cdc.MustMarshalBinaryBare(msg)
}

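// encodeMsg is the mirror of decodeMsg: a TxMessage is written as the amino
// type prefix plus its hand-rolled field encoding, so bytes produced here hit
// decodeMsg's pooled fast path on the receiving side. Any other message type
// falls back to generic amino reflection via cdc.MustMarshalBinaryBare.
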
//-------------------------------------

// TxMessage is a Message containing a transaction.
type TxMessage struct {
	Tx   types.Tx
	From string
}

func (m TxMessage) AminoSize(_ *amino.Codec) int {
	size := 0
	if len(m.Tx) > 0 {
		size += 1 + amino.ByteSliceSize(m.Tx)
	}
	if m.From != "" {
		size += 1 + amino.EncodedStringSize(m.From)
	}
	return size
}

func (m TxMessage) MarshalToAmino(cdc *amino.Codec) ([]byte, error) {
	buf := new(bytes.Buffer)
	buf.Grow(m.AminoSize(cdc))
	err := m.MarshalAminoTo(cdc, buf)
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func (m TxMessage) MarshalAminoTo(_ *amino.Codec, buf *bytes.Buffer) error {
	if len(m.Tx) != 0 {
		const pbKey = byte(1<<3 | amino.Typ3_ByteLength)
		err := amino.EncodeByteSliceWithKeyToBuffer(buf, m.Tx, pbKey)
		if err != nil {
			return err
		}
	}
	if m.From != "" {
		const pbKey = byte(2<<3 | amino.Typ3_ByteLength)
		err := amino.EncodeStringWithKeyToBuffer(buf, m.From, pbKey)
		if err != nil {
			return err
		}
	}
	return nil
}

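// As a worked example of the layout above (hypothetical values): for
// Tx = 0xDEADBEEF and From = "a", MarshalAminoTo writes
//
//	0x0A 0x04 0xDE 0xAD 0xBE 0xEF   // field 1: key, length 4, Tx bytes
//	0x12 0x01 0x61                  // field 2: key, length 1, "a"
//
// and AminoSize returns 9 = (1+1+4) + (1+1+1) for the same message.
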
func (m *TxMessage) UnmarshalFromAmino(_ *amino.Codec, data []byte) error {
	const fieldCount = 2
	var currentField int
	var currentType amino.Typ3
	var err error

	for cur := 1; cur <= fieldCount; cur++ {
		if len(data) != 0 && (currentField == 0 || currentField < cur) {
			var nextField int
			if nextField, currentType, err = amino.ParseProtoPosAndTypeMustOneByte(data[0]); err != nil {
				return err
			}
			if nextField < currentField {
				return fmt.Errorf("next field should be greater than %d, got %d", currentField, nextField)
			}
			currentField = nextField
		}
		if len(data) == 0 || currentField != cur {
			switch cur {
			case 1:
				m.Tx = nil
			case 2:
				m.From = ""
			default:
				return fmt.Errorf("unexpected field num %d", cur)
			}
		} else {
			pbk := data[0]
			data = data[1:]
			var subData []byte
			if currentType == amino.Typ3_ByteLength {
				if subData, err = amino.DecodeByteSliceWithoutCopy(&data); err != nil {
					return err
				}
			}
			switch pbk {
			case 1<<3 | byte(amino.Typ3_ByteLength):
				if len(subData) == 0 {
					m.Tx = nil
				} else {
					m.Tx = make([]byte, len(subData))
					copy(m.Tx, subData)
				}
			case 2<<3 | byte(amino.Typ3_ByteLength):
				m.From = string(subData)
			default:
				return fmt.Errorf("unexpected pb key %d", pbk)
			}
		}
	}
	if len(data) != 0 {
		return fmt.Errorf("unexpected data remaining: %X", data)
	}
	return nil
}

// String returns a string representation of the TxMessage.
func (m *TxMessage) String() string {
	return fmt.Sprintf("[TxMessage %v]", m.Tx)
}

// calcMaxMsgSize returns the max size of a TxMessage,
// accounting for the amino overhead of TxMessage.
func calcMaxMsgSize(maxTxSize int) int {
	return maxTxSize + aminoOverheadForTxMessage
}

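// For example (hypothetical config value): with MaxTxBytes = 1024, the
// reactor accepts wire messages of up to 1024 + 8 = 1032 bytes.
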
// WtxMessage is a Message containing a wrapped, signed transaction.
type WtxMessage struct {
	Wtx *WrappedTx
}

// String returns a string representation of the WtxMessage.
func (m *WtxMessage) String() string {
	return fmt.Sprintf("[WtxMessage %v]", m.Wtx)
}

type WrappedTx struct {
	Payload   []byte `json:"payload"`   // std tx or evm tx
	From      string `json:"from"`      // from address of evm tx or ""
	Signature []byte `json:"signature"` // signature for payload
	NodeKey   []byte `json:"nodeKey"`   // pub key of the node who signs the tx
}

func (wtx *WrappedTx) GetPayload() []byte {
	if wtx != nil {
		return wtx.Payload
	}
	return nil
}

func (wtx *WrappedTx) GetSignature() []byte {
	if wtx != nil {
		return wtx.Signature
	}
	return nil
}

func (wtx *WrappedTx) GetNodeKey() []byte {
	if wtx != nil {
		return wtx.NodeKey
	}
	return nil
}

func (wtx *WrappedTx) GetFrom() string {
	if wtx != nil {
		return wtx.From
	}
	return ""
}

func (wtx *WrappedTx) verify(whitelist map[string]struct{}) error {
	pub := p2p.BytesToPubKey(wtx.NodeKey)
	if _, ok := whitelist[string(p2p.PubKeyToID(pub))]; !ok {
		return fmt.Errorf("node key [%s] not in whitelist", p2p.PubKeyToID(pub))
	}
	if !pub.VerifyBytes(append(wtx.Payload, wtx.From...), wtx.Signature) {
		return fmt.Errorf("invalid signature of wtx")
	}
	return nil
}

func (memR *Reactor) wrapTx(tx types.Tx, from string) (*WrappedTx, error) {
	wtx := &WrappedTx{
		Payload: tx,
		From:    from,
		NodeKey: memR.nodeKey.PubKey().Bytes(),
	}
	sig, err := memR.nodeKey.PrivKey.Sign(append(wtx.Payload, from...))
	if err != nil {
		return nil, err
	}
	wtx.Signature = sig
	return wtx, nil
}
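
// A minimal round-trip sketch of wrapTx and verify (hypothetical from value;
// assumes this node's key is in the receiver's whitelist):
//
//	wtx, err := memR.wrapTx(tx, "0xfrom") // signs Payload||From with the node key
//	// on the receiving side:
//	err = wtx.verify(whitelist) // nil iff NodeKey's p2p ID is whitelisted
//	                            // and Signature covers Payload||From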