github.com/MagHErmit/tendermint@v0.282.1/mempool/v0/reactor.go (about)

package v0

import (
	"errors"
	"fmt"
	"time"

	cfg "github.com/MagHErmit/tendermint/config"
	"github.com/MagHErmit/tendermint/libs/clist"
	"github.com/MagHErmit/tendermint/libs/log"
	tmsync "github.com/MagHErmit/tendermint/libs/sync"
	"github.com/MagHErmit/tendermint/mempool"
	"github.com/MagHErmit/tendermint/p2p"
	protomem "github.com/MagHErmit/tendermint/proto/tendermint/mempool"
	"github.com/MagHErmit/tendermint/types"
)

// Reactor handles mempool tx broadcasting amongst peers.
// It maintains a map from peer ID to counter, to prevent gossiping txs to
// the peers from which they were received.
type Reactor struct {
	p2p.BaseReactor
	config  *cfg.MempoolConfig
	mempool *CListMempool
	ids     *mempoolIDs
}

type mempoolIDs struct {
	mtx       tmsync.RWMutex
	peerMap   map[p2p.ID]uint16
	nextID    uint16              // assumes that a node will never have over 65536 active peers
	activeIDs map[uint16]struct{} // used to check if a given peerID key is used; the value doesn't matter
}

// ReserveForPeer searches for the next unused ID and assigns it to the peer.
func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	curID := ids.nextPeerID()
	ids.peerMap[peer.ID()] = curID
	ids.activeIDs[curID] = struct{}{}
}

// nextPeerID returns the next unused peer ID to use.
// This assumes that ids's mutex is already locked.
func (ids *mempoolIDs) nextPeerID() uint16 {
	if len(ids.activeIDs) == mempool.MaxActiveIDs {
		panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", mempool.MaxActiveIDs))
	}

	_, idExists := ids.activeIDs[ids.nextID]
	for idExists {
		ids.nextID++
		_, idExists = ids.activeIDs[ids.nextID]
	}
	curID := ids.nextID
	ids.nextID++
	return curID
}

// Reclaim returns the ID reserved for the peer back to the unused pool.
func (ids *mempoolIDs) Reclaim(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	removedID, ok := ids.peerMap[peer.ID()]
	if ok {
		delete(ids.activeIDs, removedID)
		delete(ids.peerMap, peer.ID())
	}
}

// GetForPeer returns the ID reserved for the peer.
func (ids *mempoolIDs) GetForPeer(peer p2p.Peer) uint16 {
	ids.mtx.RLock()
	defer ids.mtx.RUnlock()

	return ids.peerMap[peer.ID()]
}

func newMempoolIDs() *mempoolIDs {
	return &mempoolIDs{
		peerMap:   make(map[p2p.ID]uint16),
		activeIDs: map[uint16]struct{}{0: {}},
		nextID:    1, // reserve unknownPeerID(0) for mempoolReactor.BroadcastTx
	}
}

// NewReactor returns a new Reactor with the given config and mempool.
func NewReactor(config *cfg.MempoolConfig, mempool *CListMempool) *Reactor {
	memR := &Reactor{
		config:  config,
		mempool: mempool,
		ids:     newMempoolIDs(),
	}
	memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR)
	return memR
}

// InitPeer implements Reactor by creating a state for the peer.
func (memR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
	memR.ids.ReserveForPeer(peer)
	return peer
}

// SetLogger sets the Logger on the reactor and the underlying mempool.
func (memR *Reactor) SetLogger(l log.Logger) {
	memR.Logger = l
	memR.mempool.SetLogger(l)
}
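// Usage sketch: one plausible way to wire this reactor into a node, based on
// the constructors above. proxyAppConn, height, logger, and sw (*p2p.Switch)
// are assumed to exist in the caller; this is illustrative, not the canonical
// node setup.
//
//	memplCfg := cfg.DefaultMempoolConfig()
//	mp := NewCListMempool(memplCfg, proxyAppConn, height)
//	memR := NewReactor(memplCfg, mp)
//	memR.SetLogger(logger.With("module", "mempool"))
//	sw.AddReactor("MEMPOOL", memR) // the Switch then calls InitPeer/AddPeer per peer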
// OnStart implements p2p.BaseReactor.
func (memR *Reactor) OnStart() error {
	if !memR.config.Broadcast {
		memR.Logger.Info("Tx broadcasting is disabled")
	}
	return nil
}

// GetChannels implements Reactor by returning the list of channels for this
// reactor.
func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
	largestTx := make([]byte, memR.config.MaxTxBytes)
	batchMsg := protomem.Message{
		Sum: &protomem.Message_Txs{
			Txs: &protomem.Txs{Txs: [][]byte{largestTx}},
		},
	}

	return []*p2p.ChannelDescriptor{
		{
			ID:                  mempool.MempoolChannel,
			Priority:            5,
			RecvMessageCapacity: batchMsg.Size(),
		},
	}
}

// AddPeer implements Reactor.
// It starts a broadcast routine ensuring all txs are forwarded to the given peer.
func (memR *Reactor) AddPeer(peer p2p.Peer) {
	if memR.config.Broadcast {
		go memR.broadcastTxRoutine(peer)
	}
}

// RemovePeer implements Reactor.
func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	memR.ids.Reclaim(peer)
	// broadcast routine checks if peer is gone and returns
}

// Receive implements Reactor.
// It adds any received transactions to the mempool.
func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	msg, err := memR.decodeMsg(msgBytes)
	if err != nil {
		memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
		memR.Switch.StopPeerForError(src, err)
		return
	}
	memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)

	txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(src)}
	if src != nil {
		txInfo.SenderP2PID = src.ID()
	}

	for _, tx := range msg.Txs {
		err = memR.mempool.CheckTx(tx, nil, txInfo)
		if errors.Is(err, mempool.ErrTxInCache) {
			memR.Logger.Debug("Tx already exists in cache", "tx", tx.String())
		} else if err != nil {
			memR.Logger.Info("Could not check tx", "tx", tx.String(), "err", err)
		}
	}

	// broadcasting happens from go routines per peer
}

// PeerState describes the state of a peer.
type PeerState interface {
	GetHeight() int64
}
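// PeerState is normally attached by the consensus reactor via
// peer.Set(types.PeerStateKey, ...), as the comment inside broadcastTxRoutine
// below notes. A minimal stub for exercising the broadcast loop in tests
// might look like the sketch here; stubPeerState is a hypothetical name, not
// part of this package.
//
//	type stubPeerState struct{ height int64 }
//
//	func (s stubPeerState) GetHeight() int64 { return s.height }
//
//	// attach it before the broadcast routine reads it:
//	peer.Set(types.PeerStateKey, stubPeerState{height: 10})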
// broadcastTxRoutine sends new mempool txs to the peer.
func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
	peerID := memR.ids.GetForPeer(peer)
	var next *clist.CElement

	for {
		// Bail out if the reactor or the peer has stopped. This also covers
		// the case where next.NextWaitChan() and peer.Quit() fire at the
		// same time.
		if !memR.IsRunning() || !peer.IsRunning() {
			return
		}
		// This happens because the CElement we were looking at got garbage
		// collected (removed). That is, .NextWait() returned nil. Go ahead and
		// start from the beginning.
		if next == nil {
			select {
			case <-memR.mempool.TxsWaitChan(): // Wait until a tx is available
				if next = memR.mempool.TxsFront(); next == nil {
					continue
				}
			case <-peer.Quit():
				return
			case <-memR.Quit():
				return
			}
		}

		// Make sure the peer is up to date.
		peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
		if !ok {
			// Peer does not have a state yet. We set it in the consensus reactor, but
			// when we add a peer in the Switch, the order in which we call each
			// reactor's AddPeer is different every time due to us using a map.
			// Sometimes other reactors will be initialized before the consensus
			// reactor. We should wait a few milliseconds and retry.
			time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// Allow for a lag of 1 block.
		memTx := next.Value.(*mempoolTx)
		if peerState.GetHeight() < memTx.Height()-1 {
			time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// NOTE: Transaction batching was disabled due to
		// https://github.com/MagHErmit/tendermint/issues/5796

		if _, ok := memTx.senders.Load(peerID); !ok {
			msg := protomem.Message{
				Sum: &protomem.Message_Txs{
					Txs: &protomem.Txs{Txs: [][]byte{memTx.tx}},
				},
			}

			bz, err := msg.Marshal()
			if err != nil {
				panic(err)
			}

			success := peer.Send(mempool.MempoolChannel, bz)
			if !success {
				time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
				continue
			}
		}

		select {
		case <-next.NextWaitChan():
			// see the start of the for loop for nil check
			next = next.Next()
		case <-peer.Quit():
			return
		case <-memR.Quit():
			return
		}
	}
}

func (memR *Reactor) decodeMsg(bz []byte) (TxsMessage, error) {
	msg := protomem.Message{}
	err := msg.Unmarshal(bz)
	if err != nil {
		return TxsMessage{}, err
	}

	var message TxsMessage

	if i, ok := msg.Sum.(*protomem.Message_Txs); ok {
		txs := i.Txs.GetTxs()

		if len(txs) == 0 {
			return message, errors.New("empty TxsMessage")
		}

		decoded := make([]types.Tx, len(txs))
		for j, tx := range txs {
			decoded[j] = types.Tx(tx)
		}

		message = TxsMessage{
			Txs: decoded,
		}
		return message, nil
	}
	return message, fmt.Errorf("msg type: %T is not supported", msg)
}

// TxsMessage is a Message containing transactions.
type TxsMessage struct {
	Txs []types.Tx
}

// String returns a string representation of the TxsMessage.
func (m *TxsMessage) String() string {
	return fmt.Sprintf("[TxsMessage %v]", m.Txs)
}
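// Wire-format sketch: decodeMsg expects the same protomem.Message shape that
// GetChannels and broadcastTxRoutine build above, so a roundtrip under that
// assumption looks like this (memR is an existing *Reactor):
//
//	msg := protomem.Message{
//		Sum: &protomem.Message_Txs{
//			Txs: &protomem.Txs{Txs: [][]byte{[]byte("tx1")}},
//		},
//	}
//	bz, _ := msg.Marshal()
//	decoded, err := memR.decodeMsg(bz) // TxsMessage{Txs: []types.Tx{...}}, nil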