github.com/mydexchain/tendermint@v0.0.4/mempool/reactor.go

package mempool

import (
	"errors"
	"fmt"
	"math"
	"time"

	cfg "github.com/mydexchain/tendermint/config"
	"github.com/mydexchain/tendermint/libs/clist"
	"github.com/mydexchain/tendermint/libs/log"
	tmsync "github.com/mydexchain/tendermint/libs/sync"
	"github.com/mydexchain/tendermint/p2p"
	protomem "github.com/mydexchain/tendermint/proto/tendermint/mempool"
	"github.com/mydexchain/tendermint/types"
)

const (
	MempoolChannel = byte(0x30)

	peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount

	// UnknownPeerID is the peer ID to use when running CheckTx when there is
	// no peer (e.g. RPC)
	UnknownPeerID uint16 = 0

	maxActiveIDs = math.MaxUint16
)

// Reactor handles mempool tx broadcasting amongst peers.
// It maintains a map from peer ID to counter, to prevent gossiping txs to
// the peers you received them from.
type Reactor struct {
	p2p.BaseReactor
	config  *cfg.MempoolConfig
	mempool *CListMempool
	ids     *mempoolIDs
}

type mempoolIDs struct {
	mtx       tmsync.RWMutex
	peerMap   map[p2p.ID]uint16
	nextID    uint16              // assumes that a node will never have over 65536 active peers
	activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter
}

// ReserveForPeer searches for the next unused ID and assigns it to the peer.
func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	curID := ids.nextPeerID()
	ids.peerMap[peer.ID()] = curID
	ids.activeIDs[curID] = struct{}{}
}

// nextPeerID returns the next unused peer ID to use.
// This assumes that ids's mutex is already locked.
func (ids *mempoolIDs) nextPeerID() uint16 {
	if len(ids.activeIDs) == maxActiveIDs {
		panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", maxActiveIDs))
	}

	_, idExists := ids.activeIDs[ids.nextID]
	for idExists {
		ids.nextID++
		_, idExists = ids.activeIDs[ids.nextID]
	}
	curID := ids.nextID
	ids.nextID++
	return curID
}

// Reclaim returns the ID reserved for the peer back to the unused pool.
func (ids *mempoolIDs) Reclaim(peer p2p.Peer) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()

	removedID, ok := ids.peerMap[peer.ID()]
	if ok {
		delete(ids.activeIDs, removedID)
		delete(ids.peerMap, peer.ID())
	}
}

// GetForPeer returns the ID reserved for the peer.
func (ids *mempoolIDs) GetForPeer(peer p2p.Peer) uint16 {
	ids.mtx.RLock()
	defer ids.mtx.RUnlock()

	return ids.peerMap[peer.ID()]
}

func newMempoolIDs() *mempoolIDs {
	return &mempoolIDs{
		peerMap:   make(map[p2p.ID]uint16),
		activeIDs: map[uint16]struct{}{0: {}},
		nextID:    1, // reserve UnknownPeerID (0) for mempoolReactor.BroadcastTx
	}
}

// NewReactor returns a new Reactor with the given config and mempool.
func NewReactor(config *cfg.MempoolConfig, mempool *CListMempool) *Reactor {
	memR := &Reactor{
		config:  config,
		mempool: mempool,
		ids:     newMempoolIDs(),
	}
	memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR)
	return memR
}

// InitPeer implements Reactor by creating a state for the peer.
func (memR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
	memR.ids.ReserveForPeer(peer)
	return peer
}
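
// Peer-ID lifecycle sketch (illustrative only, not part of the original file;
// peer and the removal reason are placeholders). The switch drives these three
// hooks: InitPeer reserves a compact uint16 ID before AddPeer starts the
// gossip routine, and RemovePeer returns the ID to the pool:
//
//	p := memR.InitPeer(peer)      // ReserveForPeer assigns the next free ID
//	memR.AddPeer(p)               // starts broadcastTxRoutine(p) if Broadcast is on
//	memR.RemovePeer(p, "example") // Reclaim frees the ID for reuse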
// SetLogger sets the Logger on the reactor and the underlying mempool.
func (memR *Reactor) SetLogger(l log.Logger) {
	memR.Logger = l
	memR.mempool.SetLogger(l)
}

// OnStart implements p2p.BaseReactor.
func (memR *Reactor) OnStart() error {
	if !memR.config.Broadcast {
		memR.Logger.Info("Tx broadcasting is disabled")
	}
	return nil
}

// GetChannels implements Reactor by returning the list of channels for this
// reactor.
func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
	largestTx := make([]byte, memR.config.MaxTxBytes)
	batchMsg := protomem.Message{
		Sum: &protomem.Message_Txs{
			Txs: &protomem.Txs{Txs: [][]byte{largestTx}},
		},
	}

	return []*p2p.ChannelDescriptor{
		{
			ID:                  MempoolChannel,
			Priority:            5,
			RecvMessageCapacity: batchMsg.Size(),
		},
	}
}

// AddPeer implements Reactor.
// It starts a broadcast routine ensuring all txs are forwarded to the given peer.
func (memR *Reactor) AddPeer(peer p2p.Peer) {
	if memR.config.Broadcast {
		go memR.broadcastTxRoutine(peer)
	}
}

// RemovePeer implements Reactor.
func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	memR.ids.Reclaim(peer)
	// broadcast routine checks if peer is gone and returns
}

// Receive implements Reactor.
// It adds any received transactions to the mempool.
func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	msg, err := memR.decodeMsg(msgBytes)
	if err != nil {
		memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
		memR.Switch.StopPeerForError(src, err)
		return
	}
	memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)

	txInfo := TxInfo{SenderID: memR.ids.GetForPeer(src)}
	if src != nil {
		txInfo.SenderP2PID = src.ID()
	}
	for _, tx := range msg.Txs {
		err = memR.mempool.CheckTx(tx, nil, txInfo)
		if err != nil {
			memR.Logger.Info("Could not check tx", "tx", txID(tx), "err", err)
		}
	}
	// broadcasting happens from go routines per peer
}

// PeerState describes the state of a peer.
type PeerState interface {
	GetHeight() int64
}
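
// The gossip loop below wraps each transaction in the protobuf Message
// envelope before sending it over MempoolChannel. A hedged framing sketch
// (illustrative only; tx stands in for a raw transaction []byte):
//
//	msg := protomem.Message{
//		Sum: &protomem.Message_Txs{Txs: &protomem.Txs{Txs: [][]byte{tx}}},
//	}
//	bz, err := msg.Marshal()
//	if err == nil {
//		peer.Send(MempoolChannel, bz)
//	}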
// Send new mempool txs to peer.
func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
	peerID := memR.ids.GetForPeer(peer)
	var next *clist.CElement

	for {
		// In case both next.NextWaitChan() and peer.Quit() fire at the same
		// time, make sure the reactor and the peer are still running.
		if !memR.IsRunning() || !peer.IsRunning() {
			return
		}
		// This happens because the CElement we were looking at got garbage
		// collected (removed). That is, .NextWait() returned nil. Go ahead and
		// start from the beginning.
		if next == nil {
			select {
			case <-memR.mempool.TxsWaitChan(): // Wait until a tx is available
				if next = memR.mempool.TxsFront(); next == nil {
					continue
				}
			case <-peer.Quit():
				return
			case <-memR.Quit():
				return
			}
		}

		// Make sure the peer is up to date.
		peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
		if !ok {
			// Peer does not have a state yet. We set it in the consensus reactor, but
			// when we add peer in Switch, the order we call reactors#AddPeer is
			// different every time due to us using a map. Sometimes other reactors
			// will be initialized before the consensus reactor. We should wait a few
			// milliseconds and retry.
			time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// Allow for a lag of 1 block.
		memTx := next.Value.(*mempoolTx)
		if peerState.GetHeight() < memTx.Height()-1 {
			time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// NOTE: Transaction batching was disabled due to
		// https://github.com/mydexchain/tendermint/issues/5796

		// Only send the tx if this peer is not already known to have it.
		if _, ok := memTx.senders.Load(peerID); !ok {
			msg := protomem.Message{
				Sum: &protomem.Message_Txs{
					Txs: &protomem.Txs{Txs: [][]byte{memTx.tx}},
				},
			}
			bz, err := msg.Marshal()
			if err != nil {
				panic(err)
			}
			success := peer.Send(MempoolChannel, bz)
			if !success {
				time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
				continue
			}
		}

		select {
		case <-next.NextWaitChan():
			// see the start of the for loop for nil check
			next = next.Next()
		case <-peer.Quit():
			return
		case <-memR.Quit():
			return
		}
	}
}

//-----------------------------------------------------------------------------
// Messages

func (memR *Reactor) decodeMsg(bz []byte) (TxsMessage, error) {
	msg := protomem.Message{}
	err := msg.Unmarshal(bz)
	if err != nil {
		return TxsMessage{}, err
	}

	var message TxsMessage

	if i, ok := msg.Sum.(*protomem.Message_Txs); ok {
		txs := i.Txs.GetTxs()

		if len(txs) == 0 {
			return message, errors.New("empty TxsMessage")
		}

		decoded := make([]types.Tx, len(txs))
		for j, tx := range txs {
			decoded[j] = types.Tx(tx)
		}

		message = TxsMessage{
			Txs: decoded,
		}
		return message, nil
	}
	return message, fmt.Errorf("msg type: %T is not supported", msg.Sum)
}

//-------------------------------------

// TxsMessage is a Message containing transactions.
type TxsMessage struct {
	Txs []types.Tx
}

// String returns a string representation of the TxsMessage.
func (m *TxsMessage) String() string {
	return fmt.Sprintf("[TxsMessage %v]", m.Txs)
}
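
// A hedged decode sketch (illustrative only; the wire bytes and memR are
// placeholders). decodeMsg above turns a marshaled protomem.Message carrying
// raw tx bytes into a TxsMessage of types.Tx values, as exercised by Receive:
//
//	wire, _ := (&protomem.Message{
//		Sum: &protomem.Message_Txs{Txs: &protomem.Txs{Txs: [][]byte{[]byte("tx1")}}},
//	}).Marshal()
//	m, err := memR.decodeMsg(wire) // m.Txs == []types.Tx{types.Tx("tx1")}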