github.com/aergoio/aergo@v1.3.1/p2p/subproto/tx.go

/*
 * @file
 * @copyright defined in aergo/LICENSE.txt
 */

package subproto

import (
	"github.com/aergoio/aergo-lib/log"
	"github.com/aergoio/aergo/internal/enc"
	"github.com/aergoio/aergo/message"
	"github.com/aergoio/aergo/p2p/p2pcommon"
	"github.com/aergoio/aergo/p2p/p2putil"
	"github.com/aergoio/aergo/types"
	"github.com/golang/protobuf/proto"
)

type txRequestHandler struct {
	BaseMsgHandler
	asyncHelper
	msgHelper message.Helper
}

var _ p2pcommon.MessageHandler = (*txRequestHandler)(nil)

type txResponseHandler struct {
	BaseMsgHandler
}

var _ p2pcommon.MessageHandler = (*txResponseHandler)(nil)

type newTxNoticeHandler struct {
	BaseMsgHandler
}

var _ p2pcommon.MessageHandler = (*newTxNoticeHandler)(nil)

// NewTxReqHandler creates a handler for GetTransactionsRequest.
func NewTxReqHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePeer, logger *log.Logger, actor p2pcommon.ActorService) *txRequestHandler {
	th := &txRequestHandler{
		BaseMsgHandler{protocol: p2pcommon.GetTXsRequest, pm: pm, peer: peer, actor: actor, logger: logger},
		newAsyncHelper(), message.GetHelper()}
	return th
}

func (th *txRequestHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) {
	return p2putil.UnmarshalAndReturn(rawbytes, &types.GetTransactionsRequest{})
}

func (th *txRequestHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) {
	remotePeer := th.peer
	body := msgBody.(*types.GetTransactionsRequest)
	reqHashes := body.Hashes
	p2putil.DebugLogReceive(th.logger, th.protocol, msg.ID().String(), remotePeer, body)

	if th.issue() {
		go th.handleTxReq(msg, reqHashes)
	} else {
		th.logger.Info().Str(p2putil.LogPeerName, remotePeer.Name()).Str(p2putil.LogMsgID, msg.ID().String()).Msg("return err for concurrent get tx request")
		resp := &types.GetTransactionsResponse{
			Status: types.ResultStatus_RESOURCE_EXHAUSTED,
			Hashes: nil,
			Txs:    nil, HasNext: false}
		remotePeer.SendMessage(remotePeer.MF().NewMsgResponseOrder(msg.ID(), p2pcommon.GetTXsResponse, resp))
	}
}

// handleTxReq must be called only after a ticket was acquired via issue(); it releases the ticket when done.
func (th *txRequestHandler) handleTxReq(msg p2pcommon.Message, reqHashes [][]byte) {
	defer th.release()
	remotePeer := th.peer
	// TODO: consider making this async if a deadlock with the remote peer can occur.
	// NOTE: the size estimation is tied to protobuf3; it must be revisited if the protobuf version changes.
	// look up the requested transactions from the mempool
	idx := 0
	status := types.ResultStatus_OK
	var hashes []types.TxHash
	var txInfos, txs []*types.Tx
	payloadSize := EmptyGetBlockResponseSize
	var txSize, fieldSize int

	// query the mempool in buckets of at most MaxReqestHashes hashes each
	bucket := message.MaxReqestHashes
	var futures []interface{}

	for _, h := range reqHashes {
		hashes = append(hashes, h)
		if len(hashes) == bucket {
			if f, err := th.actor.CallRequestDefaultTimeout(message.MemPoolSvc,
				&message.MemPoolExistEx{Hashes: hashes}); err == nil {
				futures = append(futures, f)
			}
			hashes = nil
		}
	}
	if hashes != nil {
		if f, err := th.actor.CallRequestDefaultTimeout(message.MemPoolSvc,
			&message.MemPoolExistEx{Hashes: hashes}); err == nil {
			futures = append(futures, f)
		}
	}
	hashes = nil
	idx = 0
	// collect the transactions returned by each mempool query
	for _, f := range futures {
		if tmp, err := th.msgHelper.ExtractTxsFromResponseAndError(f, nil); err == nil {
			txs = append(txs, tmp...)
		} else {
			th.logger.Debug().Err(err).Msg("ErrExtract tx in future")
		}
	}
	for _, tx := range txs {
		if tx == nil {
			continue
		}
		hash := tx.GetHash()
		txSize = proto.Size(tx)

		fieldSize = txSize + p2putil.CalculateFieldDescSize(txSize)
		fieldSize += len(hash) + p2putil.CalculateFieldDescSize(len(hash))

		if (payloadSize + fieldSize) > p2pcommon.MaxPayloadLength {
			// the next tx would exceed the payload limit, so send a partial list first
			resp := &types.GetTransactionsResponse{
				Status: status,
				Hashes: hashes,
				Txs:    txInfos, HasNext: true}
			th.logger.Debug().Int(p2putil.LogTxCount, len(hashes)).
				Str(p2putil.LogOrgReqID, msg.ID().String()).Msg("Sending partial response")

			remotePeer.SendMessage(remotePeer.MF().
				NewMsgResponseOrder(msg.ID(), p2pcommon.GetTXsResponse, resp))
			hashes, txInfos, payloadSize = nil, nil, EmptyGetBlockResponseSize
		}

		hashes = append(hashes, hash)
		txInfos = append(txInfos, tx)
		payloadSize += fieldSize
		idx++
	}
	if idx == 0 {
		status = types.ResultStatus_NOT_FOUND
	}
	th.logger.Debug().Int(p2putil.LogTxCount, len(hashes)).
		Str(p2putil.LogOrgReqID, msg.ID().String()).Str(p2putil.LogRespStatus, status.String()).Msg("Sending last part response")

	// generate the final response message
	resp := &types.GetTransactionsResponse{
		Status: status,
		Hashes: hashes,
		Txs:    txInfos, HasNext: false}
	remotePeer.SendMessage(remotePeer.MF().NewMsgResponseOrder(msg.ID(), p2pcommon.GetTXsResponse, resp))
}

// NewTxRespHandler creates a handler for GetTransactionsResponse.
func NewTxRespHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePeer, logger *log.Logger, actor p2pcommon.ActorService) *txResponseHandler {
	th := &txResponseHandler{BaseMsgHandler{protocol: p2pcommon.GetTXsResponse, pm: pm, peer: peer, actor: actor, logger: logger}}
	return th
}

func (th *txResponseHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) {
	return p2putil.UnmarshalAndReturn(rawbytes, &types.GetTransactionsResponse{})
}

func (th *txResponseHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) {
	data := msgBody.(*types.GetTransactionsResponse)
	p2putil.DebugLogReceiveResponse(th.logger, th.protocol, msg.ID().String(), msg.OriginalID().String(), th.peer, data)

	th.peer.ConsumeRequest(msg.OriginalID())
	go func() {
		// TODO: Is there any better solution than passing everything to the mempool service?
		if len(data.Txs) > 0 {
			th.logger.Debug().Int(p2putil.LogTxCount, len(data.Txs)).Msg("Request mempool to add txs")
			//th.actor.SendRequest(message.MemPoolSvc, &message.MemPoolPut{Txs: data.Txs})
			for _, tx := range data.Txs {
				th.actor.SendRequest(message.MemPoolSvc, &message.MemPoolPut{Tx: tx})
			}
		}
	}()
}

// NewNewTxNoticeHandler creates a handler for NewTransactionsNotice.
func NewNewTxNoticeHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePeer, logger *log.Logger, actor p2pcommon.ActorService, sm p2pcommon.SyncManager) *newTxNoticeHandler {
	th := &newTxNoticeHandler{BaseMsgHandler: BaseMsgHandler{protocol: p2pcommon.NewTxNotice, pm: pm, sm: sm, peer: peer, actor: actor, logger: logger}}
	return th
}

func (th *newTxNoticeHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) {
	return p2putil.UnmarshalAndReturn(rawbytes, &types.NewTransactionsNotice{})
}

func (th *newTxNoticeHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) {
	remotePeer := th.peer
	data := msgBody.(*types.NewTransactionsNotice)
	// log only when debug is enabled, to avoid overly verbose logs
	if th.logger.IsDebugEnabled() {
		p2putil.DebugLogReceive(th.logger, th.protocol, msg.ID().String(), remotePeer, data)
	}

	if len(data.TxHashes) == 0 {
		return
	}
	// the lru cache accepts only hashable keys, so convert the raw hashes to TxID
	hashes := make([]types.TxID, len(data.TxHashes))
	for i, hash := range data.TxHashes {
		if tid, err := types.ParseToTxID(hash); err != nil {
			th.logger.Info().Str(p2putil.LogPeerName, remotePeer.Name()).Str("hash", enc.ToString(hash)).Msg("malformed txhash found")
			// TODO: add a penalty score and break
			break
		} else {
			hashes[i] = tid
		}
	}
	added := th.peer.UpdateTxCache(hashes)
	if len(added) > 0 {
		th.sm.HandleNewTxNotice(th.peer, added, data)
	}
}
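
// Usage sketch (illustrative only): a peer runtime dispatching incoming
// sub-protocol messages would typically decode the raw payload with
// ParsePayload and then pass the result to Handle, roughly as below. The
// values pm, peer, logger, actor, msg and rawPayload are assumed to be
// supplied by the surrounding p2p code; they are not defined in this file.
//
//	handler := NewTxReqHandler(pm, peer, logger, actor)
//	if body, err := handler.ParsePayload(rawPayload); err == nil {
//		handler.Handle(msg, body)
//	}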