github.com/prysmaticlabs/prysm@v1.4.4/beacon-chain/sync/rpc.go (about) 1 package sync 2 3 import ( 4 "context" 5 "reflect" 6 "strings" 7 8 libp2pcore "github.com/libp2p/go-libp2p-core" 9 "github.com/libp2p/go-libp2p-core/network" 10 "github.com/prysmaticlabs/prysm/beacon-chain/p2p" 11 p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types" 12 "github.com/prysmaticlabs/prysm/shared/params" 13 "github.com/prysmaticlabs/prysm/shared/timeutils" 14 "github.com/prysmaticlabs/prysm/shared/traceutil" 15 "go.opencensus.io/trace" 16 ) 17 18 // Time to first byte timeout. The maximum time to wait for first byte of 19 // request response (time-to-first-byte). The client is expected to give up if 20 // they don't receive the first byte within 5 seconds. 21 var ttfbTimeout = params.BeaconNetworkConfig().TtfbTimeout 22 23 // respTimeout is the maximum time for complete response transfer. 24 var respTimeout = params.BeaconNetworkConfig().RespTimeout 25 26 // rpcHandler is responsible for handling and responding to any incoming message. 27 // This method may return an error to internal monitoring, but the error will 28 // not be relayed to the peer. 29 type rpcHandler func(context.Context, interface{}, libp2pcore.Stream) error 30 31 // registerRPCHandlers for p2p RPC. 32 func (s *Service) registerRPCHandlers() { 33 s.registerRPC( 34 p2p.RPCStatusTopicV1, 35 s.statusRPCHandler, 36 ) 37 s.registerRPC( 38 p2p.RPCGoodByeTopicV1, 39 s.goodbyeRPCHandler, 40 ) 41 s.registerRPC( 42 p2p.RPCBlocksByRangeTopicV1, 43 s.beaconBlocksByRangeRPCHandler, 44 ) 45 s.registerRPC( 46 p2p.RPCBlocksByRootTopicV1, 47 s.beaconBlocksRootRPCHandler, 48 ) 49 s.registerRPC( 50 p2p.RPCPingTopicV1, 51 s.pingHandler, 52 ) 53 s.registerRPC( 54 p2p.RPCMetaDataTopicV1, 55 s.metaDataHandler, 56 ) 57 } 58 59 // registerRPC for a given topic with an expected protobuf message type. 
func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
	// The wire topic is the base topic plus the encoding suffix (e.g. "/ssz_snappy").
	topic := baseTopic + s.cfg.P2P.Encoding().ProtocolSuffix()
	log := log.WithField("topic", topic)
	s.cfg.P2P.SetStreamHandler(topic, func(stream network.Stream) {
		// Bound the whole handler invocation by the time-to-first-byte timeout.
		// NOTE(review): this caps the entire handler, not just the first byte —
		// long responses presumably rely on the handler managing its own
		// deadlines; confirm against the individual RPC handlers.
		ctx, cancel := context.WithTimeout(s.ctx, ttfbTimeout)
		defer cancel()

		// Resetting after closing is a no-op so defer a reset in case something goes wrong.
		// It's up to the handler to Close the stream (send an EOF) if
		// it successfully writes a response. We don't blindly call
		// Close here because we may have only written a partial
		// response.
		defer func() {
			// Reset error is deliberately discarded; there is nothing useful
			// to do with it at this point.
			_err := stream.Reset()
			_ = _err
		}()

		ctx, span := trace.StartSpan(ctx, "sync.rpc")
		defer span.End()
		span.AddAttributes(trace.StringAttribute("topic", topic))
		span.AddAttributes(trace.StringAttribute("peer", stream.Conn().RemotePeer().Pretty()))
		// Shadow the outer logger with per-stream fields; the topic field is
		// re-set from the negotiated stream protocol.
		log := log.WithField("peer", stream.Conn().RemotePeer().Pretty()).WithField("topic", string(stream.Protocol()))

		// Check beforehand that peer is valid: banned peers get a goodbye
		// message and are disconnected without the request being served.
		if s.cfg.P2P.Peers().IsBad(stream.Conn().RemotePeer()) {
			if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeBanned, stream.Conn().RemotePeer()); err != nil {
				log.Debugf("Could not disconnect from peer: %v", err)
			}
			return
		}
		// Validate request according to peer limits (rate limiting); rejected
		// requests are dropped silently apart from a debug log.
		if err := s.rateLimiter.validateRawRpcRequest(stream); err != nil {
			log.Debugf("Could not validate rpc request from peer: %v", err)
			return
		}
		s.rateLimiter.addRawStream(stream)

		// The peer must deliver the request payload within the TTFB window.
		if err := stream.SetReadDeadline(timeutils.Now().Add(ttfbTimeout)); err != nil {
			log.WithError(err).Debug("Could not set stream read deadline")
			return
		}

		// Look up the prototype message registered for this base topic; it
		// drives the reflect-based decode below.
		base, ok := p2p.RPCTopicMappings[baseTopic]
		if !ok {
			log.Errorf("Could not retrieve base message for topic %s", baseTopic)
			return
		}
		t := reflect.TypeOf(base)
		// Copy Base
		// NOTE(review): after this assignment `base` holds a reflect.Value
		// boxed in interface{}, not the message type itself. It is only passed
		// to the metadata handler below, which presumably ignores its message
		// argument — confirm against metaDataHandler.
		base = reflect.New(t)

		// Increment message received counter.
		messageReceivedCounter.WithLabelValues(topic).Inc()

		// since metadata requests do not have any data in the payload, we
		// do not decode anything.
		if baseTopic == p2p.RPCMetaDataTopicV1 {
			if err := handle(ctx, base, stream); err != nil {
				messageFailedProcessingCounter.WithLabelValues(topic).Inc()
				// Wrong-fork-digest errors are expected across forks and are
				// not worth logging.
				if err != p2ptypes.ErrWrongForkDigestVersion {
					log.WithError(err).Debug("Could not handle p2p RPC")
				}
				traceutil.AnnotateError(span, err)
			}
			return
		}

		// Given we have an input argument that can be pointer or the actual object, this gives us
		// a way to check for its reflect.Kind and based on the result, we can decode
		// accordingly.
		if t.Kind() == reflect.Ptr {
			// Pointer prototype: allocate the pointee and decode into it, then
			// hand the pointer to the handler.
			msg := reflect.New(t.Elem())
			if err := s.cfg.P2P.Encoding().DecodeWithMaxLength(stream, msg.Interface()); err != nil {
				// Debug logs for goodbye/status errors
				if strings.Contains(topic, p2p.RPCGoodByeTopicV1) || strings.Contains(topic, p2p.RPCStatusTopicV1) {
					log.WithError(err).Debug("Could not decode goodbye stream message")
					traceutil.AnnotateError(span, err)
					return
				}
				log.WithError(err).Debug("Could not decode stream message")
				traceutil.AnnotateError(span, err)
				return
			}
			if err := handle(ctx, msg.Interface(), stream); err != nil {
				messageFailedProcessingCounter.WithLabelValues(topic).Inc()
				if err != p2ptypes.ErrWrongForkDigestVersion {
					log.WithError(err).Debug("Could not handle p2p RPC")
				}
				traceutil.AnnotateError(span, err)
			}
		} else {
			// Value prototype: decode into a fresh pointer, but pass the
			// dereferenced value to the handler.
			msg := reflect.New(t)
			if err := s.cfg.P2P.Encoding().DecodeWithMaxLength(stream, msg.Interface()); err != nil {
				log.WithError(err).Debug("Could not decode stream message")
				traceutil.AnnotateError(span, err)
				return
			}
			if err := handle(ctx, msg.Elem().Interface(), stream); err != nil {
				messageFailedProcessingCounter.WithLabelValues(topic).Inc()
				if err != p2ptypes.ErrWrongForkDigestVersion {
					log.WithError(err).Debug("Could not handle p2p RPC")
				}
				traceutil.AnnotateError(span, err)
			}
		}
	})
}