github.com/amazechain/amc@v0.1.3/internal/sync/rpc_status.go

package sync

import (
	"bytes"
	"context"
	"sync"
	"time"

	"github.com/amazechain/amc/api/protocol/sync_pb"
	"github.com/amazechain/amc/internal/p2p"
	"github.com/amazechain/amc/internal/p2p/peers"
	p2ptypes "github.com/amazechain/amc/internal/p2p/types"
	"github.com/amazechain/amc/log"
	"github.com/amazechain/amc/utils"
	"github.com/holiman/uint256"
	libp2pcore "github.com/libp2p/go-libp2p/core"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
)

// maintainPeerStatuses keeps peer statuses fresh by infrequently polling
// connected peers for their latest status.
func (s *Service) maintainPeerStatuses() {
	// Run twice per epoch.
	utils.RunEvery(s.ctx, maintainPeerStatusesInterval, func() {
		wg := new(sync.WaitGroup)
		for _, pid := range s.cfg.p2p.Peers().Connected() {
			wg.Add(1)
			go func(id peer.ID) {
				defer wg.Done()
				// If the connection has dropped without the peer store being updated,
				// record the disconnect and set the connection state here.
				if s.cfg.p2p.Host().Network().Connectedness(id) != network.Connected {
					s.cfg.p2p.Peers().SetConnectionState(id, peers.PeerDisconnecting)
					if err := s.cfg.p2p.Disconnect(id); err != nil {
						log.Debug("Error when disconnecting with peer", "err", err)
					}
					s.cfg.p2p.Peers().SetConnectionState(id, peers.PeerDisconnected)
					return
				}
				// Disconnect from peers that are considered bad by any of the registered scorers.
				if s.cfg.p2p.Peers().IsBad(id) {
					s.disconnectBadPeer(s.ctx, id)
					return
				}
				// Revalidate peers whose status has not been updated within the last interval.
				lastUpdated, err := s.cfg.p2p.Peers().ChainStateLastUpdated(id)
				if err != nil {
					// Peer has vanished; nothing to do.
					return
				}
				if time.Now().After(lastUpdated.Add(maintainPeerStatusesInterval)) {
					if err := s.reValidatePeer(s.ctx, id); err != nil {
						log.Debug("Could not revalidate peer", "peer", id, "err", err)
						s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(id)
					}
				}
			}(pid)
		}
		// Wait for all status checks to finish, then prune excess peers.
		wg.Wait()
		peerIds := s.cfg.p2p.Peers().PeersToPrune()
		for _, id := range peerIds {
			if err := s.sendGoodByeAndDisconnect(s.ctx, p2ptypes.GoodbyeCodeTooManyPeers, id); err != nil {
				log.Debug("Could not disconnect with peer", "peer", id, "err", err)
			}
		}
	})
}

// resyncIfBehind periodically checks whether we are in normal sync but have fallen
// behind our peers by more than an epoch, in which case we attempt a resync using
// the initial sync method to catch up.
func (s *Service) resyncIfBehind() {
	utils.RunEvery(s.ctx, resyncInterval, func() {
		// TODO: should the header height be required to be greater than the body height?
		if s.cfg.initialSync != nil && !s.cfg.initialSync.Syncing() {
			// Factor in the expected minimum number of sync peers, to make sure enough
			// peers are available to resync (some peers may go away between this check
			// and the actual resync).
			highestBlockNr, _ := s.cfg.p2p.Peers().BestPeers(s.cfg.p2p.GetConfig().MinSyncPeers*2, s.cfg.chain.CurrentBlock().Number64())
			// Check if the current node is more than 1 epoch behind.
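			// Note: the "1 epoch" referenced above is hard-coded as five blocks in the
			// comparison below; a best-peer height at least five blocks above ours
			// triggers a resync.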
			if highestBlockNr.Cmp(new(uint256.Int).AddUint64(s.cfg.chain.CurrentBlock().Number64(), 5)) >= 0 {
				log.Info("Fallen behind peers; reverting to initial sync to catch up", "currentBlockNr", s.cfg.chain.CurrentBlock().Number64(), "peersBlockNr", highestBlockNr)
				numberOfTimesResyncedCounter.Inc()
				//s.clearPendingSlots()
				if err := s.cfg.initialSync.Resync(); err != nil {
					log.Error("Could not resync chain", "err", err)
				}
			}
		}
	})
}

// sendRPCStatusRequest sends a Status RPC to the given peer, then reads and
// validates the status the peer returns.
func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error {
	ctx, cancel := context.WithTimeout(ctx, respTimeout)
	defer cancel()

	//forkDigest, err := s.currentForkDigest()

	// Our own status, sent as the request payload.
	req := &sync_pb.Status{
		GenesisHash:   utils.ConvertHashToH256(s.cfg.chain.GenesisBlock().Hash()),
		CurrentHeight: utils.ConvertUint256IntToH256(s.cfg.chain.CurrentBlock().Number64()),
	}
	topic, err := p2p.TopicFromMessage(p2p.StatusMessageName)
	if err != nil {
		return err
	}
	stream, err := s.cfg.p2p.Send(ctx, req, topic, id)
	if err != nil {
		return err
	}
	defer closeStream(stream)

	code, errMsg, err := ReadStatusCode(stream, s.cfg.p2p.Encoding())
	if err != nil {
		s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
		return err
	}

	// A non-zero response code indicates an error on the remote side.
	if code != 0 {
		s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(id)
		return errors.New(errMsg)
	}
	msg := &sync_pb.Status{}
	if err := s.cfg.p2p.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
		s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
		return err
	}

	// If validation fails, the error is recorded and the peer status scorer marks the peer as bad.
	err = s.validateStatusMessage(ctx, msg)
	s.cfg.p2p.Peers().Scorers().PeerStatusScorer().SetPeerStatus(id, msg, err)
	if s.cfg.p2p.Peers().IsBad(id) {
		s.disconnectBadPeer(s.ctx, id)
	}
	return err
}

// reValidatePeer refreshes our view of the peer's chain state with a status
// request, followed by a best-effort ping.
func (s *Service) reValidatePeer(ctx context.Context, id peer.ID) error {
	s.cfg.p2p.Peers().Scorers().PeerStatusScorer().SetCurrentHeight(s.cfg.chain.CurrentBlock().Number64())
	if err := s.sendRPCStatusRequest(ctx, id); err != nil {
		return err
	}
	// Do not return an error for ping requests.
	if err := s.sendPingRequest(ctx, id); err != nil {
		log.Debug("Could not ping peer", "err", err)
	}
	return nil
}
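
// Note: the status exchange is symmetric. sendRPCStatusRequest (above) opens the
// round trip with our genesis hash and current height, while statusRPCHandler
// (below) answers incoming requests with the same payload via respondWithStatus,
// so both sides can run validateStatusMessage against the peer's view of the chain.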

// statusRPCHandler reads the incoming Status RPC from the peer and responds with
// our own status message. The handler disconnects any peer whose fork version does
// not match ours.
func (s *Service) statusRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
	ctx, cancel := context.WithTimeout(ctx, ttfbTimeout)
	defer cancel()
	SetRPCStreamDeadlines(stream)
	m, ok := msg.(*sync_pb.Status)
	if !ok {
		return errors.New("message is not type *sync_pb.Status")
	}
	if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
		return err
	}
	s.rateLimiter.add(stream, 1)

	remotePeer := stream.Conn().RemotePeer()
	if err := s.validateStatusMessage(ctx, m); err != nil {
		log.Debug("Invalid status message from peer", "handler", "status", "peer", remotePeer, "error", err)

		respCode := byte(0)
		switch err {
		case p2ptypes.ErrGeneric:
			respCode = responseCodeServerError
		case p2ptypes.ErrWrongForkDigestVersion:
			// Respond with our status and disconnect from the peer.
			s.cfg.p2p.Peers().SetChainState(remotePeer, m)
			if err := s.respondWithStatus(ctx, stream); err != nil {
				return err
			}
			// Close before disconnecting, and wait for the other end to ack our response.
			closeStreamAndWait(stream)
			if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeWrongNetwork, remotePeer); err != nil {
				return err
			}
			return nil
		default:
			respCode = responseCodeInvalidRequest
			s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
		}

		originalErr := err
		resp, err := s.generateErrorResponse(respCode, err.Error())
		if err != nil {
			log.Debug("Could not generate a response error", "err", err)
		} else if _, err := stream.Write(resp); err != nil {
			// The peer may already be ignoring us, as we disagree on fork version, so log this as debug only.
			log.Debug("Could not write to stream", "err", err)
		}
		closeStreamAndWait(stream)
		if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeGenericError, remotePeer); err != nil {
			return err
		}
		return originalErr
	}
	s.cfg.p2p.Peers().SetChainState(remotePeer, m)

	if err := s.respondWithStatus(ctx, stream); err != nil {
		return err
	}
	closeStream(stream)
	return nil
}

// respondWithStatus writes a success response code followed by our own status
// message to the stream.
func (s *Service) respondWithStatus(ctx context.Context, stream network.Stream) error {
	//forkDigest, err := s.currentForkDigest()
	//if err != nil {
	//	return err
	//}
	resp := &sync_pb.Status{
		GenesisHash:   utils.ConvertHashToH256(s.cfg.chain.GenesisBlock().Hash()),
		CurrentHeight: utils.ConvertUint256IntToH256(s.cfg.chain.CurrentBlock().Number64()),
	}

	if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
		log.Debug("Could not write to stream", "err", err)
	}
	_, err := s.cfg.p2p.Encoding().EncodeWithMaxLength(stream, resp)
	return err
}

// validateStatusMessage checks that the fork digest derived from the peer's status
// matches our own, returning ErrWrongForkDigestVersion otherwise.
func (s *Service) validateStatusMessage(ctx context.Context, msg *sync_pb.Status) error {
	forkDigest, err := s.currentForkDigest()
	if err != nil {
		return err
	}
	remoteDigest, err := utils.CreateForkDigest(
		utils.ConvertH256ToUint256Int(msg.CurrentHeight),
		utils.ConvertH256ToHash(msg.GenesisHash),
	)
	if err != nil {
		return err
	}
	if !bytes.Equal(forkDigest[:], remoteDigest[:]) {
		return p2ptypes.ErrWrongForkDigestVersion
	}

	return nil
}
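
// Illustrative note (hypothetical scenario): two nodes that share a genesis hash
// but report heights on opposite sides of a fork boundary would derive different
// digests from utils.CreateForkDigest, so validateStatusMessage returns
// p2ptypes.ErrWrongForkDigestVersion and statusRPCHandler says goodbye with
// GoodbyeCodeWrongNetwork.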