github.com/sixexorg/magnetic-ring@v0.0.0-20191119090307-31705a21e419/p2pserver/sync/sync_handler.go

package sync

import (
	"errors"
	"fmt"
	"net"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/sixexorg/magnetic-ring/bactor"

	"github.com/sixexorg/magnetic-ring/p2pserver/temp"

	"github.com/hashicorp/golang-lru"
	evtActor "github.com/ontio/ontology-eventbus/actor"
	"github.com/sixexorg/magnetic-ring/common"
	"github.com/sixexorg/magnetic-ring/config"
	"github.com/sixexorg/magnetic-ring/log"

	"github.com/sixexorg/magnetic-ring/core/mainchain/types"
	actor "github.com/sixexorg/magnetic-ring/p2pserver/actor/req"
	msgCommon "github.com/sixexorg/magnetic-ring/p2pserver/common"
	msgpack "github.com/sixexorg/magnetic-ring/p2pserver/message"
	"github.com/sixexorg/magnetic-ring/p2pserver/net/protocol"
	"github.com/sixexorg/magnetic-ring/store/mainchain/extstates"
	"github.com/sixexorg/magnetic-ring/store/mainchain/extstorages"
	ledger "github.com/sixexorg/magnetic-ring/store/mainchain/storages"
)

// respCache is a cache for some response data
var (
	respCache *lru.ARCCache

	nlhcache *nlhCache
)

type nlhCache struct {
	nlh     map[string]int64
	nlhLock sync.Mutex
}

func (nlh *nlhCache) watch() {
	ticker := time.NewTicker(time.Minute * 5)
	for range ticker.C {
		now := time.Now().Unix()
		// take the lock: NodeLeagueHeigtNtfHandle writes this map concurrently
		nlh.nlhLock.Lock()
		for k, v := range nlh.nlh {
			if now-v >= 300 {
				delete(nlh.nlh, k)
			}
		}
		nlh.nlhLock.Unlock()
	}
}

func initNlhCache() {
	nlhcache = new(nlhCache)
	nlhcache.nlh = make(map[string]int64)
	go nlhcache.watch()
}
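
// A minimal usage sketch of the nlhCache contract: forward each unique
// notification at most once per TTL window. A consumer records a key under
// the lock and drops duplicates, as NodeLeagueHeigtNtfHandle does below:
//
//	nlhcache.nlhLock.Lock()
//	defer nlhcache.nlhLock.Unlock()
//	if _, seen := nlhcache.nlh[key]; seen {
//		return // duplicate within the ~300s window
//	}
//	nlhcache.nlh[key] = time.Now().Unix() // watch() evicts entries older than 300s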

// AddrReqHandle handles the neighbor address request from peer
func AddrReqHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Trace("[p2p]receive addr request message", "data.Addr", data.Addr,
		"data.Id", data.Id)
	remotePeer := p2p.GetPeer(data.Id)
	if remotePeer == nil {
		log.Debug("[p2p]remotePeer invalid in AddrReqHandle")
		return
	}

	addrStr := p2p.GetNeighborAddrs()
	// filter out mask peers
	mskPeers := config.GlobalConfig.P2PCfg.ReservedCfg.MaskPeers
	if config.GlobalConfig.P2PCfg.ReservedPeersOnly && len(mskPeers) > 0 {
		for i := 0; i < len(addrStr); i++ {
			ip := net.IP(addrStr[i].IpAddr[:])
			address := ip.To16().String()
			for j := 0; j < len(mskPeers); j++ {
				if address == mskPeers[j] {
					addrStr = append(addrStr[:i], addrStr[i+1:]...)
					i--
					break
				}
			}
		}
	}
	msg := msgpack.NewAddrs(addrStr)
	err := p2p.Send(remotePeer, msg, false)
	if err != nil {
		log.Warn("[p2p]send addrs message failed", "err", err)
		return
	}
}
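
// Worked example of the in-place filter above: with addrStr = [a, b, c] and
// b masked, append(addrStr[:1], addrStr[2:]...) yields [a, c]; decrementing i
// keeps the index on the element that slid into b's slot, so two consecutive
// masked peers are both removed.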

// HeadersReqHandle handles the headers sync request from peer
func HeadersReqHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Trace("[p2p]receive headers request message", "Addr", data.Addr, "Id", data.Id)

	headersReq := data.Payload.(*msgCommon.HeadersReq)
	// special handling for league (org) sync
	if headersReq.SyncType == msgCommon.SYNC_DATA_ORG {
		OrgHeadersReqHandle(data, p2p, pid)
		return
	}
	startHash := headersReq.HashStart
	stopHash := headersReq.HashEnd

	// headers, err := GetHeadersFromHash(startHash, stopHash,headersReq.OrgID,headersReq.SyncType)

	headers, err := GetHeadersFromHeight(headersReq.Height)
	if err != nil {
		log.Warn("get headers in HeadersReqHandle ", "error", err.Error(), "startHash", startHash.String(), "stopHash", stopHash.String())
		return
	}
	remotePeer := p2p.GetPeer(data.Id)
	if remotePeer == nil {
		log.Debug("[p2p]remotePeer invalid in HeadersReqHandle, peer", "id", data.Id)
		return
	}
	msg := msgpack.NewHeaders(headers, nil, headersReq.OrgID, headersReq.SyncType)
	err = p2p.Send(remotePeer, msg, false)
	if err != nil {
		log.Warn("[p2p]send headers message failed", "err", err)
		return
	}
}

// PingHandle handles the ping msg from peer
func PingHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Trace("[p2p]receive ping message", "Addr", data.Addr, "Id", data.Id, "Height", data.Payload.(*msgCommon.Ping).Height)

	ping := data.Payload.(*msgCommon.Ping)
	remotePeer := p2p.GetPeer(data.Id)
	if remotePeer == nil {
		log.Debug("[p2p]remotePeer invalid in PingHandle")
		return
	}
	remotetype := ping.InfoType
	height := uint32(0)
	owninfo := make([]*msgCommon.OrgPIPOInfo, 0)
	if remotetype == msgCommon.PING_INFO_ALL || remotetype == msgCommon.PING_INFO_MAIN {
		//remote
		remotePeer.SetHeight(ping.Height)
		//own
		height = uint32(ledger.GetLedgerStore().GetCurrentBlockHeight())
		p2p.SetHeight(uint64(height))
	}

	remotedelorgs := make([]common.Address, 0)
	if remotetype == msgCommon.PING_INFO_ALL {

		orgs := p2p.PeerGetOrg()
		ownorgmap := make(map[common.Address]bool)

		for _, ownorgid := range orgs {
			ownorgmap[ownorgid] = true
		}

		if len(ping.OrgInfo) == 0 {
			for id := range ownorgmap {
				remotedelorgs = handleDelPeer(data, p2p, pid, id, remotedelorgs)
			}
		}

		for _, info := range ping.OrgInfo {

			if _, ok := ownorgmap[info.OrgID]; ok {
				// update remote peer orgid -> height
				remotePeer.SetRemoteOrgHeight(info.Height, info.OrgID)
				//own
				orgheight := uint64(0)
				if info.OrgID != msgCommon.StellarNodeID {
					if lgr := temp.GetLedger(info.OrgID, "sync_handler.go 001"); lgr != nil {
						orgheight = lgr.GetCurrentBlockHeight()
					}
				}
				// orgheight := ledger.GetLedgerStore().GetCurrentBlockHeight(/*info.OrgID*/)
				ownorginfo := msgpack.NewOrgPIPOMsg(info.OrgID, orgheight)
				owninfo = append(owninfo, ownorginfo)

				p2p.SetOwnOrgHeight(orgheight, info.OrgID)
			} else {
				remotedelorgs = handleDelPeer(data, p2p, pid, info.OrgID, remotedelorgs)
			}
		}
	} else if remotetype == msgCommon.PING_INFO_ORG && len(ping.OrgInfo) == 1 {
		remoteorginfo := ping.OrgInfo[0]
		if p2p.BHaveOrgsId(remoteorginfo.OrgID) {
			// update remote peer orgid -> height
			remotePeer.SetRemoteOrgHeight(remoteorginfo.Height, remoteorginfo.OrgID)
			//own
			orgheight := uint64(0)
			if lgr := temp.GetLedger(remoteorginfo.OrgID, "sync_handler.go 003"); lgr != nil {
				orgheight = lgr.GetCurrentBlockHeight()
			}
			// orgheight := ledger.GetLedgerStore().GetCurrentBlockHeight(/*remoteorginfo.OrgID*/)
			ownorginfo := msgpack.NewOrgPIPOMsg(remoteorginfo.OrgID, orgheight)
			owninfo = append(owninfo, ownorginfo)

			p2p.SetOwnOrgHeight(orgheight, remoteorginfo.OrgID)
		} else {
			remotedelorgs = handleDelPeer(data, p2p, pid, remoteorginfo.OrgID, remotedelorgs)
		}
	}

	if len(remotedelorgs) > 0 {
		appendorgs := &msgCommon.HandleAddDelOrgPeerID{
			BAdd:   false,
			PeerID: data.Id,
			OrgID:  remotedelorgs,
		}
		pid.Tell(appendorgs)
	}

	// height: main chain
	msg := msgpack.NewPongMsg(uint64(height), owninfo, remotetype)
	err := p2p.Send(remotePeer, msg, false)
	if err != nil {
		log.Warn("[p2p]send pong message failed", "err", err)
	}
}
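
// Exchange sketch (example values assumed): a peer sends
// Ping{InfoType: PING_INFO_ALL, Height: 1200, OrgInfo: [{OrgID: x, Height: 80}]}.
// For each org this node also tracks, the handler above records the remote
// height, reads its own height for that org, and echoes it back in the pong;
// orgs it no longer tracks go through handleDelPeer and are reported to the
// p2p actor as HandleAddDelOrgPeerID{BAdd: false, ...}. The pong always
// carries this node's main-chain height.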

func handleDelPeer(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID,
	orgID common.Address, remotedelorgs []common.Address) []common.Address {
	remotePeer := p2p.GetPeer(data.Id)
	if remotePeer == nil {
		log.Debug("[p2p]remotePeer invalid in handleDelPeer")
		return remotedelorgs
	}
	if msgCommon.StellarNodeID == orgID {
		bhave := remotePeer.DelRemoteOrg(msgCommon.StellarNodeID)
		if bhave {
			pid.Tell(&msgCommon.StellarAddDelPeerID{
				BAdd:   false,
				PeerID: data.Id,
			})
		}
	} else {
		remotedelorgs = append(remotedelorgs, orgID)
	}
	return remotedelorgs
}

// PongHandle handles the pong msg from peer
func PongHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Trace("[p2p]receive pong message", "Addr", data.Addr, "Id", data.Id)

	pong := data.Payload.(*msgCommon.Pong)

	remotePeer := p2p.GetPeer(data.Id)
	if remotePeer == nil {
		log.Debug("[p2p]remotePeer invalid in PongHandle")
		return
	}
	remotetype := pong.InfoType
	if remotetype == msgCommon.PING_INFO_ALL || remotetype == msgCommon.PING_INFO_MAIN {
		//remote
		remotePeer.SetHeight(pong.Height)
	}

	remoteorgs := make([]common.Address, 0)
	if remotetype == msgCommon.PING_INFO_ALL || remotetype == msgCommon.PING_INFO_ORG {

		ownorgs := p2p.PeerGetOrg()
		maporg := make(map[common.Address]bool)

		for _, ownorgid := range ownorgs {
			maporg[ownorgid] = true
		}

		for _, info := range pong.OrgInfo {

			if _, ok := maporg[info.OrgID]; ok {
				//remote
				bnew := remotePeer.SetRemoteOrgHeight(info.Height, info.OrgID)
				if msgCommon.StellarNodeID == info.OrgID {
					if bnew {
						pid.Tell(&msgCommon.StellarAddDelPeerID{
							BAdd:   true,
							PeerID: data.Id,
						})
					}
				} else {
					remoteorgs = append(remoteorgs, info.OrgID)
				}
			}
		}
	}
	if len(remoteorgs) > 0 {
		appendorgs := &msgCommon.HandleAddDelOrgPeerID{
			BAdd:   true,
			PeerID: data.Id,
			OrgID:  remoteorgs,
		}
		pid.Tell(appendorgs)
	}
}

// BlkHeaderHandle handles the sync headers from peer
func BlkHeaderHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Trace("[p2p]receive block header message", "Addr", data.Addr, "Id", data.Id)
	if pid != nil {
		var blkHeader = data.Payload.(*msgCommon.BlkHeader)
		input := &msgCommon.AppendHeaders{
			FromID:     data.Id,
			Headers:    blkHeader.BlkHdr,
			HeaderOrgs: blkHeader.BlkOrgHdr,
			OrgID:      blkHeader.OrgID,
			SyncType:   blkHeader.SyncType,
		}
		pid.Tell(input)
	}
}

// BlockHandle handles the block message from peer
func BlockHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Info("[p2p]receive block message from p2p", "Addr", data.Addr, "Id", data.Id, "data.Payload.CmdType", data.Payload.CmdType())

	if pid != nil {
		block := data.Payload.(*msgCommon.Block)
		if block == nil {
			// guard: block.Blk is dereferenced below
			log.Warn("[p2p]nil block payload in BlockHandle")
			return
		}

		switch block.SyncType {
		case msgCommon.SYNC_DATA_ORG:
			OrgDataReqHandle(data, p2p, pid)
			return
		case msgCommon.SYNC_DATA_A_TO_STELLAR:
			ANodeSendToStellarPeingData(data, p2p, pid)
			return
		case msgCommon.SYNC_DATA_STELLAR_TO_STELLAR:
			StellaNodeSendToStellarPeingData(data, p2p, pid)
			return
		}

		input := &msgCommon.AppendBlock{
			FromID:    data.Id,
			BlockSize: data.PayloadSize,
			Block:     block.Blk,
			BlockOrg:  block.BlkOrg,
			OrgID:     block.OrgID,
			SyncType:  block.SyncType,
		}
		pid.Tell(input)
	}
}

// ConsensusHandle handles the consensus message from peer
func ConsensusHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Debug("[p2p]receive consensus message", "Addr", data.Addr, "Id", data.Id)

	if actor.ConsensusPid != nil {
		var consensus = data.Payload.(*msgCommon.Consensus)
		if err := consensus.Cons.Verify(); err != nil {
			log.Warn("[p2p]consensus payload verify failed", "err", err)
			return
		}
		consensus.Cons.PeerId = data.Id
		actor.ConsensusPid.Tell(&consensus.Cons)
	}
}

// NotFoundHandle handles the not found message from peer
func NotFoundHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	var notFound = data.Payload.(*msgCommon.NotFound)
	log.Debug("[p2p]receive notFound message", "Hash", notFound.Hash)
}

// TransactionHandle handles the transaction message from peer
func TransactionHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Trace("[p2p]receive transaction message", "Addr", data.Addr, "Id", data.Id)

	var trn = data.Payload.(*msgCommon.Trn)
	// synctype := trn.SyncType
	// orgid := trn.OrgID
	// actor.AddTransaction(trn.Txn)
	actor.AddTransaction(trn)
	log.Trace("[p2p]receive Transaction message hash", "Hash", trn.Txn.Hash())
}

// VersionHandle handles version handshake protocol from peer
func VersionHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Trace("[p2p]receive version message", "Addr", data.Addr, "Id", data.Id)

	version := data.Payload.(*msgCommon.Version)

	remotePeer := p2p.GetPeerFromAddr(data.Addr)
	if remotePeer == nil {
		log.Debug("[p2p]peer does not exist", "Addr", data.Addr)
		// peer does not exist, just remove it from the connecting list and return
		p2p.RemoveFromConnectingList(data.Addr)
		return
	}
	addrIp, err := msgCommon.ParseIPAddr(data.Addr)
	if err != nil {
		log.Warn("[p2p]parse ip failed", "err", err)
		return
	}
	nodeAddr := addrIp + ":" +
		strconv.Itoa(int(version.P.SyncPort))
	if config.GlobalConfig.P2PCfg.ReservedPeersOnly && len(config.GlobalConfig.P2PCfg.ReservedCfg.ReservedPeers) > 0 {
		found := false
		for _, addr := range config.GlobalConfig.P2PCfg.ReservedCfg.ReservedPeers {
			if strings.HasPrefix(data.Addr, addr) {
				log.Debug("[p2p]peer in reserved list", "Addr", data.Addr)
				found = true
				break
			}
		}
		if !found {
			remotePeer.CloseSync()
			remotePeer.CloseCons()
			log.Debug("[p2p]peer not in reserved list, close", "Addr", data.Addr)
			return
		}
	}

	if version.P.Nonce == p2p.GetID() {
		p2p.RemoveFromInConnRecord(remotePeer.GetAddr())
		p2p.RemoveFromOutConnRecord(remotePeer.GetAddr())
		log.Warn("[p2p]the node handshake with itself", "Addr", remotePeer.GetAddr())
		p2p.SetOwnAddress(nodeAddr)
		remotePeer.CloseSync()
		return
	}

	s := remotePeer.GetSyncState()
	if s != msgCommon.INIT && s != msgCommon.HAND {
		log.Warn("[p2p]unexpected sync state when receiving version", "s", s, "Addr", remotePeer.GetAddr())
		remotePeer.CloseSync()
		return
	}

	// Obsolete node
	p := p2p.GetPeer(version.P.Nonce)
	if p != nil {
		ipOld, err := msgCommon.ParseIPAddr(p.GetAddr())
		if err != nil {
			log.Warn("[p2p]exist peer ip format is wrong", "Nonce", version.P.Nonce, "Addr", p.GetAddr())
			return
		}
		ipNew, err := msgCommon.ParseIPAddr(data.Addr)
		if err != nil {
			remotePeer.CloseSync()
			log.Warn("[p2p]connecting peer ip format is wrong, close", "Nonce", version.P.Nonce, "Addr", data.Addr)
			return
		}
		if ipNew == ipOld {
			//same id and same ip
			n, ret := p2p.DelNbrNode(version.P.Nonce)
			if ret {
				log.Info("[p2p]peer reconnect", "Nonce", version.P.Nonce, "Addr", data.Addr)
				// Close the connection and release the node source
				n.CloseSync()
				n.CloseCons()
				if pid != nil {
					input := &msgCommon.RemovePeerID{
						ID: version.P.Nonce,
					}
					pid.Tell(input)
				}
			}
		} else {
			log.Warn("[p2p]same peer id from different addr, close latest one", "ipOld", ipOld, "ipNew", ipNew)
			remotePeer.CloseSync()
			return
		}
	}

	if version.P.Cap[msgCommon.HTTP_INFO_FLAG] == 0x01 {
		remotePeer.SetHttpInfoState(true)
	} else {
		remotePeer.SetHttpInfoState(false)
	}
	remotePeer.SetHttpInfoPort(version.P.HttpInfoPort)
	remotePeer.SetNode(&(version.N))

	remotePeer.UpdateInfo(time.Now(), version.P.Version,
		version.P.Services, version.P.SyncPort,
		version.P.ConsPort, version.P.Nonce,
		version.P.Relay, version.P.StartHeight)
	remotePeer.SyncLink.SetID(version.P.Nonce)
	p2p.AddNbrNode(remotePeer)

	if pid != nil {
		input := &msgCommon.AppendPeerID{
			ID: version.P.Nonce,
		}
		pid.Tell(input)
	}

	var msg msgCommon.Message
	if s == msgCommon.INIT {
		remotePeer.SetSyncState(msgCommon.HAND_SHAKE)
		msg = msgpack.NewVersion(p2p, false, uint32(ledger.GetLedgerStore().GetCurrentBlockHeight()))
	} else if s == msgCommon.HAND {
		remotePeer.SetSyncState(msgCommon.HAND_SHAKED)
		msg = msgpack.NewVerAck(false)
	}
	err = p2p.Send(remotePeer, msg, false)
	if err != nil {
		log.Warn("[p2p]send handshake reply failed", "err", err)
		return
	}
}

// VerAckHandle handles the version ack from peer
func VerAckHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Trace("[p2p]receive verAck message", "Addr", data.Addr, "Id", data.Id)

	// verAck := data.Payload.(*msgCommon.VerACK)
	remotePeer := p2p.GetPeer(data.Id)

	if remotePeer == nil {
		log.Warn("[p2p]nbr node does not exist", "Id", data.Id, "Addr", data.Addr)
		return
	}
	s := remotePeer.GetSyncState()
	if s != msgCommon.HAND_SHAKE && s != msgCommon.HAND_SHAKED {
		log.Warn("[p2p]unexpected sync state when receiving verAck", "s", s, "Addr", data.Addr)
		return
	}

	remotePeer.SetSyncState(msgCommon.ESTABLISH)
	p2p.RemoveFromConnectingList(data.Addr)
	remotePeer.DumpInfo()

	if s == msgCommon.HAND_SHAKE {
		msg := msgpack.NewVerAck(false)
		if err := p2p.Send(remotePeer, msg, false); err != nil {
			log.Warn("[p2p]send verAck failed", "err", err)
		}
	}
	msg := msgpack.NewAddrReq()
	go p2p.Send(remotePeer, msg, false)
}
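
// Handshake summary, as implemented by VersionHandle and VerAckHandle above
// (the INIT and HAND states are set by the dialing code outside this file):
//
//	version received in INIT        -> HAND_SHAKE, reply with own version
//	version received in HAND        -> HAND_SHAKED, reply with verAck
//	verAck received in HAND_SHAKE   -> ESTABLISH, reply with verAck
//	verAck received in HAND_SHAKED  -> ESTABLISH
//
// On ESTABLISH the peer leaves the connecting list and an addr request is
// sent to bootstrap neighbor discovery.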

// AddrHandle handles the neighbor address response message from peer
func AddrHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Trace("[p2p]handle addr message", "Addr", data.Addr, "Id", data.Id)

	var msg = data.Payload.(*msgCommon.Addr)
	for _, v := range msg.NodeAddrs {
		ip := net.IP(v.IpAddr[:])
		address := ip.To16().String() + ":" + strconv.Itoa(int(v.Port))

		if v.ID == p2p.GetID() {
			continue
		}

		if p2p.NodeEstablished(v.ID) {
			continue
		}

		if ret := p2p.GetPeerFromAddr(address); ret != nil {
			continue
		}

		if v.Port == 0 {
			continue
		}
		if p2p.IsAddrFromConnecting(address) {
			continue
		}
		log.Debug("[p2p]connect ip ", "address", address)
		go p2p.Connect(address, false, &(v.Node), false)
	}
}

// DataReqHandle handles the data req(block/Transaction) from peer
func DataReqHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Trace("[p2p]receive data req message", "Addr", data.Addr, "Id", data.Id)
	var dataReq = data.Payload.(*msgCommon.DataReq)

	log.Debug("[p2p]DataReqHandle", "SyncType", dataReq.SyncType)
	if dataReq.SyncType == msgCommon.SYNC_DATA_ORG {
		OrgDataReqHandle(data, p2p, pid)
		return
	} else if dataReq.SyncType == msgCommon.SYNC_DATA_A_TO_STELLAR {
		ANodeSendToStellarPeingData(data, p2p, pid)
		return
	} else if dataReq.SyncType == msgCommon.SYNC_DATA_STELLAR_TO_STELLAR {
		StellaNodeSendToStellarPeingData(data, p2p, pid)
		return
	}

	remotePeer := p2p.GetPeer(data.Id)
	if remotePeer == nil {
		log.Debug("[p2p]remotePeer invalid in DataReqHandle")
		return
	}
	reqType := common.InventoryType(dataReq.DataType)
	hash := dataReq.Hash
	orgID := dataReq.OrgID
	synctype := dataReq.SyncType
	switch reqType {
	case common.BLOCK:
		reqID := fmt.Sprintf("%x%s", reqType, hash.String())
		// do not shadow the handler parameter "data" with the cache hit
		cached := getRespCacheValue(reqID)
		var block *types.Block
		var err error
		if blk, ok := cached.(*types.Block); ok {
			block = blk
		}
		if block == nil {
			if synctype == msgCommon.SYNC_DATA_MAIN {
				block, err = ledger.GetLedgerStore().GetBlockByHash(hash)
			}
			if err != nil || block == nil || block.Header == nil {
				log.Debug("[p2p]can't get block, send not found message", "hash", hash)
				msg := msgpack.NewNotFound(hash)
				if err := p2p.Send(remotePeer, msg, false); err != nil {
					log.Warn("[p2p]send notFound message failed", "err", err)
				}
				return
			}
			saveRespCache(reqID, block)
		}
		log.Debug("[p2p]block ", "height", block.Header.Height, "hash", hash)
		if block.Sigs == nil {
			block.Sigs = &types.SigData{}
		}
		msg := msgpack.NewBlock(block, nil, orgID, synctype)

		err = p2p.Send(remotePeer, msg, false)
		if err != nil {
			log.Warn("[p2p]send block message failed", "err", err)
			return
		}
	}
}

// InvHandle handles the inventory message(block,
// transaction and consensus) from peer.
func InvHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Trace("[p2p]receive inv message", "Addr", data.Addr, "Id", data.Id)
	var inv = data.Payload.(*msgCommon.Inv)

	remotePeer := p2p.GetPeer(data.Id)
	if remotePeer == nil {
		log.Debug("[p2p]remotePeer invalid in InvHandle")
		return
	}
	// guard both slices; Heis[0] is read below alongside Blk[0]
	if len(inv.P.Blk) == 0 || len(inv.P.Heis) == 0 {
		log.Debug("[p2p]empty inv payload in InvHandle")
		return
	}
	str := inv.P.Blk[0].String()
	hei := inv.P.Heis[0]
	log.Debug("[p2p]the inv", "type", inv.P.InvType, "Blk.len", len(inv.P.Blk), "str", str)

	invType := common.InventoryType(inv.P.InvType)
	switch invType {
	case common.BLOCK:
		log.Debug("[p2p]receive block message")
		for _, id := range inv.P.Blk {
			log.Debug("[p2p]receive inv-block message, hash is", "id", id)
			// TODO check the ID queue
			isContainBlock, err := ledger.GetLedgerStore().ContainBlock(id)
			if err != nil {
				log.Warn("[p2p]ContainBlock failed", "err", err)
				return
			}

			if !isContainBlock && msgCommon.LastInvHash != id {
				prnnn := &msgCommon.PeerAnn{
					PeerId: data.Id,
					Height: hei,
				}

				consensPid, er := bactor.GetActorPid(bactor.CONSENSUSACTOR)
				if er != nil {
					log.Error("sync_handler.InvHandle get pid error", "error", er)
					return
				}

				consensPid.Tell(prnnn)

				msgCommon.LastInvHash = id
				// send the block request
				log.Info("[p2p]inv request block hash", "id", id)

				msg := msgpack.NewBlkDataReq(id, common.Address{}, msgCommon.SYNC_DATA_MAIN)
				err = p2p.Send(remotePeer, msg, false)
				if err != nil {
					log.Warn("[p2p]send block data request failed", "err", err)
					return
				}
			}
		}
	default:
		log.Warn("[p2p]receive unknown inventory message")
	}
}
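
// Flow sketch for a block inventory: on inv{BLOCK, [hash], [height]} for a
// block not yet in the ledger, the handler above (1) tells the consensus
// actor a PeerAnn{PeerId, Height}, (2) remembers the hash in
// msgCommon.LastInvHash to suppress duplicate fetches, and (3) asks the
// announcing peer for the body:
//
//	msg := msgpack.NewBlkDataReq(id, common.Address{}, msgCommon.SYNC_DATA_MAIN)
//	p2p.Send(remotePeer, msg, false)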

// DisconnectHandle handles the disconnect events
func DisconnectHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Debug("[p2p]receive disconnect message", "Addr", data.Addr, "Id", data.Id)
	p2p.RemoveFromInConnRecord(data.Addr)
	p2p.RemoveFromOutConnRecord(data.Addr)
	remotePeer := p2p.GetPeer(data.Id)
	defer func() {
		if actor.ConsensusPid != nil {
			actor.ConsensusPid.Tell(&msgCommon.PeerDisConnect{
				ID: data.Id,
			})
		}
	}()

	if remotePeer == nil {
		log.Debug("[p2p]disconnect peer is nil")
		return
	}

	for _, orgid := range remotePeer.GetRemoteOrgs() {
		if orgid != msgCommon.StellarNodeID {
			p2p.SyncHandleSentDisconnectToBootNode(data.Id, false)
		} else {
			log.Debug("[p2p]notify boot node of stellar peer disconnect", "Id", data.Id)
			p2p.SyncHandleSentDisconnectToBootNode(data.Id, true)
		}
	}

	p2p.RemoveFromConnectingList(data.Addr)

	if remotePeer.SyncLink.GetAddr() == data.Addr {
		p2p.RemovePeerSyncAddress(data.Addr)
		p2p.RemovePeerConsAddress(data.Addr)
		remotePeer.CloseSync()
		remotePeer.CloseCons()
	}
	if remotePeer.ConsLink.GetAddr() == data.Addr {
		p2p.RemovePeerConsAddress(data.Addr)
		remotePeer.CloseCons()
	}
}

func GetHeadersFromHeight(srcHeight uint64) ([]*types.Header, error) {
	var count uint64
	headers := []*types.Header{}
	// var startHeight uint32
	var stopHeight uint64

	curHeight := ledger.GetLedgerStore().GetCurrentHeaderHeight()
	if srcHeight == 0 {
		if curHeight > msgCommon.MAX_BLK_HDR_CNT {
			count = msgCommon.MAX_BLK_HDR_CNT
		} else {
			count = curHeight
		}
	} else {
		// bkStop, err := ledger.DefLedger.GetHeaderByHash(stopHash)
		// if err != nil || bkStop == nil {
		// 	return nil, err
		// }
		stopHeight = srcHeight

		// avoid unsigned integer underflow when the remote is ahead of us
		if stopHeight >= curHeight {
			return headers, nil
		}
		count = curHeight - stopHeight
		if count > msgCommon.MAX_BLK_HDR_CNT {
			count = msgCommon.MAX_BLK_HDR_CNT
		}
	}

	var i uint64
	for i = 1; i <= count; i++ {
		hash, _ := ledger.GetLedgerStore().GetBlockHashByHeight(stopHeight + i)
		hd, err := ledger.GetLedgerStore().GetHeaderByHash(hash)
		if err != nil {
			log.Debug("[p2p]net_server GetBlockWithHeight failed with", "err", err.Error(), "hash", hash, "height", stopHeight+i)
			return nil, err
		}
		headers = append(headers, hd)
	}

	return headers, nil
}
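
// Worked example: with curHeight = 1000 and srcHeight = 400,
// count = min(1000-400, MAX_BLK_HDR_CNT), so the loop returns the headers at
// heights 401..400+count, oldest first, i.e. the prefix of what the remote is
// missing. With srcHeight = 0 the node serves the first
// min(curHeight, MAX_BLK_HDR_CNT) headers starting from height 1.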

// GetHeadersFromHash gets block headers from startHash to stopHash
func GetHeadersFromHash(startHash common.Hash, stopHash common.Hash,
	orgid common.Address, orgtype string) ([]*types.Header, error) {
	var count uint32
	headers := []*types.Header{}
	var startHeight uint32
	var stopHeight uint32
	curHeight := uint32(0)
	if orgtype == msgCommon.SYNC_DATA_MAIN {
		curHeight = uint32(ledger.GetLedgerStore().GetCurrentHeaderHeight())
	}

	empty := common.Hash{}
	if startHash == empty {
		if stopHash == empty {
			if curHeight > msgCommon.MAX_BLK_HDR_CNT {
				count = msgCommon.MAX_BLK_HDR_CNT
			} else {
				count = curHeight
			}
		} else {
			var bkStop *types.Header
			var err error
			if orgtype == msgCommon.SYNC_DATA_MAIN {
				bkStop, err = ledger.GetLedgerStore().GetHeaderByHash(stopHash)
			}

			if err != nil || bkStop == nil {
				return nil, err
			}
			stopHeight = uint32(bkStop.Height)
			count = curHeight - stopHeight
			if count > msgCommon.MAX_BLK_HDR_CNT {
				count = msgCommon.MAX_BLK_HDR_CNT
			}
		}
	} else {
		var bkStart *types.Header
		var err error
		if orgtype == msgCommon.SYNC_DATA_MAIN {
			bkStart, err = ledger.GetLedgerStore().GetHeaderByHash(startHash)
		}

		if err != nil || bkStart == nil {
			return nil, err
		}
		startHeight = uint32(bkStart.Height)
		if stopHash != empty {
			var bkStop *types.Header
			var err error
			if orgtype == msgCommon.SYNC_DATA_MAIN {
				bkStop, err = ledger.GetLedgerStore().GetHeaderByHash(stopHash)
			}

			if err != nil || bkStop == nil {
				return nil, err
			}
			stopHeight = uint32(bkStop.Height)

			// avoid unsigned integer underflow
			if startHeight < stopHeight {
				return nil, errors.New("[p2p]do not have header to send")
			}
			count = startHeight - stopHeight

			if count >= msgCommon.MAX_BLK_HDR_CNT {
				count = msgCommon.MAX_BLK_HDR_CNT
				stopHeight = startHeight - msgCommon.MAX_BLK_HDR_CNT
			}
		} else {
			if startHeight > msgCommon.MAX_BLK_HDR_CNT {
				count = msgCommon.MAX_BLK_HDR_CNT
			} else {
				count = startHeight
			}
		}
	}
	var i uint32
	for i = 1; i <= count; i++ {
		var (
			hash common.Hash
			hd   *types.Header
			err  error
		)
		if orgtype == msgCommon.SYNC_DATA_MAIN {
			hash, _ = ledger.GetLedgerStore().GetBlockHashByHeight(uint64(stopHeight + i))
			hd, err = ledger.GetLedgerStore().GetHeaderByHash(hash)
		}

		if err != nil {
			log.Debug("[p2p]net_server GetBlockWithHeight failed with", "err", err.Error(), "hash", hash, "height", stopHeight+i)
			return nil, err
		}
		headers = append(headers, hd)
	}

	return headers, nil
}

// getRespCacheValue gets response data from cache
func getRespCacheValue(key string) interface{} {
	if respCache == nil {
		return nil
	}
	data, ok := respCache.Get(key)
	if ok {
		return data
	}
	return nil
}

// saveRespCache saves a response msg to cache
func saveRespCache(key string, value interface{}) bool {
	if respCache == nil {
		var err error
		respCache, err = lru.NewARC(msgCommon.MAX_RESP_CACHE_SIZE)
		if err != nil {
			return false
		}
	}
	respCache.Add(key, value)
	return true
}
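
// Usage sketch: DataReqHandle above pairs these helpers so a block is read
// from the ledger at most once while it stays in the ARC cache:
//
//	reqID := fmt.Sprintf("%x%s", reqType, hash.String())
//	if cached := getRespCacheValue(reqID); cached != nil {
//		block, _ = cached.(*types.Block)
//	}
//	if block == nil {
//		block, err = ledger.GetLedgerStore().GetBlockByHash(hash)
//		// ... on success:
//		saveRespCache(reqID, block)
//	}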

// PingSpcHandle handles the special ping msg from peer
func PingSpcHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Trace("[p2p]receive PingSpcHandle message", "Addr", data.Addr, "Id", data.Id)

	pingspc := data.Payload.(*msgCommon.PingSpc)
	remotePeer := p2p.GetPeer(data.Id)
	if remotePeer == nil {
		log.Debug("[p2p]remotePeer invalid in PingSpcHandle")
		return
	}
	if pingspc.BNodeASrc {
		// check whether this node is a stellar node
		bstellar := false
		for _, orgid := range p2p.PeerGetOrg() {
			if orgid == msgCommon.StellarNodeID {
				bstellar = true
				break
			}
		}
		// if it is a stellar node, maintain the node-star state:
		// tell the p2p actor that the remote peer is an A node
		if bstellar {
			pid.Tell(&msgCommon.StarConnedNodeAID{
				PeerID: data.Id,
			})
		}

		msg := msgpack.NewPongSpcMsg(pingspc.BNodeASrc, bstellar)
		err := p2p.Send(remotePeer, msg, false)
		if err != nil {
			log.Warn("[p2p]send pongSpc message failed", "err", err)
		}
	} else {
		// check whether this node is an A node
		bANode := p2p.SyncHandleBANode()
		msg := msgpack.NewPongSpcMsg(pingspc.BNodeASrc, bANode)
		err := p2p.Send(remotePeer, msg, false)
		if err != nil {
			log.Warn("[p2p]send pongSpc message failed", "err", err)
		}
	}
}

// PongSpcHandle handles the special pong msg from peer
func PongSpcHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Trace("[p2p]receive PongSpcHandle message", "Addr", data.Addr, "Id", data.Id)

	pongspc := data.Payload.(*msgCommon.PongSpc)
	remotePeer := p2p.GetPeer(data.Id)
	if remotePeer == nil {
		log.Debug("[p2p]remotePeer invalid in PongSpcHandle")
		return
	}

	// forward to the p2p actor
	pid.Tell(&msgCommon.PingPongSpcHandler{
		BNodeASrc:     pongspc.BNodeASrc,
		BOtherSideReq: pongspc.BOtherSideReq,
		PeerID:        data.Id,
	})
}
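
// Summary of the special ping/pong exchange: PingSpc carries BNodeASrc,
// which says whether the sender is probing as an A node.
//
//	BNodeASrc == true  -> responder answers "am I a stellar node?" and, if so,
//	                      also tells its p2p actor StarConnedNodeAID{PeerID}
//	BNodeASrc == false -> responder answers "am I an A node?" (SyncHandleBANode)
//
// PongSpcHandle then forwards {BNodeASrc, BOtherSideReq, PeerID} to the p2p
// actor as a PingPongSpcHandler message.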

func EarthNotifyHashHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Info("[p2p]receive EarthNotifyHashHandle message", "Addr", data.Addr, "Id", data.Id)

	ntf := data.Payload.(*msgCommon.EarthNotifyMsg)

	log.Info("earth ntf message", "ntf", ntf)
}

func NodeLeagueHeigtNtfHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Info("[p2p]receive NodeLeagueHeigtNtfHandle message", "Addr", data.Addr, "Id", data.Id)

	if nlhcache == nil {
		initNlhCache()
	}

	nlhcache.nlhLock.Lock()
	defer nlhcache.nlhLock.Unlock()

	ntf := data.Payload.(*msgCommon.NodeLHMsg)
	uniq := ntf.Unique()
	if _, ok := nlhcache.nlh[uniq]; ok {
		return
	}
	log.Info("node league height notify handle message", "ntf", ntf)

	radarPid, err := bactor.GetActorPid(bactor.MAINRADARACTOR)
	if err != nil {
		log.Error("NodeLeagueHeigtNtfHandle get radar actor error", "error", err)
		return
	}

	lh := &common.NodeLH{
		ntf.NodeId,
		ntf.LeagueId,
		ntf.Height,
	}

	nlhcache.nlh[uniq] = time.Now().Unix()
	radarPid.Tell(lh)
}

// ExtDataRequestHandle handles an extData request
func ExtDataRequestHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Info("[p2p]receive ExtDataRequestHandle message", "Addr", data.Addr, "Id", data.Id)
	var dataReq = data.Payload.(*msgCommon.ExtDataRequest)
	remotePeer := p2p.GetPeer(data.Id)
	if remotePeer == nil {
		log.Warn("[p2p]nbr node does not exist", "Id", data.Id, "Addr", data.Addr)
		return
	}
	bstellar := false
	for _, orgid := range p2p.PeerGetOrg() {
		if orgid == msgCommon.StellarNodeID {
			bstellar = true
			break
		}
	}
	// only stellar nodes serve this request
	if !bstellar {
		log.Debug("[p2p]ExtDataRequestHandle: not a stellar node, exit")
		return
	}

	// get local data
	if extstorages.GetLedgerStoreInstance() == nil {
		log.Error("[p2p] extstorages.GetLedgerStoreInstance() nil")
		return
	}
	extData, err := extstorages.GetLedgerStoreInstance().GetExtDataByHeight(dataReq.LeagueId, dataReq.Height)
	if err != nil {
		log.Warn("[p2p]get radar extData error", "err", err, "height", dataReq.Height, "leagueId", dataReq.LeagueId.ToString())
		return
	}
	log.Debug("[p2p]got extData", "extData", *extData)
	if len(extData.AccountStates) != 0 {
		// drop nil entries so the response serializes cleanly
		var accountStates []*extstates.EasyLeagueAccount
		for _, value := range extData.AccountStates {
			if value != nil {
				accountStates = append(accountStates, value)
			}
		}
		extData.AccountStates = accountStates
	}
	if extData.LeagueBlock == nil {
		extData.LeagueBlock = new(extstates.LeagueBlockSimple)
	}

	msg := msgpack.NewExtDataResponseMsg(extData)
	//hash,_:= common.StringToHash("")
	//msg := msgpack.NewNotFound(hash)
	err = p2p.Send(remotePeer, msg, false)
	if err != nil {
		log.Warn("[p2p]send extData response failed", "addr", remotePeer.SyncLink.GetAddr(), "port", remotePeer.SyncLink.GetPort(), "err", err)
	}
}

// ExtDataResponseHandle handles an extData response
func ExtDataResponseHandle(data *msgCommon.MsgPayload, p2p p2p.P2P, pid *evtActor.PID, args ...interface{}) {
	log.Info("[p2p]receive ExtDataResponseHandle message", "Addr", data.Addr, "Id", data.Id)
	//defer common.CatchPanic()
	var dataReq = data.Payload.(*msgCommon.ExtDataResponse)

	if dataReq == nil {
		log.Error("[p2p]ExtDataResponseHandle dataReq is nil")
		return
	}
	if extstorages.GetLedgerStoreInstance() == nil {
		log.Error("[p2p] extstorages.GetLedgerStoreInstance() nil")
		return
	}
	blkInfo, mainTxUsed := dataReq.GetOrgBlockInfo()
	err := extstorages.GetLedgerStoreInstance().SaveAll(blkInfo, mainTxUsed)
	if err != nil {
		log.Error("[p2p]extstorages.GetLedgerStoreInstance().SaveAll failed", "error", err, "LeagueBlock", dataReq.LeagueBlock, "MainTxUsed", dataReq.MainTxUsed)
	} else {
		radarPid, err := bactor.GetActorPid(bactor.MAINRADARACTOR)
		if err != nil {
			log.Error("ExtDataResponseHandle get radar actor error", "error", err)
			return
		}

		lf := &bactor.LeagueRadarCache{
			LeagueId:  blkInfo.Block.Header.LeagueId,
			BlockHash: blkInfo.Block.Hash(),
			Height:    blkInfo.Block.Header.Height,
		}
		radarPid.Tell(lf)
	}
}