github.com/elastos/Elastos.ELA.SideChain.ETH@v0.2.2/consensus/pbft/network.go

// Copyright (c) 2017-2019 The Elastos Foundation
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.
//

package pbft

import (
	"bytes"
	"fmt"
	"sort"
	"time"

	"github.com/elastos/Elastos.ELA.SideChain.ESC/common"
	"github.com/elastos/Elastos.ELA.SideChain.ESC/consensus"
	"github.com/elastos/Elastos.ELA.SideChain.ESC/core/types"
	"github.com/elastos/Elastos.ELA.SideChain.ESC/dpos"
	dmsg "github.com/elastos/Elastos.ELA.SideChain.ESC/dpos/msg"
	"github.com/elastos/Elastos.ELA.SideChain.ESC/log"
	"github.com/elastos/Elastos.ELA.SideChain.ESC/rlp"
	"github.com/elastos/Elastos.ELA.SideChain.ESC/smallcrosstx"
	"github.com/elastos/Elastos.ELA.SideChain.ESC/spv"
	"github.com/elastos/Elastos.ELA.SideChain.ESC/withdrawfailedtx"

	elacom "github.com/elastos/Elastos.ELA/common"
	"github.com/elastos/Elastos.ELA/core/types/payload"
	"github.com/elastos/Elastos.ELA/dpos/p2p"
	"github.com/elastos/Elastos.ELA/dpos/p2p/msg"
	"github.com/elastos/Elastos.ELA/dpos/p2p/peer"
	"github.com/elastos/Elastos.ELA/events"
	elap2p "github.com/elastos/Elastos.ELA/p2p"
)

const maxViewOffset = 100

func (p *Pbft) StartProposal(block *types.Block) error {
	sealHash := p.SealHash(block.Header())
	log.Info("StartProposal", "block hash:", sealHash.String())

	hash, err := elacom.Uint256FromBytes(sealHash.Bytes())
	if err != nil {
		return err
	}
	proposal, err := dpos.StartProposal(p.account, *hash, p.dispatcher.GetConsensusView().GetViewOffset())
	if err != nil {
		log.Error("Start proposal error", "err", err)
		return err
	}

	var id peer.PID
	copy(id[:], p.account.PublicKeyBytes()[:])
	if err, _, _ := p.dispatcher.ProcessProposal(id, proposal); err != nil {
		log.Error("ProcessProposal error", "err", err)
	}

	m := &msg.Proposal{
		Proposal: *proposal,
	}
	log.Info("[StartProposal] send proposal message", "proposal", msg.GetMessageHash(m))
	p.BroadMessage(m)
	// Broadcast vote
	voteMsg := p.dispatcher.AcceptProposal(proposal, p.account)
	if voteMsg != nil {
		go p.OnVoteAccepted(id, &voteMsg.Vote)
		p.BroadMessage(voteMsg)
	}
	return nil
}

func (p *Pbft) BroadMessage(msg elap2p.Message) {
	peers := p.network.DumpPeersInfo()

	for _, peer := range peers {
		pid := peer.PID[:]
		producer := p.dispatcher.GetConsensusView().IsProducers(pid)
		if !producer {
			continue
		}
		p.network.SendMessageToPeer(peer.PID, msg)
	}
}

func (p *Pbft) BroadMessageToPeers(msg elap2p.Message, peers [][]byte) {
	for _, pbk := range peers {
		pid := peer.PID{}
		copy(pid[:], pbk)
		p.network.SendMessageToPeer(pid, msg)
	}
}

type peerInfo struct {
	OwnerPublicKey string `json:"ownerpublickey"`
	NodePublicKey  string `json:"nodepublickey"`
	IP             string `json:"ip"`
	ConnState      string `json:"connstate"`
}

func (p *Pbft) GetAtbiterPeersInfo() []peerInfo {
	if p.account == nil {
		return nil
	}

	peers := p.network.DumpPeersInfo()

	result := make([]peerInfo, 0)
	for _, peer := range peers {
		pid := peer.PID[:]
		producer := p.dispatcher.GetConsensusView().IsProducers(pid)
		if !producer {
			continue
		}
		result = append(result, peerInfo{
			NodePublicKey: common.Bytes2Hex(pid),
			IP:            peer.Addr,
			ConnState:     peer.State.String(),
		})
	}
	return result
}

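// GetAllArbiterPeersInfo returns the info of every connected DPoS peer,
// without filtering to the current producer set.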
func (p *Pbft) GetAllArbiterPeersInfo() []*p2p.PeerInfo {
	if p.account == nil {
		return nil
	}
	return p.network.DumpPeersInfo()
}

func (p *Pbft) AnnounceDAddr() bool {
	if p.account == nil {
		log.Error("not a super node")
		return false
	}
	currents := p.dispatcher.GetCurrentNeedConnectArbiters()
	nextArbiters := p.dispatcher.GetNextNeedConnectArbiters()
	log.Info("Announce DAddr ", "currents:", currents, "nextArbiters", nextArbiters)
	events.Notify(events.ETDirectPeersChanged,
		&peer.PeersInfo{CurrentPeers: currents, NextPeers: nextArbiters})
	return true
}

func (p *Pbft) UpdateCurrentProducers(producers [][]byte, totalCount int, spvHeight uint64) {
	p.dispatcher.GetConsensusView().UpdateProducers(producers, totalCount, spvHeight)
}

func (p *Pbft) GetCurrentProducers() [][]byte {
	if p.dispatcher != nil {
		return p.dispatcher.GetConsensusView().GetProducers()
	}
	return [][]byte{}
}

func (p *Pbft) IsProducerByAccount(account []byte) bool {
	if p.dispatcher != nil {
		return p.dispatcher.IsProducer(account)
	}
	return false
}

func (p *Pbft) BroadBlockMsg(block *types.Block) error {
	sealHash := p.SealHash(block.Header())
	log.Info("BroadPreBlock,", "block Height:", block.NumberU64(), "hash:", sealHash.String())
	buffer := bytes.NewBuffer([]byte{})
	err := block.EncodeRLP(buffer)
	if err != nil {
		return err
	}
	msg := dmsg.NewBlockMsg(buffer.Bytes())
	p.BroadMessage(msg)
	p.blockPool.AppendDposBlock(block)
	return nil
}

func (p *Pbft) RequestAbnormalRecovering() {
	height := p.chain.CurrentHeader().Height()
	msgItem := &dmsg.RequestConsensus{Height: height}
	log.Info("[RequestAbnormalRecovering]", "height", height)
	p.BroadMessage(msgItem)
}

func (p *Pbft) tryGetCurrentProposal(id peer.PID, v *payload.DPOSProposalVote) (elacom.Uint256, bool) {
	currentProposal := p.dispatcher.GetProcessingProposal()
	if currentProposal == nil {
		if v.ProposalHash.IsEqual(p.dispatcher.GetFinishedProposal()) {
			log.Info("received finished proposal vote")
			return elacom.EmptyHash, true
		}
		if _, ok := p.requestedProposals[v.ProposalHash]; !ok {
			requestProposal := &msg.RequestProposal{ProposalHash: v.ProposalHash}
			go p.network.SendMessageToPeer(id, requestProposal)
			p.requestedProposals[v.ProposalHash] = struct{}{}
		}
		return elacom.EmptyHash, false
	}
	return currentProposal.Hash(), true
}

func (p *Pbft) OnPing(id peer.PID, height uint32) {
	//fmt.Println("OnPing", id, height)
}

func (p *Pbft) OnPong(id peer.PID, height uint32) {
	//fmt.Println("OnPong", id, height)
}

func (p *Pbft) OnBlock(id peer.PID, block *dmsg.BlockMsg) {
	log.Info("-----OnBlock received------")
	b := &types.Block{}

	err := b.DecodeRLP(rlp.NewStream(bytes.NewBuffer(block.GetData()), 0))
	if err != nil {
		panic("OnBlock Decode Block Msg error:" + err.Error())
	}
	if len(b.Extra()) > extraVanity {
		p.OnBlockReceived(id, block, true)
		return
	}

	if b.NumberU64() <= p.chain.CurrentHeader().Number.Uint64() ||
		b.NumberU64() <= p.dispatcher.GetFinishedHeight() {
		p.blockPool.AddBadBlock(b)
		log.Warn("received block at old height", "chain height", p.chain.CurrentHeader().Number.Uint64(), "b.Height", b.NumberU64(), "finishedHeight", p.dispatcher.GetFinishedHeight())
		return
	}
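	// Append the block to the block pool; if a confirm for its seal hash is
	// already cached, process that confirm as well.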
	sealHash := p.SealHash(b.Header())
	log.Info("-----On PreBlock received------", "blockHash:", sealHash.String(), "height:", b.NumberU64())
	err = p.blockPool.AppendDposBlock(b)
	if err == consensus.ErrUnknownAncestor {
		log.Info("Append Future blocks", "height:", b.NumberU64())
		p.blockPool.AppendFutureBlock(b)
	}

	hash, err := elacom.Uint256FromBytes(sealHash.Bytes())
	if err == nil {
		if c, ok := p.blockPool.GetConfirm(*hash); ok {
			p.OnConfirmReceived(id, c, b.GetHeight())
		}
	}

	if _, ok := p.requestedBlocks[sealHash]; ok {
		delete(p.requestedBlocks, sealHash)
	}
}

func (p *Pbft) AccessFutureBlock(parent *types.Block) {
	if p.blockPool.HandleParentBlock(parent) {
		log.Info("----[Send RequestProposal]-----")
		requestProposal := &msg.RequestProposal{ProposalHash: elacom.EmptyHash}
		go p.BroadMessage(requestProposal)
	}
}

func (p *Pbft) OnInsertBlock(block *types.Block) bool {
	if p.dispatcher == nil {
		return false
	}
	dutyIndex := p.dispatcher.GetConsensusView().GetDutyIndex()
	isWorkingHeight := spv.SpvIsWorkingHeight()
	log.Info("[OnInsertBlock]", "dutyIndex", dutyIndex, "isWorkingHeight", isWorkingHeight)
	if dutyIndex == 0 && isWorkingHeight {
		curProducers := p.dispatcher.GetConsensusView().GetProducers()
		isSame := p.dispatcher.GetConsensusView().IsSameProducers(curProducers)
		if !isSame {
			p.dispatcher.GetConsensusView().ChangeCurrentProducers(block.NumberU64()+1, spv.GetSpvHeight())
			go p.AnnounceDAddr()
			go p.Recover()
			p.dispatcher.GetConsensusView().DumpInfo()
		} else {
			log.Info("For the same batch of producers, no need to change current producers")
		}
		spv.InitNextTurnDposInfo()
		return !isSame
	} else if block.Nonce() > 0 {
		// used to sync the completed SPV height to consensus
		spvHeight := spv.GetSpvHeight()
		if spvHeight < block.Nonce() {
			spvHeight = block.Nonce()
		}
		producers, totalCount, err := spv.GetProducers(spvHeight)
		if err != nil {
			log.Error("OnInsertBlock error", "GetProducers", err, "spvHeight", spvHeight)
			return false
		}
		isBackward := p.dispatcher.GetConsensusView().GetSpvHeight() < block.Nonce()
		isCurrent := p.IsCurrentProducers(producers)
		log.Info("current producers spvHeight", "height", p.dispatcher.GetConsensusView().GetSpvHeight(), "block.Nonce()", block.Nonce(), "isBackward", isBackward, "isCurrent", isCurrent)
		if isBackward && !isCurrent {
			p.dispatcher.GetConsensusView().UpdateProducers(producers, totalCount, spvHeight)
			go p.AnnounceDAddr()
			go p.Recover()
			return true
		}
	}
	return false
}

func (p *Pbft) GetSelfDutyIndex() int {
	if p.account == nil {
		return -1
	}
	return p.dispatcher.GetConsensusView().ProducerIndex(p.account.PublicKeyBytes())
}

func (p *Pbft) OnInv(id peer.PID, blockHash elacom.Uint256) {
	if !p.dispatcher.IsProducer(p.account.PublicKeyBytes()) {
		return
	}
	if p.blockPool.HasBlock(blockHash) {
		return
	}
	hash := common.BytesToHash(blockHash.Bytes())
	if _, ok := p.requestedBlocks[hash]; ok {
		return
	}

	log.Info("[ProcessInv] send getblock:", "hash", blockHash.String())
	p.limitMap(p.requestedBlocks, maxRequestedBlocks)
	p.requestedBlocks[hash] = struct{}{}
	go p.network.SendMessageToPeer(id, msg.NewGetBlock(blockHash))
}

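// OnGetBlock answers a getblock request by RLP-encoding the cached block
// and sending it back to the requesting peer.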
func (p *Pbft) OnGetBlock(id peer.PID, blockHash elacom.Uint256) {
	if block, ok := p.blockPool.GetBlock(blockHash); ok {
		if b, suc := block.(*types.Block); suc {
			buffer := bytes.NewBuffer([]byte{})
			err := b.EncodeRLP(buffer)
			if err != nil {
				log.Error("[OnGetBlock] Encode Block Error")
			}
			log.Info("Send block to peer", "peer:", id, "height:", block.GetHeight())
			go p.network.SendMessageToPeer(id, dmsg.NewBlockMsg(buffer.Bytes()))
		} else {
			log.Error("block is not an ethereum block")
		}
	}
}

func (p *Pbft) OnGetBlocks(id peer.PID, startBlockHeight, endBlockHeight uint32) {
	fmt.Println("OnGetBlocks")
}

func (p *Pbft) OnResponseBlocks(id peer.PID, blockConfirms []*dmsg.BlockMsg) {
	fmt.Println("OnResponseBlocks")
}

func (p *Pbft) OnRequestConsensus(id peer.PID, height uint64) {
	log.Info("------- [OnRequestConsensus] -------")
	if !p.IsProducer() {
		log.Warn("------- not a producer -------")
		return
	}

	status := p.dispatcher.HelpToRecoverAbnormal(id, height, p.chain.CurrentHeader().Height())
	if status != nil {
		msg := &dmsg.ResponseConsensus{Consensus: *status}
		go p.network.SendMessageToPeer(id, msg)
	}
}

func (p *Pbft) OnResponseConsensus(id peer.PID, status *dmsg.ConsensusStatus) {
	if !p.IsProducer() {
		return
	}
	if !p.recoverStarted {
		return
	}
	if p.statusMap[status.ViewOffset][common.Bytes2Hex(id[:])] != nil {
		return
	}
	log.Info("---------[OnResponseConsensus]------------", "pid", id.String(), "status.viewOffset", status.ViewOffset)
	if _, ok := p.statusMap[status.ViewOffset]; !ok {
		p.statusMap[status.ViewOffset] = make(map[string]*dmsg.ConsensusStatus)
	}
	p.statusMap[status.ViewOffset][common.Bytes2Hex(id[:])] = status
}

func (p *Pbft) OnRequestProposal(id peer.PID, hash elacom.Uint256) {
	currentProposal := p.dispatcher.GetProcessingProposal()
	if currentProposal != nil {
		responseProposal := &msg.Proposal{Proposal: *currentProposal}
		go p.network.SendMessageToPeer(id, responseProposal)
	}
}

func (p *Pbft) OnIllegalProposalReceived(id peer.PID, proposals *payload.DPOSIllegalProposals) {
	fmt.Println("OnIllegalProposalReceived")
}

func (p *Pbft) OnIllegalVotesReceived(id peer.PID, votes *payload.DPOSIllegalVotes) {
	fmt.Println("OnIllegalVotesReceived")
}

func (p *Pbft) OnProposalReceived(id peer.PID, proposal *payload.DPOSProposal) {
	log.Info("OnProposalReceived", "hash:", proposal.Hash().String())
	if _, ok := p.requestedProposals[proposal.Hash()]; ok {
		delete(p.requestedProposals, proposal.Hash())
	}
	if p.dispatcher.GetProcessingProposal() != nil && p.dispatcher.GetProcessingProposal().Hash().IsEqual(proposal.Hash()) {
		log.Info("is processing this proposal")
		return
	}
	if !p.dispatcher.GetConsensusView().IsRunning() {
		log.Info("consensus is not running")
		return
	}
	p.OnChangeView()

	if proposal.BlockHash.IsEqual(p.dispatcher.GetFinishedBlockSealHash()) {
		log.Info("already processed block")
		return
	}

	isBadProposal := p.blockPool.IsBadBlockProposal(proposal)
	if _, ok := p.blockPool.GetBlock(proposal.BlockHash); !ok && !isBadProposal {
		if p.blockPool.IsFutureBlock(proposal.BlockHash) {
			log.Info("future proposal, wait for syncing block")
			return
		}
		log.Info("do not have preBlock, request it", "hash:", proposal.BlockHash.String())
		p.OnInv(id, proposal.BlockHash)
		return
	}
	var voteMsg *msg.Vote
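	// Hand the proposal to the dispatcher. On failure, either send a reject
	// vote or, once a minority of arbiters have failed to handle proposals in
	// a non-zero view, start abnormal-state recovery.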
	err, isSendReject, handled := p.dispatcher.ProcessProposal(id, proposal)
	if err != nil {
		log.Error("Process Proposal error", "err", err)
		if isSendReject {
			voteMsg = p.dispatcher.RejectProposal(proposal, p.account)
		} else if !handled {
			pubKey := common.Bytes2Hex(id[:])
			p.notHandledProposal[pubKey] = struct{}{}
			count := len(p.notHandledProposal)
			log.Info("[OnProposalReceived] not handled", "count", count)
			if p.dispatcher.GetConsensusView().GetViewOffset() != 0 && p.dispatcher.GetConsensusView().HasArbitersMinorityCount(count) {
				log.Info("[OnProposalReceived] has minority not handled" +
					" proposals, need recover")
				if p.recoverAbnormalState() {
					log.Info("[OnProposalReceived] recover start")
				} else {
					log.Error("[OnProposalReceived] has no active peers, recover failed")
				}
			}
		}
	} else if isBadProposal {
		log.Info("bad proposal, reject")
		voteMsg = p.dispatcher.RejectProposal(proposal, p.account)
	} else {
		voteMsg = p.dispatcher.AcceptProposal(proposal, p.account)
	}

	if handled {
		log.Info("[OnProposalReceived] handled, reset notHandledProposal")
		p.notHandledProposal = make(map[string]struct{})
	}
	if voteMsg != nil && !p.dispatcher.GetProposalProcessFinished() {
		p.BroadMessage(voteMsg)
		p.dispatcher.SetProposalProcessFinished()
	}
}

func (p *Pbft) OnVoteAccepted(id peer.PID, vote *payload.DPOSProposalVote) {
	if !p.IsProducer() {
		return
	}
	if !p.dispatcher.GetConsensusView().IsRunning() {
		return
	}
	if vote.Accept {
		log.Info("OnVoteAccepted:", "hash:", vote.Hash().String())
	}
	if p.dispatcher.GetFinishedProposal().IsEqual(vote.ProposalHash) {
		log.Info("already finished proposal, no need to vote")
		return
	}
	if _, ok := p.blockPool.GetConfirm(vote.ProposalHash); ok {
		log.Info("already confirmed proposal, no need to vote")
		return
	}
	currentProposal, ok := p.tryGetCurrentProposal(id, vote)
	if !ok {
		log.Info("do not have proposal, request it and push vote into pending votes", "proposal", vote.ProposalHash.String())
		p.dispatcher.AddPendingVote(vote)
	} else if currentProposal.IsEqual(vote.ProposalHash) {
		processingProposal := p.dispatcher.GetProcessingProposal()
		if processingProposal == nil {
			log.Info("GetProcessingProposal is nil")
			return
		}
		if _, ok := p.blockPool.GetConfirm(processingProposal.BlockHash); ok {
			log.Warn("proposal already has a confirm")
			return
		}
		_, _, err := p.dispatcher.ProcessVote(vote)
		if err != nil {
			log.Error("ProcessVote error", "err", err)
		}
	}
}

func (p *Pbft) OnVoteRejected(id peer.PID, vote *payload.DPOSProposalVote) {
	log.Info("OnVoteRejected", "hash:", vote.Hash().String())
	p.OnVoteAccepted(id, vote)
}

func (p *Pbft) OnChangeView() {
	p.dispatcher.OnChangeView()

	if p.dispatcher.GetConsensusView().GetViewOffset() >= maxViewOffset {
		m := &msg.ResetView{
			Sponsor: p.account.PublicKeyBytes(),
		}
		buf := new(bytes.Buffer)
		err := m.SerializeUnsigned(buf)
		if err != nil {
			log.Error("failed to serialize ResetView message")
			return
		}

		m.Sign = p.account.Sign(buf.Bytes())
		log.Info("[TryChangeView] ResetView message created, broadcast it")
		p.BroadMessage(m)

		// record self
		if !p.dispatcher.ResetViewRequestIsContain(m.Sponsor) {
			p.OnResponseResetViewReceived(m)
		}
	}
}

func (p *Pbft) OnBadNetwork() {
	fmt.Println("OnBadNetwork")
}

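// OnRecover triggers abnormal-state recovery when this node is a producer.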
func (p *Pbft) OnRecover() {
	if p.account == nil || !p.dispatcher.IsProducer(p.account.PublicKeyBytes()) {
		return
	}
	p.recoverAbnormalState()
}

func (p *Pbft) recoverAbnormalState() bool {
	if p.recoverStarted {
		return false
	}
	minCount := p.dispatcher.GetConsensusView().GetMajorityCount()
	if producers := p.dispatcher.GetConsensusView().GetProducers(); len(producers) > 0 {
		if peers := p.network.GetActivePeers(); len(peers) < minCount {
			log.Error("[recoverAbnormalState] not enough active peers", "minCount", minCount, "peers.size", len(peers))
			return false
		}
		p.recoverStarted = true
		p.RequestAbnormalRecovering()
		startTime := time.Now()
		go func() {
			for {
				var count int
				for _, v := range p.statusMap {
					count += len(v)
				}
				if count > minCount {
					p.OnRecoverTimeout()
					break
				}
				if time.Since(startTime) > time.Second*3 {
					p.OnRecoverTimeout()
					break
				}
				time.Sleep(time.Millisecond * 100)
			}
		}()
		return true
	}
	return false
}

func (p *Pbft) OnRecoverTimeout() {
	if p.recoverStarted {
		if len(p.statusMap) != 0 {
			p.DoRecover()
		}
		p.recoverStarted = false
		p.statusMap = make(map[uint32]map[string]*dmsg.ConsensusStatus)
	}

	p.isRecoved = true
	if p.chain.Engine() == p {
		p.StartMine()
	}
}

func (p *Pbft) DoRecover() {
	var maxCountMaxViewOffset uint32
	for k := range p.statusMap {
		if maxCountMaxViewOffset < k {
			maxCountMaxViewOffset = k
		}
	}
	var status *dmsg.ConsensusStatus
	startTimes := make([]int64, 0)
	for _, v := range p.statusMap[maxCountMaxViewOffset] {
		if status == nil {
			if v.ConsensusStatus == dpos.ConsensusReady {
				p.notHandledProposal = make(map[string]struct{})
				return
			}
			status = v
		}
		startTimes = append(startTimes, v.ViewStartTime.UnixNano())
	}
	sort.Slice(startTimes, func(i, j int) bool {
		return startTimes[i] < startTimes[j]
	})
	medianTime := medianOf(startTimes)
	p.dispatcher.RecoverAbnormal(status, medianTime)
	p.notHandledProposal = make(map[string]struct{})
}

func medianOf(nums []int64) int64 {
	l := len(nums)

	if l == 0 {
		return 0
	}

	if l%2 == 0 {
		return (nums[l/2] + nums[l/2-1]) / 2
	}

	return nums[l/2]
}

func (p *Pbft) OnResponseResetViewReceived(msg *msg.ResetView) {
	if !p.IsProducer() {
		log.Error("[OnResponseResetViewReceived] self is not a producer")
		return
	}
	if !p.IsProducerByAccount(msg.Sponsor) {
		log.Error(fmt.Sprintf("[OnResponseResetViewReceived] %s is not a producer", common.Bytes2Hex(msg.Sponsor)))
		return
	}
	err := p.dispatcher.OnResponseResetViewReceived(msg)
	log.Info("[OnResponseResetViewReceived]", "from", common.Bytes2Hex(msg.Sponsor), "error", err, "p.dispatcher.GetResetViewReqCount()", p.dispatcher.GetResetViewReqCount())
	if err != nil {
		return
	}
	if p.dispatcher.GetResetViewReqCount() >= p.dispatcher.GetConsensusView().GetMajorityCount() {
		// do reset
		header := p.chain.CurrentHeader()
		p.dispatcher.ResetConsensus(header.Number.Uint64())
		log.Info("[reset consensus] start mine")
		p.StartMine()
		log.Info("[end reset consensus]", "p.dispatcher.GetResetViewReqCount()", p.dispatcher.GetResetViewReqCount(), "p.dispatcher.GetConsensusView().GetViewOffset()", p.dispatcher.GetConsensusView().GetViewOffset())
	}
}

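// OnBlockReceived decodes a confirmed block message, waits until the block's
// timestamp is due, and inserts the block into the local chain.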
func (p *Pbft) OnBlockReceived(id peer.PID, b *dmsg.BlockMsg, confirmed bool) {
	log.Info("-------[OnBlockReceived]--------")
	if !confirmed {
		return
	}
	block := &types.Block{}

	err := block.DecodeRLP(rlp.NewStream(bytes.NewBuffer(b.GetData()), 0))
	if err != nil {
		panic("OnBlockReceived Decode Block Msg error:" + err.Error())
	}

	delay := time.Unix(int64(block.Time()), 0).Sub(p.dispatcher.GetNowTime())
	log.Info("wait seal time", "delay", delay)
	time.Sleep(delay)

	parent := p.chain.GetBlock(block.ParentHash(), block.NumberU64()-1)
	if parent == nil { // ErrUnknownAncestor
		count := len(p.network.GetActivePeers())
		log.Warn("verify block error", "error", consensus.ErrUnknownAncestor, "activePeers", count)
		if !p.dispatcher.GetConsensusView().HasProducerMajorityCount(count) {
			go p.AnnounceDAddr()
		}
		return
	}

	blocks := types.Blocks{}
	blocks = append(blocks, block)
	log.Info("InsertChain", "height", block.GetHeight(), "cmpToCurrent", block.Number().Cmp(p.chain.CurrentBlock().Number()), "currentBlock", p.chain.CurrentBlock().NumberU64())

	if block.Number().Cmp(p.chain.CurrentBlock().Number()) >= 0 && block.NumberU64()-p.chain.CurrentBlock().NumberU64() > 1 {
		log.Warn("block is more than one ahead of the local chain")
		return
	}
	if _, err := p.chain.InsertChain(blocks); err != nil {
		if p.OnInsertChainError != nil {
			p.OnInsertChainError(id, block, err)
		}
	}
}

func (p *Pbft) OnConfirmReceived(pid peer.PID, c *payload.Confirm, height uint64) {
	log.Info("OnConfirmReceived", "confirm", c.Proposal.Hash(), "height", height)
	defer log.Info("OnConfirmReceived end")

	if p.IsOnduty() {
		p.isSealOver = true
		go p.Recover()
		return
	}

	if height > p.chain.CurrentHeader().Number.Uint64()+1 {
		log.Info("is future confirm")
		return
	}

	if height <= p.dispatcher.GetFinishedHeight() {
		log.Info("already confirmed block")
		return
	}

	if _, hasConfirm := p.blockPool.GetConfirmByHeight(height); hasConfirm {
		log.Info("has confirmed block", "height", height)
		return
	}

	if _, ok := p.blockPool.GetBlock(c.Proposal.BlockHash); !ok {
		log.Info("do not have preBlock, request it", "hash:", c.Proposal.BlockHash.String())
		p.OnInv(pid, c.Proposal.BlockHash)
		return
	}

	if _, ok := p.blockPool.GetConfirm(c.Proposal.BlockHash); !ok {
		p.dispatcher.ResetAcceptVotes()
		for _, vote := range c.Votes {
			p.dispatcher.ProcessVote(&vote)
		}
		return
	}
}

// limitMap is a helper function for maps that require a maximum limit by
// evicting a random entry if adding a new value would cause it to
// overflow the maximum allowed.
func (p *Pbft) limitMap(m map[common.Hash]struct{}, limit int) {
	if len(m)+1 > limit {
		// Remove a random entry from the map. For most compilers, Go's
		// range statement iterates starting at a random item although
		// that is not 100% guaranteed by the spec. The iteration order
		// is not important here because an adversary would have to be
		// able to pull off preimage attacks on the hashing function in
		// order to target eviction of specific entries anyways.
		for hash := range m {
			delete(m, hash)
			return
		}
	}
}

func (p *Pbft) OnSmallCroTxReceived(id peer.PID, msg *dmsg.SmallCroTx) {
	list := p.GetCurrentProducers()
	total := p.dispatcher.GetConsensusView().GetTotalProducersCount()
	height := p.chain.CurrentBlock().GetHeight()
	smallcrosstx.OnReceivedSmallCroTxFromDirectNet(list, total, msg.GetSignature(), msg.GetRawTx(), height)
}

func (p *Pbft) OnFailedWithdrawTxReceived(id peer.PID, msg *dmsg.FailedWithdrawTx) {
	err := withdrawfailedtx.ReceivedFailedWithdrawTx(msg.GetHash(), msg.GetSignature())
	if err != nil {
		log.Error("ReceivedFailedWithdrawTx", "error", err)
	}
}