github.com/bytom/bytom@v1.1.2-0.20221014091027-bbcba3df6075/netsync/chainmgr/handle.go (about)

     1  package chainmgr
     2  
     3  import (
     4  	"errors"
     5  	"reflect"
     6  	"time"
     7  
     8  	log "github.com/sirupsen/logrus"
     9  
    10  	cfg "github.com/bytom/bytom/config"
    11  	"github.com/bytom/bytom/consensus"
    12  	dbm "github.com/bytom/bytom/database/leveldb"
    13  	"github.com/bytom/bytom/event"
    14  	msgs "github.com/bytom/bytom/netsync/messages"
    15  	"github.com/bytom/bytom/netsync/peers"
    16  	"github.com/bytom/bytom/p2p"
    17  	"github.com/bytom/bytom/p2p/security"
    18  	core "github.com/bytom/bytom/protocol"
    19  	"github.com/bytom/bytom/protocol/bc"
    20  	"github.com/bytom/bytom/protocol/bc/types"
    21  )
    22  
const (
	// logModule is the module tag attached to every log entry emitted by this package.
	logModule = "netsync"
)
    26  
// Chain is the interface for Bytom core
type Chain interface {
	// BestBlockHeader returns the header of the current chain tip.
	BestBlockHeader() *types.BlockHeader
	// LastJustifiedHeader returns the most recent justified (finalized-by-vote) header.
	LastJustifiedHeader() (*types.BlockHeader, error)
	// BestBlockHeight returns the height of the current chain tip.
	BestBlockHeight() uint64
	// GetBlockByHash looks up a full block by its hash.
	GetBlockByHash(*bc.Hash) (*types.Block, error)
	// GetBlockByHeight looks up the main-chain block at the given height.
	GetBlockByHeight(uint64) (*types.Block, error)
	// GetHeaderByHash looks up a block header by its hash.
	GetHeaderByHash(*bc.Hash) (*types.BlockHeader, error)
	// GetHeaderByHeight looks up the main-chain header at the given height.
	GetHeaderByHeight(uint64) (*types.BlockHeader, error)
	// InMainChain reports whether the block with the given hash is on the main chain.
	InMainChain(bc.Hash) bool
	// ProcessBlock submits a block for validation/attachment; the bool result is
	// presumably "is orphan" — TODO confirm against the core implementation.
	ProcessBlock(*types.Block) (bool, error)
	// ValidateTx validates a transaction; the bool result is treated as
	// "is orphan" by the handlers in this file.
	ValidateTx(*types.Tx) (bool, error)
}
    40  
// Switch is the interface for network layer
type Switch interface {
	// AddReactor registers a named reactor with the switch and returns it.
	AddReactor(name string, reactor p2p.Reactor) p2p.Reactor
	// Start brings the network switch up.
	Start() error
	// Stop shuts the network switch down.
	Stop() error
	// IsListening reports whether the switch is accepting inbound connections.
	IsListening() bool
	// DialPeerWithAddress opens an outbound connection to the given address.
	DialPeerWithAddress(addr *p2p.NetAddress) error
	// Peers returns the set of currently connected peers.
	Peers() *p2p.PeerSet
}
    50  
// Mempool is the interface for Bytom mempool
type Mempool interface {
	// GetTransactions returns the descriptors of all transactions currently pooled.
	GetTransactions() []*core.TxDesc
	// IsDust reports whether the transaction is below the relay threshold;
	// dust transactions are dropped by the handlers in this file.
	IsDust(tx *types.Tx) bool
}
    56  
//Manager is responsible for the business layer information synchronization
type Manager struct {
	sw          Switch       // network switch used to register the protocol reactor
	chain       Chain        // local blockchain core
	mempool     Mempool      // local transaction pool
	blockKeeper *blockKeeper // drives block/header download and fast sync
	peers       *peers.PeerSet

	txSyncCh chan *txSyncMsg // feeds transactions to the broadcast loop
	quit     chan struct{}   // closed by Stop to terminate background loops
	config   *cfg.Config

	eventDispatcher *event.Dispatcher
	txMsgSub        *event.Subscription // subscription to core.TxMsgEvent, set in Start
}
    72  
    73  //NewManager create a chain sync manager.
    74  func NewManager(config *cfg.Config, sw Switch, chain Chain, mempool Mempool, dispatcher *event.Dispatcher, peers *peers.PeerSet, fastSyncDB dbm.DB) (*Manager, error) {
    75  	manager := &Manager{
    76  		sw:              sw,
    77  		mempool:         mempool,
    78  		chain:           chain,
    79  		blockKeeper:     newBlockKeeper(chain, peers, fastSyncDB),
    80  		peers:           peers,
    81  		txSyncCh:        make(chan *txSyncMsg),
    82  		quit:            make(chan struct{}),
    83  		config:          config,
    84  		eventDispatcher: dispatcher,
    85  	}
    86  
    87  	if !config.VaultMode {
    88  		protocolReactor := NewProtocolReactor(manager)
    89  		manager.sw.AddReactor("PROTOCOL", protocolReactor)
    90  	}
    91  	return manager, nil
    92  }
    93  
// AddPeer add the network layer peer to logic layer
func (m *Manager) AddPeer(peer peers.BasePeer) {
	m.peers.AddPeer(peer)
}
    98  
    99  //IsCaughtUp check wheather the peer finish the sync
   100  func (m *Manager) IsCaughtUp() bool {
   101  	peer := m.peers.BestPeer(consensus.SFFullNode)
   102  	return peer == nil || peer.Height() <= m.chain.BestBlockHeight()
   103  }
   104  
   105  func (m *Manager) handleBlockMsg(peer *peers.Peer, msg *msgs.BlockMessage) {
   106  	block, err := msg.GetBlock()
   107  	if err != nil {
   108  		return
   109  	}
   110  
   111  	m.blockKeeper.processBlock(peer.ID(), block)
   112  }
   113  
   114  func (m *Manager) handleBlocksMsg(peer *peers.Peer, msg *msgs.BlocksMessage) {
   115  	blocks, err := msg.GetBlocks()
   116  	if err != nil {
   117  		log.WithFields(log.Fields{"module": logModule, "err": err}).Debug("fail on handleBlocksMsg GetBlocks")
   118  		return
   119  	}
   120  
   121  	m.blockKeeper.processBlocks(peer.ID(), blocks)
   122  }
   123  
// handleFilterAddMsg adds one address to the peer's bloom-style address filter.
func (m *Manager) handleFilterAddMsg(peer *peers.Peer, msg *msgs.FilterAddMessage) {
	peer.AddFilterAddress(msg.Address)
}
   127  
// handleFilterClearMsg removes every address from the peer's address filter.
func (m *Manager) handleFilterClearMsg(peer *peers.Peer) {
	peer.FilterClear()
}
   131  
// handleFilterLoadMsg loads a batch of addresses into the peer's address filter.
func (m *Manager) handleFilterLoadMsg(peer *peers.Peer, msg *msgs.FilterLoadMessage) {
	peer.AddFilterAddresses(msg.Addresses)
}
   135  
   136  func (m *Manager) handleGetBlockMsg(peer *peers.Peer, msg *msgs.GetBlockMessage) {
   137  	var block *types.Block
   138  	var err error
   139  	if msg.Height != 0 {
   140  		block, err = m.chain.GetBlockByHeight(msg.Height)
   141  	} else {
   142  		block, err = m.chain.GetBlockByHash(msg.GetHash())
   143  	}
   144  	if err != nil {
   145  		log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on handleGetBlockMsg get block from chain")
   146  		return
   147  	}
   148  
   149  	ok, err := peer.SendBlock(block)
   150  	if !ok {
   151  		m.peers.RemovePeer(peer.ID())
   152  	}
   153  	if err != nil {
   154  		log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on handleGetBlockMsg sentBlock")
   155  	}
   156  }
   157  
   158  func (m *Manager) handleGetBlocksMsg(peer *peers.Peer, msg *msgs.GetBlocksMessage) {
   159  	endTime := time.Now().Add(requireBlocksTimeout / 10)
   160  	isTimeout := func() bool {
   161  		return time.Now().After(endTime)
   162  	}
   163  
   164  	blocks, err := m.blockKeeper.locateBlocks(msg.GetBlockLocator(), msg.GetStopHash(), isTimeout)
   165  	if err != nil || len(blocks) == 0 {
   166  		log.WithFields(log.Fields{
   167  			"module": logModule,
   168  			"err":    err,
   169  			"size":   len(blocks),
   170  		}).Error("fail on handleGetBlocksMsg locateBlocks")
   171  		return
   172  	}
   173  
   174  	totalSize := 0
   175  	sendBlocks := []*types.Block{}
   176  	for _, block := range blocks {
   177  		rawData, err := block.MarshalText()
   178  		if err != nil {
   179  			log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on handleGetBlocksMsg marshal block")
   180  			return
   181  		}
   182  
   183  		if totalSize+len(rawData) > msgs.MaxBlockchainResponseSize/2 {
   184  			break
   185  		}
   186  		totalSize += len(rawData)
   187  		sendBlocks = append(sendBlocks, block)
   188  	}
   189  
   190  	ok, err := peer.SendBlocks(sendBlocks)
   191  	if !ok {
   192  		m.peers.RemovePeer(peer.ID())
   193  	}
   194  	if err != nil {
   195  		log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on handleGetBlocksMsg sentBlock")
   196  	}
   197  }
   198  
   199  func (m *Manager) handleGetHeadersMsg(peer *peers.Peer, msg *msgs.GetHeadersMessage) {
   200  	headers, err := m.blockKeeper.locateHeaders(msg.GetBlockLocator(), msg.GetStopHash(), msg.GetSkip(), maxNumOfHeadersPerMsg)
   201  	if err != nil || len(headers) == 0 {
   202  		log.WithFields(log.Fields{"module": logModule, "err": err}).Debug("fail on handleGetHeadersMsg locateHeaders")
   203  		return
   204  	}
   205  
   206  	ok, err := peer.SendHeaders(headers)
   207  	if !ok {
   208  		m.peers.RemovePeer(peer.ID())
   209  	}
   210  	if err != nil {
   211  		log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on handleGetHeadersMsg sentBlock")
   212  	}
   213  }
   214  
   215  func (m *Manager) handleGetMerkleBlockMsg(peer *peers.Peer, msg *msgs.GetMerkleBlockMessage) {
   216  	var err error
   217  	var block *types.Block
   218  	if msg.Height != 0 {
   219  		block, err = m.chain.GetBlockByHeight(msg.Height)
   220  	} else {
   221  		block, err = m.chain.GetBlockByHash(msg.GetHash())
   222  	}
   223  	if err != nil {
   224  		log.WithFields(log.Fields{"module": logModule, "err": err}).Warning("fail on handleGetMerkleBlockMsg get block from chain")
   225  		return
   226  	}
   227  
   228  	ok, err := peer.SendMerkleBlock(block)
   229  	if err != nil {
   230  		log.WithFields(log.Fields{"module": logModule, "err": err}).Error("fail on handleGetMerkleBlockMsg sentMerkleBlock")
   231  		return
   232  	}
   233  
   234  	if !ok {
   235  		m.peers.RemovePeer(peer.ID())
   236  	}
   237  }
   238  
   239  func (m *Manager) handleHeadersMsg(peer *peers.Peer, msg *msgs.HeadersMessage) {
   240  	headers, err := msg.GetHeaders()
   241  	if err != nil {
   242  		log.WithFields(log.Fields{"module": logModule, "err": err}).Debug("fail on handleHeadersMsg GetHeaders")
   243  		return
   244  	}
   245  
   246  	m.blockKeeper.processHeaders(peer.ID(), headers)
   247  }
   248  
   249  func (m *Manager) handleStatusMsg(basePeer peers.BasePeer, msg *msgs.StatusMessage) {
   250  	if peer := m.peers.GetPeer(basePeer.ID()); peer != nil {
   251  		peer.SetBestStatus(msg.BestHeight, msg.GetBestHash())
   252  		peer.SetJustifiedStatus(msg.JustifiedHeight, msg.GetIrreversibleHash())
   253  	}
   254  }
   255  
   256  func (m *Manager) handleTransactionMsg(peer *peers.Peer, msg *msgs.TransactionMessage) {
   257  	tx, err := msg.GetTransaction()
   258  	if err != nil {
   259  		m.peers.ProcessIllegal(peer.ID(), security.LevelConnException, "fail on get tx from message")
   260  		return
   261  	}
   262  
   263  	if m.mempool.IsDust(tx) {
   264  		log.WithFields(log.Fields{"tx_hash": tx.ID.String(), "peer": peer.Addr()}).Warn("receive dust tx msg")
   265  		return
   266  	}
   267  
   268  	m.peers.MarkTx(peer.ID(), tx.ID)
   269  	if isOrphan, err := m.chain.ValidateTx(tx); err != nil && err != core.ErrDustTx && !isOrphan {
   270  		m.peers.ProcessIllegal(peer.ID(), security.LevelMsgIllegal, "fail on validate tx transaction")
   271  	}
   272  }
   273  
   274  func (m *Manager) handleTransactionsMsg(peer *peers.Peer, msg *msgs.TransactionsMessage) {
   275  	txs, err := msg.GetTransactions()
   276  	if err != nil {
   277  		m.peers.ProcessIllegal(peer.ID(), security.LevelConnException, "fail on get txs from message")
   278  		return
   279  	}
   280  
   281  	if len(txs) > msgs.TxsMsgMaxTxNum {
   282  		m.peers.ProcessIllegal(peer.ID(), security.LevelMsgIllegal, "exceeded the maximum tx number limit")
   283  		return
   284  	}
   285  
   286  	for _, tx := range txs {
   287  		if m.mempool.IsDust(tx) {
   288  			m.peers.ProcessIllegal(peer.ID(), security.LevelMsgIllegal, "receive dust txs msg")
   289  			continue
   290  		}
   291  
   292  		m.peers.MarkTx(peer.ID(), tx.ID)
   293  		if isOrphan, err := m.chain.ValidateTx(tx); err != nil && !isOrphan {
   294  			m.peers.ProcessIllegal(peer.ID(), security.LevelMsgIllegal, "fail on validate tx transaction")
   295  			return
   296  		}
   297  	}
   298  }
   299  
// processMsg is the single dispatch point for all inbound blockchain
// messages: it resolves the logical peer, logs the message, and routes it to
// the matching handler by concrete message type. Messages from peers not in
// the peer set are dropped.
func (m *Manager) processMsg(basePeer peers.BasePeer, msgType byte, msg msgs.BlockchainMessage) {
	peer := m.peers.GetPeer(basePeer.ID())
	if peer == nil {
		return
	}

	log.WithFields(log.Fields{
		"module":  logModule,
		"peer":    basePeer.Addr(),
		"type":    reflect.TypeOf(msg),
		"message": msg.String(),
	}).Debug("receive message from peer")

	// Route on the concrete message type; `msg` is re-bound to the narrowed
	// type inside each case.
	switch msg := msg.(type) {
	case *msgs.GetBlockMessage:
		m.handleGetBlockMsg(peer, msg)

	case *msgs.BlockMessage:
		m.handleBlockMsg(peer, msg)

	case *msgs.StatusMessage:
		// Status needs the base peer, not the wrapped one.
		m.handleStatusMsg(basePeer, msg)

	case *msgs.TransactionMessage:
		m.handleTransactionMsg(peer, msg)

	case *msgs.TransactionsMessage:
		m.handleTransactionsMsg(peer, msg)

	case *msgs.GetHeadersMessage:
		m.handleGetHeadersMsg(peer, msg)

	case *msgs.HeadersMessage:
		m.handleHeadersMsg(peer, msg)

	case *msgs.GetBlocksMessage:
		m.handleGetBlocksMsg(peer, msg)

	case *msgs.BlocksMessage:
		m.handleBlocksMsg(peer, msg)

	case *msgs.FilterLoadMessage:
		m.handleFilterLoadMsg(peer, msg)

	case *msgs.FilterAddMessage:
		m.handleFilterAddMsg(peer, msg)

	case *msgs.FilterClearMessage:
		m.handleFilterClearMsg(peer)

	case *msgs.GetMerkleBlockMessage:
		m.handleGetMerkleBlockMsg(peer, msg)

	default:
		log.WithFields(log.Fields{
			"module":       logModule,
			"peer":         basePeer.Addr(),
			"message_type": reflect.TypeOf(msg),
		}).Error("unhandled message type")
	}
}
   361  
// RemovePeer delete peer for peer set
func (m *Manager) RemovePeer(peerID string) {
	m.peers.RemovePeer(peerID)
}
   366  
   367  // SendStatus sent the current self status to remote peer
   368  func (m *Manager) SendStatus(peer peers.BasePeer) error {
   369  	p := m.peers.GetPeer(peer.ID())
   370  	if p == nil {
   371  		return errors.New("invalid peer")
   372  	}
   373  
   374  	lastJustifiedHeader, err := m.chain.LastJustifiedHeader()
   375  	if err != nil {
   376  		return err
   377  	}
   378  
   379  	if err := p.SendStatus(m.chain.BestBlockHeader(), lastJustifiedHeader); err != nil {
   380  		m.peers.RemovePeer(p.ID())
   381  		return err
   382  	}
   383  	return nil
   384  }
   385  
   386  // Start the network logic layer
   387  func (m *Manager) Start() error {
   388  	var err error
   389  	m.txMsgSub, err = m.eventDispatcher.Subscribe(core.TxMsgEvent{})
   390  	if err != nil {
   391  		return err
   392  	}
   393  	m.blockKeeper.start()
   394  	go m.broadcastTxsLoop()
   395  	go m.syncMempoolLoop()
   396  
   397  	return nil
   398  }
   399  
//Stop stop sync manager
func (m *Manager) Stop() {
	m.blockKeeper.stop()
	// Closing quit signals the broadcast/mempool-sync goroutines to exit.
	close(m.quit)
}