github.com/iotexproject/iotex-core@v1.14.1-rc1/dispatcher/dispatcher.go

     1  // Copyright (c) 2019 IoTeX Foundation
     2  // This source code is provided 'as is' and no warranties are given as to title or non-infringement, merchantability
     3  // or fitness for purpose and, to the extent permitted by law, all liability for your use of the code is disclaimed.
     4  // This source code is governed by Apache License 2.0 that can be found in the LICENSE file.
     5  
     6  package dispatcher
     7  
     8  import (
     9  	"context"
    10  	"fmt"
    11  	"sync"
    12  	"time"
    13  
    14  	"github.com/libp2p/go-libp2p-core/peer"
    15  	"github.com/prometheus/client_golang/prometheus"
    16  	"go.uber.org/zap"
    17  	"google.golang.org/protobuf/proto"
    18  
    19  	"github.com/iotexproject/iotex-core/pkg/lifecycle"
    20  	"github.com/iotexproject/iotex-core/pkg/log"
    21  	goproto "github.com/iotexproject/iotex-proto/golang"
    22  	"github.com/iotexproject/iotex-proto/golang/iotexrpc"
    23  	"github.com/iotexproject/iotex-proto/golang/iotextypes"
    24  )
    25  
    26  type (
    27  	// Config is the config for dispatcher
    28  	Config struct {
    29  		ActionChanSize             uint          `yaml:"actionChanSize"`
    30  		BlockChanSize              uint          `yaml:"blockChanSize"`
    31  		BlockSyncChanSize          uint          `yaml:"blockSyncChanSize"`
    32  		ProcessSyncRequestInterval time.Duration `yaml:"processSyncRequestInterval"`
    33  		// TODO: explorer dependency was deleted in #1085; need to revive it by migrating to the API
    34  	}
    35  )
    36  
    37  var (
    38  	// DefaultConfig is the default config
    39  	DefaultConfig = Config{
    40  		ActionChanSize:             5000,
    41  		BlockChanSize:              1000,
    42  		BlockSyncChanSize:          400,
    43  		ProcessSyncRequestInterval: 0 * time.Second,
    44  	}
    45  )
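
// A minimal tuning sketch, assuming the defaults above are the starting point;
// the override values below are purely illustrative, not recommendations:
//
//	cfg := DefaultConfig
//	cfg.ActionChanSize = 10000                       // deeper action queue
//	cfg.ProcessSyncRequestInterval = 5 * time.Second // throttle per-peer sync requests
//	d, err := NewDispatcher(cfg)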
    46  
    47  // Subscriber is the dispatcher subscriber interface
    48  type Subscriber interface {
    49  	ReportFullness(context.Context, iotexrpc.MessageType, float32)
    50  	HandleAction(context.Context, *iotextypes.Action) error
    51  	HandleBlock(context.Context, string, *iotextypes.Block) error
    52  	HandleSyncRequest(context.Context, peer.AddrInfo, *iotexrpc.BlockSync) error
    53  	HandleConsensusMsg(*iotextypes.ConsensusMessage) error
    54  	HandleNodeInfoRequest(context.Context, peer.AddrInfo, *iotextypes.NodeInfoRequest) error
    55  	HandleNodeInfo(context.Context, string, *iotextypes.NodeInfo) error
    56  }
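
// A minimal no-op Subscriber sketch, for illustration only; the type below is
// hypothetical and not part of this package. In practice a chain service wires
// these callbacks into the actpool, blockchain, block syncer and consensus
// modules.
//
//	type noopSubscriber struct{}
//
//	func (noopSubscriber) ReportFullness(context.Context, iotexrpc.MessageType, float32)      {}
//	func (noopSubscriber) HandleAction(context.Context, *iotextypes.Action) error             { return nil }
//	func (noopSubscriber) HandleBlock(context.Context, string, *iotextypes.Block) error       { return nil }
//	func (noopSubscriber) HandleConsensusMsg(*iotextypes.ConsensusMessage) error              { return nil }
//	func (noopSubscriber) HandleNodeInfo(context.Context, string, *iotextypes.NodeInfo) error { return nil }
//	func (noopSubscriber) HandleSyncRequest(context.Context, peer.AddrInfo, *iotexrpc.BlockSync) error {
//		return nil
//	}
//	func (noopSubscriber) HandleNodeInfoRequest(context.Context, peer.AddrInfo, *iotextypes.NodeInfoRequest) error {
//		return nil
//	}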
    57  
    58  // Dispatcher dispatches incoming messages from peers (actions, blocks, block sync requests, consensus and node info messages) to the registered subscribers.
    59  type Dispatcher interface {
    60  	lifecycle.StartStopper
    61  
    62  	// AddSubscriber registers a subscriber for the given chain ID with the dispatcher
    63  	AddSubscriber(uint32, Subscriber)
    64  	// HandleBroadcast handles an incoming broadcast message. The transport-layer delivery semantics is at-least-once,
    65  	// so the handler may receive duplicate messages.
    66  	HandleBroadcast(context.Context, uint32, string, proto.Message)
    67  	// HandleTell handles an incoming unicast (tell) message. The transport-layer delivery semantics is exactly-once.
    68  	// The sender is given so that the handler can reply to the message.
    69  	HandleTell(context.Context, uint32, peer.AddrInfo, proto.Message)
    70  }
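
// A usage sketch; chainID, sub, peerID, peerAddrInfo and msg are placeholders
// supplied by the caller (normally the p2p agent), and error handling is
// elided:
//
//	d, _ := NewDispatcher(DefaultConfig)
//	d.AddSubscriber(chainID, sub) // sub implements Subscriber
//	_ = d.Start(context.Background())
//
//	// the p2p layer feeds decoded protobuf messages in
//	d.HandleBroadcast(ctx, chainID, peerID, msg)  // gossiped actions, blocks, consensus messages
//	d.HandleTell(ctx, chainID, peerAddrInfo, msg) // unicast block-sync and node-info traffic
//
//	_ = d.Stop(context.Background())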
    71  
    72  var (
    73  	requestMtc = prometheus.NewCounterVec(
    74  		prometheus.CounterOpts{
    75  			Name: "iotex_dispatch_request",
    76  			Help: "Dispatcher request counter.",
    77  		},
    78  		[]string{"method", "succeed"},
    79  	)
    80  )
    81  
    82  func init() {
    83  	prometheus.MustRegister(requestMtc)
    84  }
    85  
    86  // blockMsg packages a proto block message.
    87  type blockMsg struct {
    88  	ctx     context.Context
    89  	chainID uint32
    90  	block   *iotextypes.Block
    91  	peer    string
    92  }
    93  
    94  func (m blockMsg) ChainID() uint32 {
    95  	return m.chainID
    96  }
    97  
    98  // blockSyncMsg packages a proto block sync message.
    99  type blockSyncMsg struct {
   100  	ctx     context.Context
   101  	chainID uint32
   102  	sync    *iotexrpc.BlockSync
   103  	peer    peer.AddrInfo
   104  }
   105  
   106  func (m blockSyncMsg) ChainID() uint32 {
   107  	return m.chainID
   108  }
   109  
   110  // actionMsg packages a proto action message.
   111  type actionMsg struct {
   112  	ctx     context.Context
   113  	chainID uint32
   114  	action  *iotextypes.Action
   115  }
   116  
   117  func (m actionMsg) ChainID() uint32 {
   118  	return m.chainID
   119  }
   120  
   121  // IotxDispatcher is the request and event dispatcher for an iotx node.
   122  type IotxDispatcher struct {
   123  	lifecycle.Readiness
   124  	actionChanLock sync.RWMutex
   125  	blockChanLock  sync.RWMutex
   126  	syncChanLock   sync.RWMutex
   127  	actionChan     chan *actionMsg
   128  	blockChan      chan *blockMsg
   129  	syncChan       chan *blockSyncMsg
   130  	eventAudit     map[iotexrpc.MessageType]int
   131  	eventAuditLock sync.RWMutex
   132  	wg             sync.WaitGroup
   133  	quit           chan struct{}
   134  	subscribers    map[uint32]Subscriber
   135  	subscribersMU  sync.RWMutex
   136  	peerLastSync   map[string]time.Time
   137  	syncInterval   time.Duration
   138  }
   139  
   140  // NewDispatcher creates a new Dispatcher
   141  func NewDispatcher(cfg Config) (Dispatcher, error) {
   142  	d := &IotxDispatcher{
   143  		actionChan:   make(chan *actionMsg, cfg.ActionChanSize),
   144  		blockChan:    make(chan *blockMsg, cfg.BlockChanSize),
   145  		syncChan:     make(chan *blockSyncMsg, cfg.BlockSyncChanSize),
   146  		eventAudit:   make(map[iotexrpc.MessageType]int),
   147  		quit:         make(chan struct{}),
   148  		subscribers:  make(map[uint32]Subscriber),
   149  		peerLastSync: make(map[string]time.Time),
   150  		syncInterval: cfg.ProcessSyncRequestInterval,
   151  	}
   152  	return d, nil
   153  }
   154  
   155  // AddSubscriber adds a subscriber to the dispatcher
   156  func (d *IotxDispatcher) AddSubscriber(
   157  	chainID uint32,
   158  	subscriber Subscriber,
   159  ) {
   160  	d.subscribersMU.Lock()
   161  	d.subscribers[chainID] = subscriber
   162  	d.subscribersMU.Unlock()
   163  }
   164  
   165  // Start starts the dispatcher.
   166  func (d *IotxDispatcher) Start(ctx context.Context) error {
   167  	log.L().Info("Starting dispatcher.")
   168  
   169  	// set up multiple action consumers (one per five slots of channel capacity) to enqueue actions into the actpool
   170  	for i := 0; i < cap(d.actionChan)/5; i++ {
   171  		d.wg.Add(1)
   172  		go d.actionHandler()
   173  	}
   174  
   175  	d.wg.Add(1)
   176  	go d.blockHandler()
   177  
   178  	d.wg.Add(1)
   179  	go d.syncHandler()
   180  
   181  	return d.TurnOn()
   182  }
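
// With DefaultConfig (ActionChanSize = 5000), Start launches 1000 action
// consumer goroutines (one per five slots of channel capacity), plus one block
// handler and one sync handler.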
   183  
   184  // Stop gracefully shuts down the dispatcher by stopping all handlers and waiting for them to finish.
   185  func (d *IotxDispatcher) Stop(ctx context.Context) error {
   186  	if err := d.TurnOff(); err != nil {
   187  		log.L().Warn("Dispatcher already in the process of shutting down.")
   188  		return err
   189  	}
   190  	log.L().Info("Dispatcher is shutting down.")
   191  	close(d.quit)
   192  	d.wg.Wait()
   193  	return nil
   194  }
   195  
   196  // EventQueueSize returns the event queue size
   197  func (d *IotxDispatcher) EventQueueSize() map[string]int {
   198  	return map[string]int{
   199  		"action": len(d.actionChan),
   200  		"block":  len(d.blockChan),
   201  		"sync":   len(d.syncChan),
   202  	}
   203  }
   204  
   205  // EventAudit returns the event audit map
   206  func (d *IotxDispatcher) EventAudit() map[iotexrpc.MessageType]int {
   207  	d.eventAuditLock.RLock()
   208  	defer d.eventAuditLock.RUnlock()
   209  	snapshot := make(map[iotexrpc.MessageType]int)
   210  	for k, v := range d.eventAudit {
   211  		snapshot[k] = v
   212  	}
   213  	return snapshot
   214  }
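
// A monitoring sketch on a *IotxDispatcher; both maps are point-in-time
// snapshots and the sample values are illustrative:
//
//	sizes := d.EventQueueSize() // e.g. map[string]int{"action": 12, "block": 0, "sync": 1}
//	audit := d.EventAudit()     // cumulative per-MessageType counts since startup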
   215  
   216  func (d *IotxDispatcher) actionHandler() {
   217  	defer d.wg.Done()
   218  	for {
   219  		select {
   220  		case a := <-d.actionChan:
   221  			d.handleActionMsg(a)
   222  		case <-d.quit:
   223  			log.L().Debug("action handler is terminated.")
   224  			return
   225  		}
   226  	}
   227  }
   228  
   229  // blockHandler consumes block messages received from peers.
   230  func (d *IotxDispatcher) blockHandler() {
   231  	defer d.wg.Done()
   232  	for {
   233  		select {
   234  		case b := <-d.blockChan:
   235  			d.handleBlockMsg(b)
   236  		case <-d.quit:
   237  			log.L().Info("block handler is terminated.")
   238  			return
   239  		}
   240  	}
   241  }
   242  
   243  // syncHandler handles incoming block sync requests
   244  func (d *IotxDispatcher) syncHandler() {
   245  	defer d.wg.Done()
   246  	for {
   247  		select {
   248  		case m := <-d.syncChan:
   249  			d.handleBlockSyncMsg(m)
   250  		case <-d.quit:
   251  			log.L().Info("block sync handler done.")
   252  			return
   253  		}
   254  	}
   255  }
   256  
   257  func (d *IotxDispatcher) subscriber(chainID uint32) Subscriber {
   258  	d.subscribersMU.RLock()
   259  	defer d.subscribersMU.RUnlock()
   260  	subscriber, ok := d.subscribers[chainID]
   261  	if !ok {
   262  		return nil
   263  	}
   264  
   265  	return subscriber
   266  }
   267  
   268  // handleActionMsg handles actionMsg from all peers.
   269  func (d *IotxDispatcher) handleActionMsg(m *actionMsg) {
   270  	log.L().Debug("receive actionMsg.")
   271  
   272  	if subscriber := d.subscriber(m.ChainID()); subscriber != nil {
   273  		d.updateEventAudit(iotexrpc.MessageType_ACTION)
   274  		if err := subscriber.HandleAction(m.ctx, m.action); err != nil {
   275  			requestMtc.WithLabelValues("AddAction", "false").Inc()
   276  			log.L().Debug("Handle action request error.", zap.Error(err))
   277  		}
   278  		d.actionChanLock.RLock()
   279  		defer d.actionChanLock.RUnlock()
   280  
   281  		subscriber.ReportFullness(m.ctx, iotexrpc.MessageType_ACTION, float32(len(d.actionChan))/float32(cap(d.actionChan)))
   282  	} else {
   283  		log.L().Info("No subscriber specified in the dispatcher.", zap.Uint32("chainID", m.ChainID()))
   284  	}
   285  }
   286  
   287  // handleBlockMsg handles blockMsg from peers.
   288  func (d *IotxDispatcher) handleBlockMsg(m *blockMsg) {
   289  	log.L().Debug("receive blockMsg.", zap.Uint64("height", m.block.GetHeader().GetCore().GetHeight()))
   290  
   291  	if subscriber := d.subscriber(m.ChainID()); subscriber != nil {
   292  		d.updateEventAudit(iotexrpc.MessageType_BLOCK)
   293  		if err := subscriber.HandleBlock(m.ctx, m.peer, m.block); err != nil {
   294  			log.L().Error("Fail to handle the block.", zap.Error(err))
   295  		}
   296  		d.blockChanLock.RLock()
   297  		defer d.blockChanLock.RUnlock()
   298  
   299  		subscriber.ReportFullness(m.ctx, iotexrpc.MessageType_BLOCK, float32(len(d.blockChan))/float32(cap(d.blockChan)))
   300  	} else {
   301  		log.L().Info("No subscriber specified in the dispatcher.", zap.Uint32("chainID", m.ChainID()))
   302  	}
   303  }
   304  
   305  // handleBlockSyncMsg handles block sync requests from peers.
   306  func (d *IotxDispatcher) handleBlockSyncMsg(m *blockSyncMsg) {
   307  	log.L().Debug("Receive blockSyncMsg.",
   308  		zap.String("src", fmt.Sprintf("%v", m.peer)),
   309  		zap.Uint64("start", m.sync.Start),
   310  		zap.Uint64("end", m.sync.End))
   311  
   312  	if subscriber := d.subscriber(m.ChainID()); subscriber != nil {
   313  		d.updateEventAudit(iotexrpc.MessageType_BLOCK_REQUEST)
   314  		// dispatch to block sync
   315  		if err := subscriber.HandleSyncRequest(m.ctx, m.peer, m.sync); err != nil {
   316  			log.L().Error("Failed to handle sync request.", zap.Error(err))
   317  		}
   318  		d.syncChanLock.RLock()
   319  		defer d.syncChanLock.RUnlock()
   320  
   321  		subscriber.ReportFullness(m.ctx, iotexrpc.MessageType_BLOCK_REQUEST, float32(len(d.syncChan))/float32(cap(d.syncChan)))
   322  	} else {
   323  		log.L().Info("No subscriber specified in the dispatcher.", zap.Uint32("chainID", m.ChainID()))
   324  	}
   325  }
   326  
   327  // dispatchAction adds the passed action message to the action handling queue.
   328  func (d *IotxDispatcher) dispatchAction(ctx context.Context, chainID uint32, msg *iotextypes.Action) {
   329  	if !d.IsReady() {
   330  		return
   331  	}
   332  	subscriber := d.subscriber(chainID)
   333  	if subscriber == nil {
   334  		log.L().Debug("no subscriber for this chain id, drop the action", zap.Uint32("chain id", chainID))
   335  		return
   336  	}
   337  	d.actionChanLock.Lock()
   338  	defer d.actionChanLock.Unlock()
   339  	l := len(d.actionChan)
   340  	c := cap(d.actionChan)
   341  	if l < c {
   342  		d.actionChan <- &actionMsg{
   343  			ctx:     ctx,
   344  			chainID: chainID,
   345  			action:  msg,
   346  		}
   347  		l++
   348  	} else {
   349  		log.L().Warn("dispatcher action channel is full, drop an event.")
   350  	}
   351  	subscriber.ReportFullness(ctx, iotexrpc.MessageType_ACTION, float32(l)/float32(c))
   352  }
   353  
   354  // dispatchBlock adds the passed block message to the block handling queue.
   355  func (d *IotxDispatcher) dispatchBlock(ctx context.Context, chainID uint32, peer string, msg *iotextypes.Block) {
   356  	if !d.IsReady() {
   357  		return
   358  	}
   359  	subscriber := d.subscriber(chainID)
   360  	if subscriber == nil {
   361  		log.L().Debug("no subscriber for this chain id, drop the block", zap.Uint32("chain id", chainID))
   362  		return
   363  	}
   364  	d.blockChanLock.Lock()
   365  	defer d.blockChanLock.Unlock()
   366  	l := len(d.blockChan)
   367  	c := cap(d.blockChan)
   368  	if l < c {
   369  		d.blockChan <- &blockMsg{
   370  			ctx:     ctx,
   371  			chainID: chainID,
   372  			block:   msg,
   373  			peer:    peer,
   374  		}
   375  		l++
   376  	} else {
   377  		log.L().Warn("dispatcher block channel is full, drop an event.")
   378  	}
   379  	subscriber.ReportFullness(ctx, iotexrpc.MessageType_BLOCK, float32(l)/float32(c))
   380  }
   381  
   382  // dispatchBlockSyncReq adds the passed block sync request to the sync handling queue, throttling requests from the same peer.
   383  func (d *IotxDispatcher) dispatchBlockSyncReq(ctx context.Context, chainID uint32, peer peer.AddrInfo, msg proto.Message) {
   384  	if !d.IsReady() {
   385  		return
   386  	}
   387  	subscriber := d.subscriber(chainID)
   388  	if subscriber == nil {
   389  		log.L().Debug("no subscriber for this chain id, drop the request", zap.Uint32("chain id", chainID))
   390  		return
   391  	}
   392  	now := time.Now()
   393  	peerID := peer.ID.Pretty()
   394  	d.syncChanLock.Lock()
   395  	defer d.syncChanLock.Unlock()
   396  	last, ok := d.peerLastSync[peerID]
   397  	if ok && last.Add(d.syncInterval).After(now) {
   398  		return
   399  	}
   400  	d.peerLastSync[peerID] = now
   401  	l := len(d.syncChan)
   402  	c := cap(d.syncChan)
   403  	if l < c {
   404  		d.syncChan <- &blockSyncMsg{
   405  			ctx:     ctx,
   406  			chainID: chainID,
   407  			peer:    peer,
   408  			sync:    (msg).(*iotexrpc.BlockSync),
   409  		}
   410  		l++
   411  	} else {
   412  		log.L().Warn("dispatcher sync channel is full, drop an event.")
   413  	}
   414  	subscriber.ReportFullness(ctx, iotexrpc.MessageType_BLOCK_REQUEST, float32(l)/float32(c))
   415  }
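
// A throttling sketch: with ProcessSyncRequestInterval set to 10s (the default
// is 0, i.e. no throttling), a second BLOCK_REQUEST from the same peer inside
// the window is dropped before it reaches the sync channel:
//
//	t0:     request from peer A -> enqueued, peerLastSync[A] = t0
//	t0+3s:  request from peer A -> t0+10s is after now, dropped
//	t0+12s: request from peer A -> enqueued, peerLastSync[A] = t0+12s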
   416  
   417  // HandleBroadcast handles an incoming broadcast message
   418  func (d *IotxDispatcher) HandleBroadcast(ctx context.Context, chainID uint32, peer string, message proto.Message) {
   419  	subscriber := d.subscriber(chainID)
   420  	if subscriber == nil {
   421  		log.L().Warn("chainID has not been registered in dispatcher.", zap.Uint32("chainID", chainID))
   422  		return
   423  	}
   424  
   425  	switch msg := message.(type) {
   426  	case *iotextypes.ConsensusMessage:
   427  		if err := subscriber.HandleConsensusMsg(msg); err != nil {
   428  			log.L().Debug("Failed to handle consensus message.", zap.Error(err))
   429  		}
   430  	case *iotextypes.Action:
   431  		d.dispatchAction(ctx, chainID, msg)
   432  	case *iotextypes.Actions:
   433  		acts := msg
   434  		for i := range acts.Actions {
   435  			d.dispatchAction(ctx, chainID, acts.Actions[i])
   436  		}
   437  	case *iotextypes.Block:
   438  		d.dispatchBlock(ctx, chainID, peer, msg)
   439  	case *iotextypes.NodeInfo:
   440  		if err := subscriber.HandleNodeInfo(ctx, peer, msg); err != nil {
   441  			log.L().Warn("Failed to handle node info message.", zap.Error(err))
   442  		}
   443  	default:
   444  		msgType, _ := goproto.GetTypeFromRPCMsg(message)
   445  		log.L().Warn("Unexpected msgType handled by HandleBroadcast.", zap.Any("msgType", msgType))
   446  	}
   447  }
   448  
   449  // HandleTell handles an incoming unicast message
   450  func (d *IotxDispatcher) HandleTell(ctx context.Context, chainID uint32, peer peer.AddrInfo, message proto.Message) {
   451  	msgType, err := goproto.GetTypeFromRPCMsg(message)
   452  	if err != nil {
   453  		log.L().Warn("Unexpected message handled by HandleTell.", zap.Error(err))
   454  	}
   455  	switch msgType {
   456  	case iotexrpc.MessageType_BLOCK_REQUEST:
   457  		d.dispatchBlockSyncReq(ctx, chainID, peer, message)
   458  	case iotexrpc.MessageType_BLOCK:
   459  		d.dispatchBlock(ctx, chainID, peer.ID.Pretty(), message.(*iotextypes.Block))
   460  	case iotexrpc.MessageType_NODE_INFO_REQUEST:
   461  		d.dispatchNodeInfoRequest(ctx, chainID, peer, message.(*iotextypes.NodeInfoRequest))
   462  	case iotexrpc.MessageType_NODE_INFO:
   463  		d.dispatchNodeInfo(ctx, chainID, peer.ID.Pretty(), message.(*iotextypes.NodeInfo))
   464  	default:
   465  		log.L().Warn("Unexpected msgType handled by HandleTell.", zap.Any("msgType", msgType))
   466  	}
   467  }
   468  
   469  func (d *IotxDispatcher) dispatchNodeInfoRequest(ctx context.Context, chainID uint32, peer peer.AddrInfo, message *iotextypes.NodeInfoRequest) {
   470  	if !d.IsReady() {
   471  		return
   472  	}
   473  	subscriber := d.subscriber(chainID)
   474  	if subscriber == nil {
   475  		log.L().Debug("no subscriber for this chain id, drop the node info request", zap.Uint32("chain id", chainID))
   476  		return
   477  	}
   478  	if err := subscriber.HandleNodeInfoRequest(ctx, peer, message); err != nil {
   479  		log.L().Warn("failed to handle node info request message", zap.Error(err))
   480  	}
   481  }
   482  
   483  func (d *IotxDispatcher) dispatchNodeInfo(ctx context.Context, chainID uint32, peerID string, message *iotextypes.NodeInfo) {
   484  	if !d.IsReady() {
   485  		return
   486  	}
   487  	subscriber := d.subscriber(chainID)
   488  	if subscriber == nil {
   489  		log.L().Debug("no subscriber for this chain id, drop the node info", zap.Uint32("chain id", chainID))
   490  		return
   491  	}
   492  	if err := subscriber.HandleNodeInfo(ctx, peerID, message); err != nil {
   493  		log.L().Warn("failed to handle node info message", zap.Error(err))
   494  	}
   495  }
   496  
   497  func (d *IotxDispatcher) updateEventAudit(t iotexrpc.MessageType) {
   498  	d.eventAuditLock.Lock()
   499  	defer d.eventAuditLock.Unlock()
   500  	d.eventAudit[t]++
   501  }