github.com/ari-anchor/sei-tendermint@v0.0.0-20230519144642-dc826b7b56bb/internal/p2p/conn/connection.go (about)

     1  package conn
     2  
     3  import (
     4  	"bufio"
     5  	"context"
     6  	"errors"
     7  	"fmt"
     8  	"io"
     9  	"math"
    10  	"net"
    11  	"reflect"
    12  	"runtime/debug"
    13  	"sync"
    14  	"sync/atomic"
    15  	"time"
    16  
    17  	"github.com/gogo/protobuf/proto"
    18  
    19  	"github.com/ari-anchor/sei-tendermint/internal/libs/flowrate"
    20  	"github.com/ari-anchor/sei-tendermint/internal/libs/protoio"
    21  	"github.com/ari-anchor/sei-tendermint/internal/libs/timer"
    22  	"github.com/ari-anchor/sei-tendermint/libs/log"
    23  	tmmath "github.com/ari-anchor/sei-tendermint/libs/math"
    24  	"github.com/ari-anchor/sei-tendermint/libs/service"
    25  	tmp2p "github.com/ari-anchor/sei-tendermint/proto/tendermint/p2p"
    26  )
    27  
const (
	// mirrors MaxPacketMsgPayloadSize from config/config.go
	defaultMaxPacketMsgPayloadSize = 1400

	// numBatchPacketMsgs is how many packets sendSomePacketMsgs writes per
	// wakeup before yielding back to the send loop.
	numBatchPacketMsgs = 10
	// minReadBufferSize / minWriteBufferSize size the bufio wrappers placed
	// around the underlying net.Conn.
	minReadBufferSize  = 1024
	minWriteBufferSize = 65536
	// updateStats is how often per-channel send statistics are decayed.
	updateStats        = 2 * time.Second

	// some of these defaults are written in the user config
	// flushThrottle, sendRate, recvRate
	// TODO: remove values present in config
	defaultFlushThrottle = 100 * time.Millisecond

	defaultSendQueueCapacity   = 1
	defaultRecvBufferCapacity  = 4096
	defaultRecvMessageCapacity = 22020096      // 21MB
	defaultSendRate            = int64(512000) // 500KB/s
	defaultRecvRate            = int64(512000) // 500KB/s
	// defaultSendTimeout bounds how long channel.sendBytes blocks on a full queue.
	defaultSendTimeout         = 10 * time.Second
	defaultPingInterval        = 60 * time.Second
	defaultPongTimeout         = 90 * time.Second
)
    51  
// receiveCbFunc is invoked by recvRoutine each time a complete message has
// been assembled on a channel.
type receiveCbFunc func(ctx context.Context, chID ChannelID, msgBytes []byte)

// errorCbFunc is invoked (at most once, see stopForError) when the
// connection fails.
type errorCbFunc func(context.Context, interface{})
    54  
/*
Each peer has one `MConnection` (multiplex connection) instance.

__multiplex__ *noun* a system or signal involving simultaneous transmission of
several messages along a single channel of communication.

Each `MConnection` handles message transmission on multiple abstract communication
`Channel`s.  Each channel has a globally unique byte id.
The byte id and the relative priorities of each `Channel` are configured upon
initialization of the connection.

There are two methods for sending messages:

	func (m MConnection) Send(chID byte, msgBytes []byte) bool {}

`Send(chID, msgBytes)` is a blocking call that waits until `msg` is
successfully queued for the channel with the given id byte `chID`, or until the
request times out.  The message `msg` is serialized using Protobuf.

Inbound message bytes are handled with an onReceive callback function.
*/
type MConnection struct {
	service.BaseService
	logger log.Logger

	conn          net.Conn
	bufConnReader *bufio.Reader     // buffered reads from conn
	bufConnWriter *bufio.Writer     // buffered writes to conn; flushed via flushTimer
	sendMonitor   *flowrate.Monitor // throttles outbound bytes to config.SendRate
	recvMonitor   *flowrate.Monitor // throttles inbound bytes to config.RecvRate
	send          chan struct{}     // capacity-1 wakeup signal for sendRoutine
	pong          chan struct{}     // capacity-1 request for sendRoutine to reply with a pong
	channels      []*channel
	channelsIdx   map[ChannelID]*channel // same channels, keyed by ID for O(1) lookup in Send/recv
	onReceive     receiveCbFunc
	onError       errorCbFunc
	errored       uint32 // set once via CAS in stopForError; guards onError
	config        MConnConfig

	// Closing quitSendRoutine will cause the sendRoutine to eventually quit.
	// doneSendRoutine is closed when the sendRoutine actually quits.
	quitSendRoutine chan struct{}
	doneSendRoutine chan struct{}

	// Closing quitRecvRouting will cause the recvRouting to eventually quit.
	quitRecvRoutine chan struct{}

	// used to ensure FlushStop and OnStop
	// are safe to call concurrently.
	stopMtx sync.Mutex

	cancel context.CancelFunc

	flushTimer *timer.ThrottleTimer // flush writes as necessary but throttled.
	pingTimer  *time.Ticker         // send pings periodically

	// close conn if pong is not received in pongTimeout
	lastMsgRecv struct {
		sync.Mutex
		at time.Time
	}

	chStatsTimer *time.Ticker // update channel stats periodically

	created time.Time // time of creation

	// Cached result of maxPacketMsgSize(), computed once in NewMConnection.
	_maxPacketMsgSize int
}
   123  
// MConnConfig is a MConnection configuration.
type MConnConfig struct {
	// SendRate / RecvRate are rate limits in bytes per second, enforced via
	// the flowrate monitors.
	SendRate int64 `mapstructure:"send_rate"`
	RecvRate int64 `mapstructure:"recv_rate"`

	// Maximum payload size
	MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`

	// Interval to flush writes (throttled)
	FlushThrottle time.Duration `mapstructure:"flush_throttle"`

	// Interval to send pings
	PingInterval time.Duration `mapstructure:"ping_interval"`

	// Maximum wait time for pongs
	PongTimeout time.Duration `mapstructure:"pong_timeout"`

	// Process/Transport Start time
	StartTime time.Time `mapstructure:",omitempty"`
}
   144  
   145  // DefaultMConnConfig returns the default config.
   146  func DefaultMConnConfig() MConnConfig {
   147  	return MConnConfig{
   148  		SendRate:                defaultSendRate,
   149  		RecvRate:                defaultRecvRate,
   150  		MaxPacketMsgPayloadSize: defaultMaxPacketMsgPayloadSize,
   151  		FlushThrottle:           defaultFlushThrottle,
   152  		PingInterval:            defaultPingInterval,
   153  		PongTimeout:             defaultPongTimeout,
   154  		StartTime:               time.Now(),
   155  	}
   156  }
   157  
// NewMConnection wraps net.Conn and creates multiplex connection with a config
func NewMConnection(
	logger log.Logger,
	conn net.Conn,
	chDescs []*ChannelDescriptor,
	onReceive receiveCbFunc,
	onError errorCbFunc,
	config MConnConfig,
) *MConnection {
	mconn := &MConnection{
		logger:        logger,
		conn:          conn,
		bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize),
		bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize),
		sendMonitor:   flowrate.New(config.StartTime, 0, 0),
		recvMonitor:   flowrate.New(config.StartTime, 0, 0),
		send:          make(chan struct{}, 1), // capacity 1: coalesces wakeups
		pong:          make(chan struct{}, 1), // capacity 1: coalesces pong requests
		onReceive:     onReceive,
		onError:       onError,
		config:        config,
		created:       time.Now(),
		cancel:        func() {}, // no-op until a real cancel func is installed
	}

	mconn.BaseService = *service.NewBaseService(logger, "MConnection", mconn)

	// Create channels: a slice (for fair iteration in sendPacketMsg) and an
	// index by ID (for lookup in Send and recvRoutine).
	var channelsIdx = map[ChannelID]*channel{}
	var channels = []*channel{}

	for _, desc := range chDescs {
		channel := newChannel(mconn, *desc)
		channelsIdx[channel.desc.ID] = channel
		channels = append(channels, channel)
	}
	mconn.channels = channels
	mconn.channelsIdx = channelsIdx

	// maxPacketMsgSize() is a bit heavy (it marshals a worst-case packet),
	// so call just once and cache the result.
	mconn._maxPacketMsgSize = mconn.maxPacketMsgSize()

	return mconn
}
   202  
// OnStart implements BaseService. It initializes the timers and shutdown
// channels and spawns the send and receive goroutines.
func (c *MConnection) OnStart(ctx context.Context) error {
	c.flushTimer = timer.NewThrottleTimer("flush", c.config.FlushThrottle)
	c.pingTimer = time.NewTicker(c.config.PingInterval)
	c.chStatsTimer = time.NewTicker(updateStats)
	c.quitSendRoutine = make(chan struct{})
	c.doneSendRoutine = make(chan struct{})
	c.quitRecvRoutine = make(chan struct{})
	// Seed the last-received timestamp so the pong-timeout check in
	// sendRoutine does not fire immediately after startup.
	c.setRecvLastMsgAt(time.Now())
	go c.sendRoutine(ctx)
	go c.recvRoutine(ctx)
	return nil
}
   216  
   217  func (c *MConnection) setRecvLastMsgAt(t time.Time) {
   218  	c.lastMsgRecv.Lock()
   219  	defer c.lastMsgRecv.Unlock()
   220  	c.lastMsgRecv.at = t
   221  }
   222  
   223  func (c *MConnection) getLastMessageAt() time.Time {
   224  	c.lastMsgRecv.Lock()
   225  	defer c.lastMsgRecv.Unlock()
   226  	return c.lastMsgRecv.at
   227  }
   228  
// stopServices stops the BaseService and timers and closes the quitSendRoutine.
// if the quitSendRoutine was already closed, it returns true, otherwise it returns false.
// It uses the stopMtx to ensure only one of FlushStop and OnStop can do this at a time.
func (c *MConnection) stopServices() (alreadyStopped bool) {
	c.stopMtx.Lock()
	defer c.stopMtx.Unlock()

	// Non-blocking check: a closed channel is always ready to receive.
	select {
	case <-c.quitSendRoutine:
		// already quit
		return true
	default:
	}

	select {
	case <-c.quitRecvRoutine:
		// already quit
		return true
	default:
	}

	c.flushTimer.Stop()
	c.pingTimer.Stop()
	c.chStatsTimer.Stop()

	// inform the recvRouting that we are shutting down
	close(c.quitRecvRoutine)
	close(c.quitSendRoutine)
	return false
}
   259  
   260  // OnStop implements BaseService
   261  func (c *MConnection) OnStop() {
   262  	if c.stopServices() {
   263  		return
   264  	}
   265  
   266  	c.conn.Close()
   267  
   268  	// We can't close pong safely here because
   269  	// recvRoutine may write to it after we've stopped.
   270  	// Though it doesn't need to get closed at all,
   271  	// we close it @ recvRoutine.
   272  }
   273  
   274  func (c *MConnection) String() string {
   275  	return fmt.Sprintf("MConn{%v}", c.conn.RemoteAddr())
   276  }
   277  
   278  func (c *MConnection) flush() {
   279  	c.logger.Debug("Flush", "conn", c)
   280  	err := c.bufConnWriter.Flush()
   281  	if err != nil {
   282  		c.logger.Debug("MConnection flush failed", "err", err)
   283  	}
   284  }
   285  
   286  // Catch panics, usually caused by remote disconnects.
   287  func (c *MConnection) _recover(ctx context.Context) {
   288  	if r := recover(); r != nil {
   289  		c.logger.Error("MConnection panicked", "err", r, "stack", string(debug.Stack()))
   290  		c.stopForError(ctx, fmt.Errorf("recovered from panic: %v", r))
   291  	}
   292  }
   293  
   294  func (c *MConnection) stopForError(ctx context.Context, r interface{}) {
   295  	c.Stop()
   296  
   297  	if atomic.CompareAndSwapUint32(&c.errored, 0, 1) {
   298  		if c.onError != nil {
   299  			c.onError(ctx, r)
   300  		}
   301  	}
   302  }
   303  
   304  // Queues a message to be sent to channel.
   305  func (c *MConnection) Send(chID ChannelID, msgBytes []byte) bool {
   306  	if !c.IsRunning() {
   307  		return false
   308  	}
   309  
   310  	c.logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", msgBytes)
   311  
   312  	// Send message to channel.
   313  	channel, ok := c.channelsIdx[chID]
   314  	if !ok {
   315  		c.logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
   316  		return false
   317  	}
   318  
   319  	success := channel.sendBytes(msgBytes)
   320  	if success {
   321  		// Wake up sendRoutine if necessary
   322  		select {
   323  		case c.send <- struct{}{}:
   324  		default:
   325  		}
   326  	} else {
   327  		c.logger.Debug("Send failed", "channel", chID, "conn", c, "msgBytes", msgBytes)
   328  	}
   329  	return success
   330  }
   331  
// sendRoutine polls for packets to send from channels.
// It runs until ctx is canceled, quitSendRoutine is closed, a write fails,
// or nothing has been received within config.PongTimeout. On exit it closes
// doneSendRoutine so recvRoutine and callers can observe shutdown.
func (c *MConnection) sendRoutine(ctx context.Context) {
	defer c._recover(ctx)
	protoWriter := protoio.NewDelimitedWriter(c.bufConnWriter)

	// This ticker guarantees we periodically escape the select below and
	// re-evaluate the "last message received" deadline even when idle.
	pongTimeout := time.NewTicker(c.config.PongTimeout)
	defer pongTimeout.Stop()
FOR_LOOP:
	for {
		var _n int
		var err error
	SELECTION:
		select {
		case <-c.flushTimer.Ch:
			// NOTE: flushTimer.Set() must be called every time
			// something is written to .bufConnWriter.
			c.flush()
		case <-c.chStatsTimer.C:
			// Periodically decay per-channel send statistics.
			for _, channel := range c.channels {
				channel.updateStats()
			}
		case <-c.pingTimer.C:
			_n, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPing{}))
			if err != nil {
				c.logger.Error("Failed to send PacketPing", "err", err)
				break SELECTION
			}
			c.sendMonitor.Update(_n)
			c.flush()
		case <-c.pong:
			// recvRoutine asked us to answer a peer's ping.
			_n, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPong{}))
			if err != nil {
				c.logger.Error("Failed to send PacketPong", "err", err)
				break SELECTION
			}
			c.sendMonitor.Update(_n)
			c.flush()
		case <-ctx.Done():
			break FOR_LOOP
		case <-c.quitSendRoutine:
			break FOR_LOOP
		case <-pongTimeout.C:
			// the point of the pong timer is to check to
			// see if we've seen a message recently, so we
			// want to make sure that we escape this
			// select statement on an interval to ensure
			// that we avoid hanging on to dead
			// connections for too long.
			break SELECTION
		case <-c.send:
			// Send some PacketMsgs
			eof := c.sendSomePacketMsgs(ctx)
			if !eof {
				// Keep sendRoutine awake.
				select {
				case c.send <- struct{}{}:
				default:
				}
			}
		}

		// Treat a quiet connection as dead: if nothing (including pongs)
		// arrived within PongTimeout, fail the connection below.
		if time.Since(c.getLastMessageAt()) > c.config.PongTimeout {
			err = errors.New("pong timeout")
		}

		if err != nil {
			c.logger.Error("Connection failed @ sendRoutine", "conn", c, "err", err)
			c.stopForError(ctx, err)
			break FOR_LOOP
		}
		if !c.IsRunning() {
			break FOR_LOOP
		}
	}

	// Cleanup
	close(c.doneSendRoutine)
}
   410  
   411  // Returns true if messages from channels were exhausted.
   412  // Blocks in accordance to .sendMonitor throttling.
   413  func (c *MConnection) sendSomePacketMsgs(ctx context.Context) bool {
   414  	// Block until .sendMonitor says we can write.
   415  	// Once we're ready we send more than we asked for,
   416  	// but amortized it should even out.
   417  	c.sendMonitor.Limit(c._maxPacketMsgSize, c.config.SendRate, true)
   418  
   419  	// Now send some PacketMsgs.
   420  	for i := 0; i < numBatchPacketMsgs; i++ {
   421  		if c.sendPacketMsg(ctx) {
   422  			return true
   423  		}
   424  	}
   425  	return false
   426  }
   427  
   428  // Returns true if messages from channels were exhausted.
   429  func (c *MConnection) sendPacketMsg(ctx context.Context) bool {
   430  	// Choose a channel to create a PacketMsg from.
   431  	// The chosen channel will be the one whose recentlySent/priority is the least.
   432  	var leastRatio float32 = math.MaxFloat32
   433  	var leastChannel *channel
   434  	for _, channel := range c.channels {
   435  		// If nothing to send, skip this channel
   436  		if !channel.isSendPending() {
   437  			continue
   438  		}
   439  		// Get ratio, and keep track of lowest ratio.
   440  		ratio := float32(channel.recentlySent) / float32(channel.desc.Priority)
   441  		if ratio < leastRatio {
   442  			leastRatio = ratio
   443  			leastChannel = channel
   444  		}
   445  	}
   446  
   447  	// Nothing to send?
   448  	if leastChannel == nil {
   449  		return true
   450  	}
   451  	// c.logger.Info("Found a msgPacket to send")
   452  
   453  	// Make & send a PacketMsg from this channel
   454  	_n, err := leastChannel.writePacketMsgTo(c.bufConnWriter)
   455  	if err != nil {
   456  		c.logger.Error("Failed to write PacketMsg", "err", err)
   457  		c.stopForError(ctx, err)
   458  		return true
   459  	}
   460  	c.sendMonitor.Update(_n)
   461  	c.flushTimer.Set()
   462  	return false
   463  }
   464  
// recvRoutine reads PacketMsgs and reconstructs the message using the channels' "recving" buffer.
// After a whole message has been assembled, it's pushed to onReceive().
// Blocks depending on how the connection is throttled.
// Otherwise, it never blocks.
func (c *MConnection) recvRoutine(ctx context.Context) {
	defer c._recover(ctx)

	protoReader := protoio.NewDelimitedReader(c.bufConnReader, c._maxPacketMsgSize)

FOR_LOOP:
	for {
		// Exit promptly if the context is canceled or the send side has
		// already shut down.
		select {
		case <-ctx.Done():
			break FOR_LOOP
		case <-c.doneSendRoutine:
			break FOR_LOOP
		default:
		}

		// Block until .recvMonitor says we can read.
		c.recvMonitor.Limit(c._maxPacketMsgSize, c.config.RecvRate, true)

		// Peek into bufConnReader for debugging
		/*
			if numBytes := c.bufConnReader.Buffered(); numBytes > 0 {
				bz, err := c.bufConnReader.Peek(tmmath.MinInt(numBytes, 100))
				if err == nil {
					// return
				} else {
					c.logger.Debug("error peeking connection buffer", "err", err)
					// return nil
				}
				c.logger.Info("Peek connection buffer", "numBytes", numBytes, "bz", bz)
			}
		*/

		// Read packet type
		var packet tmp2p.Packet

		_n, err := protoReader.ReadMsg(&packet)
		c.recvMonitor.Update(_n)
		if err != nil {
			// stopServices was invoked and we are shutting down;
			// receiving is expected to fail since we will close the connection
			select {
			case <-ctx.Done():
			case <-c.quitRecvRoutine:
				break FOR_LOOP
			default:
			}

			if c.IsRunning() {
				if err == io.EOF {
					c.logger.Debug("Connection is closed @ recvRoutine (likely by the other side)", "conn", c)
				} else {
					c.logger.Debug("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err)
				}
				c.stopForError(ctx, err)
			}
			break FOR_LOOP
		}

		// record for pong/heartbeat: any inbound packet counts as liveness,
		// which is what sendRoutine's pong-timeout check relies on.
		c.setRecvLastMsgAt(time.Now())

		// Read more depending on packet type.
		switch pkt := packet.Sum.(type) {
		case *tmp2p.Packet_PacketPing:
			// TODO: prevent abuse, as they cause flush()'s.
			// https://github.com/ari-anchor/sei-tendermint/issues/1190
			select {
			case c.pong <- struct{}{}:
			default:
				// never block
			}
		case *tmp2p.Packet_PacketPong:
			// do nothing, we updated the "last message
			// received" timestamp above, so we can ignore
			// this message
		case *tmp2p.Packet_PacketMsg:
			channelID := ChannelID(pkt.PacketMsg.ChannelID)
			channel, ok := c.channelsIdx[channelID]
			// Reject IDs outside the byte range as well as unknown channels.
			if pkt.PacketMsg.ChannelID < 0 || pkt.PacketMsg.ChannelID > math.MaxUint8 || !ok || channel == nil {
				err := fmt.Errorf("unknown channel %X", pkt.PacketMsg.ChannelID)
				c.logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
				c.stopForError(ctx, err)
				break FOR_LOOP
			}

			msgBytes, err := channel.recvPacketMsg(*pkt.PacketMsg)
			if err != nil {
				if c.IsRunning() {
					c.logger.Error("Connection failed @ recvRoutine recvPacketMsg", "conn", c, "err", err)
					c.stopForError(ctx, err)
				}
				break FOR_LOOP
			}
			// msgBytes is non-nil only once a complete message (EOF packet)
			// has been assembled.
			if msgBytes != nil {
				c.logger.Debug("Received bytes", "chID", channelID, "msgBytes", msgBytes)
				// NOTE: This means the reactor.Receive runs in the same thread as the p2p recv routine
				c.onReceive(ctx, channelID, msgBytes)
			}
		default:
			err := fmt.Errorf("unknown message type %v", reflect.TypeOf(packet))
			c.logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
			c.stopForError(ctx, err)
			break FOR_LOOP
		}
	}

	// Cleanup: close and drain pong so a blocked sender (none today, since
	// sends are non-blocking) could never deadlock.
	close(c.pong)
	for range c.pong {
		// Drain
	}
}
   581  
   582  // maxPacketMsgSize returns a maximum size of PacketMsg
   583  func (c *MConnection) maxPacketMsgSize() int {
   584  	bz, err := proto.Marshal(mustWrapPacket(&tmp2p.PacketMsg{
   585  		ChannelID: 0x01,
   586  		EOF:       true,
   587  		Data:      make([]byte, c.config.MaxPacketMsgPayloadSize),
   588  	}))
   589  	if err != nil {
   590  		panic(err)
   591  	}
   592  	return len(bz)
   593  }
   594  
// ChannelStatus is a point-in-time snapshot of a channel's send-side state.
type ChannelStatus struct {
	ID                byte
	SendQueueCapacity int
	SendQueueSize     int
	Priority          int
	RecentlySent      int64
}
   602  
// -----------------------------------------------------------------------------
// ChannelID is an arbitrary channel ID.
type ChannelID uint16

// ChannelDescriptor describes one multiplexed channel: its identity,
// scheduling priority, and buffer/queue capacities.
type ChannelDescriptor struct {
	ID       ChannelID
	Priority int // relative bandwidth share; must be > 0 (see newChannel)

	MessageType proto.Message

	// TODO: Remove once p2p refactor is complete.
	SendQueueCapacity   int
	RecvMessageCapacity int

	// RecvBufferCapacity defines the max buffer size of inbound messages for a
	// given p2p Channel queue.
	RecvBufferCapacity int

	// Human readable name of the channel, used in logging and
	// diagnostics.
	Name string
}
   625  
   626  func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) {
   627  	if chDesc.SendQueueCapacity == 0 {
   628  		chDesc.SendQueueCapacity = defaultSendQueueCapacity
   629  	}
   630  	if chDesc.RecvBufferCapacity == 0 {
   631  		chDesc.RecvBufferCapacity = defaultRecvBufferCapacity
   632  	}
   633  	if chDesc.RecvMessageCapacity == 0 {
   634  		chDesc.RecvMessageCapacity = defaultRecvMessageCapacity
   635  	}
   636  	filled = chDesc
   637  	return
   638  }
   639  
// NOTE: not goroutine-safe.
type channel struct {
	// Exponential moving average.
	// This field must be accessed atomically.
	// It is first in the struct to ensure correct alignment.
	// See https://github.com/ari-anchor/sei-tendermint/issues/7000.
	recentlySent int64

	conn          *MConnection
	desc          ChannelDescriptor
	sendQueue     chan []byte // queued outbound messages, capacity desc.SendQueueCapacity
	sendQueueSize int32       // atomic.
	recving       []byte      // partial inbound message being reassembled
	sending       []byte      // remainder of the outbound message being packetized

	// Copied from conn.config at construction to avoid the indirection.
	maxPacketMsgPayloadSize int

	logger log.Logger
}
   659  
   660  func newChannel(conn *MConnection, desc ChannelDescriptor) *channel {
   661  	desc = desc.FillDefaults()
   662  	if desc.Priority <= 0 {
   663  		panic("Channel default priority must be a positive integer")
   664  	}
   665  	return &channel{
   666  		conn:                    conn,
   667  		desc:                    desc,
   668  		sendQueue:               make(chan []byte, desc.SendQueueCapacity),
   669  		recving:                 make([]byte, 0, desc.RecvBufferCapacity),
   670  		maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize,
   671  		logger:                  conn.logger,
   672  	}
   673  }
   674  
   675  // Queues message to send to this channel.
   676  // Goroutine-safe
   677  // Times out (and returns false) after defaultSendTimeout
   678  func (ch *channel) sendBytes(bytes []byte) bool {
   679  	select {
   680  	case ch.sendQueue <- bytes:
   681  		atomic.AddInt32(&ch.sendQueueSize, 1)
   682  		return true
   683  	case <-time.After(defaultSendTimeout):
   684  		return false
   685  	}
   686  }
   687  
   688  // Returns true if any PacketMsgs are pending to be sent.
   689  // Call before calling nextPacketMsg()
   690  // Goroutine-safe
   691  func (ch *channel) isSendPending() bool {
   692  	if len(ch.sending) == 0 {
   693  		if len(ch.sendQueue) == 0 {
   694  			return false
   695  		}
   696  		ch.sending = <-ch.sendQueue
   697  	}
   698  	return true
   699  }
   700  
   701  // Creates a new PacketMsg to send.
   702  // Not goroutine-safe
   703  func (ch *channel) nextPacketMsg() tmp2p.PacketMsg {
   704  	packet := tmp2p.PacketMsg{ChannelID: int32(ch.desc.ID)}
   705  	maxSize := ch.maxPacketMsgPayloadSize
   706  	packet.Data = ch.sending[:tmmath.MinInt(maxSize, len(ch.sending))]
   707  	if len(ch.sending) <= maxSize {
   708  		packet.EOF = true
   709  		ch.sending = nil
   710  		atomic.AddInt32(&ch.sendQueueSize, -1) // decrement sendQueueSize
   711  	} else {
   712  		packet.EOF = false
   713  		ch.sending = ch.sending[tmmath.MinInt(maxSize, len(ch.sending)):]
   714  	}
   715  	return packet
   716  }
   717  
   718  // Writes next PacketMsg to w and updates c.recentlySent.
   719  // Not goroutine-safe
   720  func (ch *channel) writePacketMsgTo(w io.Writer) (n int, err error) {
   721  	packet := ch.nextPacketMsg()
   722  	n, err = protoio.NewDelimitedWriter(w).WriteMsg(mustWrapPacket(&packet))
   723  	atomic.AddInt64(&ch.recentlySent, int64(n))
   724  	return
   725  }
   726  
   727  // Handles incoming PacketMsgs. It returns a message bytes if message is
   728  // complete, which is owned by the caller and will not be modified.
   729  // Not goroutine-safe
   730  func (ch *channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) {
   731  	ch.logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet)
   732  	var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Data)
   733  	if recvCap < recvReceived {
   734  		return nil, fmt.Errorf("received message exceeds available capacity: %v < %v", recvCap, recvReceived)
   735  	}
   736  	ch.recving = append(ch.recving, packet.Data...)
   737  	if packet.EOF {
   738  		msgBytes := ch.recving
   739  		ch.recving = make([]byte, 0, ch.desc.RecvBufferCapacity)
   740  		return msgBytes, nil
   741  	}
   742  	return nil, nil
   743  }
   744  
   745  // Call this periodically to update stats for throttling purposes.
   746  // Not goroutine-safe
   747  func (ch *channel) updateStats() {
   748  	// Exponential decay of stats.
   749  	// TODO: optimize.
   750  	atomic.StoreInt64(&ch.recentlySent, int64(float64(atomic.LoadInt64(&ch.recentlySent))*0.8))
   751  }
   752  
   753  //----------------------------------------
   754  // Packet
   755  
   756  // mustWrapPacket takes a packet kind (oneof) and wraps it in a tmp2p.Packet message.
   757  func mustWrapPacket(pb proto.Message) *tmp2p.Packet {
   758  	var msg tmp2p.Packet
   759  
   760  	switch pb := pb.(type) {
   761  	case *tmp2p.Packet: // already a packet
   762  		msg = *pb
   763  	case *tmp2p.PacketPing:
   764  		msg = tmp2p.Packet{
   765  			Sum: &tmp2p.Packet_PacketPing{
   766  				PacketPing: pb,
   767  			},
   768  		}
   769  	case *tmp2p.PacketPong:
   770  		msg = tmp2p.Packet{
   771  			Sum: &tmp2p.Packet_PacketPong{
   772  				PacketPong: pb,
   773  			},
   774  		}
   775  	case *tmp2p.PacketMsg:
   776  		msg = tmp2p.Packet{
   777  			Sum: &tmp2p.Packet_PacketMsg{
   778  				PacketMsg: pb,
   779  			},
   780  		}
   781  	default:
   782  		panic(fmt.Errorf("unknown packet type %T", pb))
   783  	}
   784  
   785  	return &msg
   786  }