github.com/vipernet-xyz/tm@v0.34.24/p2p/conn/connection.go

     1  package conn
     2  
     3  import (
     4  	"bufio"
     5  	"errors"
     6  	"fmt"
     7  	"io"
     8  	"math"
     9  	"net"
    10  	"reflect"
    11  	"runtime/debug"
    12  	"sync/atomic"
    13  	"time"
    14  
    15  	"github.com/gogo/protobuf/proto"
    16  
    17  	flow "github.com/vipernet-xyz/tm/libs/flowrate"
    18  	"github.com/vipernet-xyz/tm/libs/log"
    19  	tmmath "github.com/vipernet-xyz/tm/libs/math"
    20  	"github.com/vipernet-xyz/tm/libs/protoio"
    21  	"github.com/vipernet-xyz/tm/libs/service"
    22  	tmsync "github.com/vipernet-xyz/tm/libs/sync"
    23  	"github.com/vipernet-xyz/tm/libs/timer"
    24  	tmp2p "github.com/vipernet-xyz/tm/proto/tendermint/p2p"
    25  )
    26  
    27  const (
    28  	defaultMaxPacketMsgPayloadSize = 1024
    29  
    30  	numBatchPacketMsgs = 10
    31  	minReadBufferSize  = 1024
    32  	minWriteBufferSize = 65536
    33  	updateStats        = 2 * time.Second
    34  
    35  	// Some of these defaults (flushThrottle, sendRate, recvRate) also appear
    36  	// in the user config.
    37  	// TODO: remove the values that are duplicated in the config.
    38  	defaultFlushThrottle = 100 * time.Millisecond
    39  
    40  	defaultSendQueueCapacity   = 1
    41  	defaultRecvBufferCapacity  = 4096
    42  	defaultRecvMessageCapacity = 22020096      // 21MB
    43  	defaultSendRate            = int64(512000) // 500KB/s
    44  	defaultRecvRate            = int64(512000) // 500KB/s
    45  	defaultSendTimeout         = 10 * time.Second
    46  	defaultPingInterval        = 60 * time.Second
    47  	defaultPongTimeout         = 45 * time.Second
    48  )
    49  
    50  type receiveCbFunc func(chID byte, msgBytes []byte)
    51  type errorCbFunc func(interface{})
    52  
    53  /*
    54  Each peer has one `MConnection` (multiplex connection) instance.
    55  
    56  __multiplex__ *noun* a system or signal involving simultaneous transmission of
    57  several messages along a single channel of communication.
    58  
    59  Each `MConnection` handles message transmission on multiple abstract communication
    60  `Channel`s.  Each channel has a globally unique byte id.
    61  The byte id and the relative priorities of each `Channel` are configured upon
    62  initialization of the connection.
    63  
    64  There are two methods for sending messages:
    65  
    66  	func (m MConnection) Send(chID byte, msgBytes []byte) bool {}
    67  	func (m MConnection) TrySend(chID byte, msgBytes []byte) bool {}
    68  
    69  `Send(chID, msgBytes)` is a blocking call that waits until `msgBytes` is
    70  successfully queued for the channel with the given id byte `chID`, or until the
    71  request times out.  The message bytes are wrapped in Protobuf packets for transmission.
    72  
    73  `TrySend(chID, msgBytes)` is a nonblocking call that returns false if the
    74  channel's queue is full.
    75  
    76  Inbound message bytes are handled with an onReceive callback function.
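
A minimal usage sketch (the channel id 0x20, descriptor values, and payload below
are illustrative placeholders, not values defined by this package):

	chDesc := &ChannelDescriptor{ID: 0x20, Priority: 1}
	onReceive := func(chID byte, msgBytes []byte) {
		// handle a fully assembled message from channel chID
	}
	onError := func(r interface{}) {
		// handle a connection-level error
	}
	mconn := NewMConnection(conn, []*ChannelDescriptor{chDesc}, onReceive, onError)
	if err := mconn.Start(); err != nil {
		// handle start error
	}
	if ok := mconn.Send(0x20, []byte("payload")); !ok {
		// queueing failed (timeout or unknown channel)
	}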
    77  */
    78  type MConnection struct {
    79  	service.BaseService
    80  
    81  	conn          net.Conn
    82  	bufConnReader *bufio.Reader
    83  	bufConnWriter *bufio.Writer
    84  	sendMonitor   *flow.Monitor
    85  	recvMonitor   *flow.Monitor
    86  	send          chan struct{}
    87  	pong          chan struct{}
    88  	channels      []*Channel
    89  	channelsIdx   map[byte]*Channel
    90  	onReceive     receiveCbFunc
    91  	onError       errorCbFunc
    92  	errored       uint32
    93  	config        MConnConfig
    94  
    95  	// Closing quitSendRoutine will cause the sendRoutine to eventually quit.
    96  	// doneSendRoutine is closed when the sendRoutine actually quits.
    97  	quitSendRoutine chan struct{}
    98  	doneSendRoutine chan struct{}
    99  
   100  	// Closing quitRecvRoutine will cause the recvRoutine to eventually quit.
   101  	quitRecvRoutine chan struct{}
   102  
   103  	// used to ensure FlushStop and OnStop
   104  	// are safe to call concurrently.
   105  	stopMtx tmsync.Mutex
   106  
   107  	flushTimer *timer.ThrottleTimer // flush writes as necessary but throttled.
   108  	pingTimer  *time.Ticker         // send pings periodically
   109  
   110  	// close conn if pong is not received in pongTimeout
   111  	pongTimer     *time.Timer
   112  	pongTimeoutCh chan bool // true - timeout, false - peer sent pong
   113  
   114  	chStatsTimer *time.Ticker // update channel stats periodically
   115  
   116  	created time.Time // time of creation
   117  
   118  	_maxPacketMsgSize int
   119  }
   120  
   121  // MConnConfig is a MConnection configuration.
   122  type MConnConfig struct {
   123  	SendRate int64 `mapstructure:"send_rate"`
   124  	RecvRate int64 `mapstructure:"recv_rate"`
   125  
   126  	// Maximum payload size
   127  	MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`
   128  
   129  	// Interval to flush writes (throttled)
   130  	FlushThrottle time.Duration `mapstructure:"flush_throttle"`
   131  
   132  	// Interval to send pings
   133  	PingInterval time.Duration `mapstructure:"ping_interval"`
   134  
   135  	// Maximum wait time for pongs
   136  	PongTimeout time.Duration `mapstructure:"pong_timeout"`
   137  }
   138  
   139  // DefaultMConnConfig returns the default config.
   140  func DefaultMConnConfig() MConnConfig {
   141  	return MConnConfig{
   142  		SendRate:                defaultSendRate,
   143  		RecvRate:                defaultRecvRate,
   144  		MaxPacketMsgPayloadSize: defaultMaxPacketMsgPayloadSize,
   145  		FlushThrottle:           defaultFlushThrottle,
   146  		PingInterval:            defaultPingInterval,
   147  		PongTimeout:             defaultPongTimeout,
   148  	}
   149  }
   150  
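// A configuration sketch (the rates and intervals below are illustrative, not
// recommendations). Note that PongTimeout must be strictly less than PingInterval,
// otherwise NewMConnectionWithConfig panics:
//
//	cfg := DefaultMConnConfig()
//	cfg.SendRate = 1024000 // ~1 MB/s
//	cfg.RecvRate = 1024000 // ~1 MB/s
//	cfg.PingInterval = 30 * time.Second
//	cfg.PongTimeout = 20 * time.Second
//	mconn := NewMConnectionWithConfig(conn, chDescs, onReceive, onError, cfg)
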
   151  // NewMConnection wraps net.Conn and creates a multiplex connection with the default config.
   152  func NewMConnection(
   153  	conn net.Conn,
   154  	chDescs []*ChannelDescriptor,
   155  	onReceive receiveCbFunc,
   156  	onError errorCbFunc,
   157  ) *MConnection {
   158  	return NewMConnectionWithConfig(
   159  		conn,
   160  		chDescs,
   161  		onReceive,
   162  		onError,
   163  		DefaultMConnConfig())
   164  }
   165  
   166  // NewMConnectionWithConfig wraps net.Conn and creates a multiplex connection with the given config.
   167  func NewMConnectionWithConfig(
   168  	conn net.Conn,
   169  	chDescs []*ChannelDescriptor,
   170  	onReceive receiveCbFunc,
   171  	onError errorCbFunc,
   172  	config MConnConfig,
   173  ) *MConnection {
   174  	if config.PongTimeout >= config.PingInterval {
   175  		panic("pongTimeout must be less than pingInterval (otherwise, next ping will reset pong timer)")
   176  	}
   177  
   178  	mconn := &MConnection{
   179  		conn:          conn,
   180  		bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize),
   181  		bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize),
   182  		sendMonitor:   flow.New(0, 0),
   183  		recvMonitor:   flow.New(0, 0),
   184  		send:          make(chan struct{}, 1),
   185  		pong:          make(chan struct{}, 1),
   186  		onReceive:     onReceive,
   187  		onError:       onError,
   188  		config:        config,
   189  		created:       time.Now(),
   190  	}
   191  
   192  	// Create channels
   193  	var channelsIdx = map[byte]*Channel{}
   194  	var channels = []*Channel{}
   195  
   196  	for _, desc := range chDescs {
   197  		channel := newChannel(mconn, *desc)
   198  		channelsIdx[channel.desc.ID] = channel
   199  		channels = append(channels, channel)
   200  	}
   201  	mconn.channels = channels
   202  	mconn.channelsIdx = channelsIdx
   203  
   204  	mconn.BaseService = *service.NewBaseService(nil, "MConnection", mconn)
   205  
   206  	// maxPacketMsgSize() is a bit heavy, so call just once
   207  	mconn._maxPacketMsgSize = mconn.maxPacketMsgSize()
   208  
   209  	return mconn
   210  }
   211  
   212  func (c *MConnection) SetLogger(l log.Logger) {
   213  	c.BaseService.SetLogger(l)
   214  	for _, ch := range c.channels {
   215  		ch.SetLogger(l)
   216  	}
   217  }
   218  
   219  // OnStart implements BaseService
   220  func (c *MConnection) OnStart() error {
   221  	if err := c.BaseService.OnStart(); err != nil {
   222  		return err
   223  	}
   224  	c.flushTimer = timer.NewThrottleTimer("flush", c.config.FlushThrottle)
   225  	c.pingTimer = time.NewTicker(c.config.PingInterval)
   226  	c.pongTimeoutCh = make(chan bool, 1)
   227  	c.chStatsTimer = time.NewTicker(updateStats)
   228  	c.quitSendRoutine = make(chan struct{})
   229  	c.doneSendRoutine = make(chan struct{})
   230  	c.quitRecvRoutine = make(chan struct{})
   231  	go c.sendRoutine()
   232  	go c.recvRoutine()
   233  	return nil
   234  }
   235  
   236  // stopServices stops the BaseService and timers and closes the quit channels.
   237  // If the send/recv routines have already quit, it returns true; otherwise it returns false.
   238  // It uses the stopMtx to ensure only one of FlushStop and OnStop can do this at a time.
   239  func (c *MConnection) stopServices() (alreadyStopped bool) {
   240  	c.stopMtx.Lock()
   241  	defer c.stopMtx.Unlock()
   242  
   243  	select {
   244  	case <-c.quitSendRoutine:
   245  		// already quit
   246  		return true
   247  	default:
   248  	}
   249  
   250  	select {
   251  	case <-c.quitRecvRoutine:
   252  		// already quit
   253  		return true
   254  	default:
   255  	}
   256  
   257  	c.BaseService.OnStop()
   258  	c.flushTimer.Stop()
   259  	c.pingTimer.Stop()
   260  	c.chStatsTimer.Stop()
   261  
   262  	// inform the recvRoutine that we are shutting down
   263  	close(c.quitRecvRoutine)
   264  	close(c.quitSendRoutine)
   265  	return false
   266  }
   267  
   268  // FlushStop replicates the logic of OnStop.
   269  // It additionally ensures that all successful
   270  // .Send() calls will get flushed before closing
   271  // the connection.
   272  func (c *MConnection) FlushStop() {
   273  	if c.stopServices() {
   274  		return
   275  	}
   276  
   277  	// this block is unique to FlushStop
   278  	{
   279  		// wait until the sendRoutine exits
   280  		// so we don't race on calling sendSomePacketMsgs
   281  		<-c.doneSendRoutine
   282  
   283  		// Send and flush all pending msgs.
   284  		// Since sendRoutine has exited, we can call this
   285  		// safely
   286  		eof := c.sendSomePacketMsgs()
   287  		for !eof {
   288  			eof = c.sendSomePacketMsgs()
   289  		}
   290  		c.flush()
   291  
   292  		// Now we can close the connection
   293  	}
   294  
   295  	c.conn.Close()
   296  
   297  	// We can't close pong safely here because
   298  	// recvRoutine may write to it after we've stopped.
   299  	// Though it doesn't need to get closed at all,
   300  	// we close it @ recvRoutine.
   301  
   302  	// c.Stop()
   303  }
   304  
   305  // OnStop implements BaseService
   306  func (c *MConnection) OnStop() {
   307  	if c.stopServices() {
   308  		return
   309  	}
   310  
   311  	c.conn.Close()
   312  
   313  	// We can't close pong safely here because
   314  	// recvRoutine may write to it after we've stopped.
   315  	// Though it doesn't need to get closed at all,
   316  	// we close it @ recvRoutine.
   317  }
   318  
   319  func (c *MConnection) String() string {
   320  	return fmt.Sprintf("MConn{%v}", c.conn.RemoteAddr())
   321  }
   322  
   323  func (c *MConnection) flush() {
   324  	c.Logger.Debug("Flush", "conn", c)
   325  	err := c.bufConnWriter.Flush()
   326  	if err != nil {
   327  		c.Logger.Debug("MConnection flush failed", "err", err)
   328  	}
   329  }
   330  
   331  // Catch panics, usually caused by remote disconnects.
   332  func (c *MConnection) _recover() {
   333  	if r := recover(); r != nil {
   334  		c.Logger.Error("MConnection panicked", "err", r, "stack", string(debug.Stack()))
   335  		c.stopForError(fmt.Errorf("recovered from panic: %v", r))
   336  	}
   337  }
   338  
   339  func (c *MConnection) stopForError(r interface{}) {
   340  	if err := c.Stop(); err != nil {
   341  		c.Logger.Error("Error stopping connection", "err", err)
   342  	}
   343  	if atomic.CompareAndSwapUint32(&c.errored, 0, 1) {
   344  		if c.onError != nil {
   345  			c.onError(r)
   346  		}
   347  	}
   348  }
   349  
   350  // Send queues a message to be sent to the channel with the given id. Blocking; returns false on timeout or unknown channel.
   351  func (c *MConnection) Send(chID byte, msgBytes []byte) bool {
   352  	if !c.IsRunning() {
   353  		return false
   354  	}
   355  
   356  	c.Logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", log.NewLazySprintf("%X", msgBytes))
   357  
   358  	// Send message to channel.
   359  	channel, ok := c.channelsIdx[chID]
   360  	if !ok {
   361  		c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
   362  		return false
   363  	}
   364  
   365  	success := channel.sendBytes(msgBytes)
   366  	if success {
   367  		// Wake up sendRoutine if necessary
   368  		select {
   369  		case c.send <- struct{}{}:
   370  		default:
   371  		}
   372  	} else {
   373  		c.Logger.Debug("Send failed", "channel", chID, "conn", c, "msgBytes", log.NewLazySprintf("%X", msgBytes))
   374  	}
   375  	return success
   376  }
   377  
   378  // TrySend queues a message to be sent to the channel with the given id.
   379  // Nonblocking; returns false if the channel's queue is full or the channel is unknown.
   380  func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool {
   381  	if !c.IsRunning() {
   382  		return false
   383  	}
   384  
   385  	c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", log.NewLazySprintf("%X", msgBytes))
   386  
   387  	// Send message to channel.
   388  	channel, ok := c.channelsIdx[chID]
   389  	if !ok {
   390  		c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
   391  		return false
   392  	}
   393  
   394  	ok = channel.trySendBytes(msgBytes)
   395  	if ok {
   396  		// Wake up sendRoutine if necessary
   397  		select {
   398  		case c.send <- struct{}{}:
   399  		default:
   400  		}
   401  	}
   402  
   403  	return ok
   404  }
   405  
   406  // CanSend returns true if you can send more data to the channel with the given
   407  // id, false otherwise. Use only as a heuristic.
   408  func (c *MConnection) CanSend(chID byte) bool {
   409  	if !c.IsRunning() {
   410  		return false
   411  	}
   412  
   413  	channel, ok := c.channelsIdx[chID]
   414  	if !ok {
   415  		c.Logger.Error(fmt.Sprintf("Unknown channel %X", chID))
   416  		return false
   417  	}
   418  	return channel.canSend()
   419  }
   420  
   421  // sendRoutine polls for packets to send from channels.
   422  func (c *MConnection) sendRoutine() {
   423  	defer c._recover()
   424  
   425  	protoWriter := protoio.NewDelimitedWriter(c.bufConnWriter)
   426  
   427  FOR_LOOP:
   428  	for {
   429  		var _n int
   430  		var err error
   431  	SELECTION:
   432  		select {
   433  		case <-c.flushTimer.Ch:
   434  			// NOTE: flushTimer.Set() must be called every time
   435  			// something is written to .bufConnWriter.
   436  			c.flush()
   437  		case <-c.chStatsTimer.C:
   438  			for _, channel := range c.channels {
   439  				channel.updateStats()
   440  			}
   441  		case <-c.pingTimer.C:
   442  			c.Logger.Debug("Send Ping")
   443  			_n, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPing{}))
   444  			if err != nil {
   445  				c.Logger.Error("Failed to send PacketPing", "err", err)
   446  				break SELECTION
   447  			}
   448  			c.sendMonitor.Update(_n)
   449  			c.Logger.Debug("Starting pong timer", "dur", c.config.PongTimeout)
   450  			c.pongTimer = time.AfterFunc(c.config.PongTimeout, func() {
   451  				select {
   452  				case c.pongTimeoutCh <- true:
   453  				default:
   454  				}
   455  			})
   456  			c.flush()
   457  		case timeout := <-c.pongTimeoutCh:
   458  			if timeout {
   459  				c.Logger.Debug("Pong timeout")
   460  				err = errors.New("pong timeout")
   461  			} else {
   462  				c.stopPongTimer()
   463  			}
   464  		case <-c.pong:
   465  			c.Logger.Debug("Send Pong")
   466  			_n, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPong{}))
   467  			if err != nil {
   468  				c.Logger.Error("Failed to send PacketPong", "err", err)
   469  				break SELECTION
   470  			}
   471  			c.sendMonitor.Update(_n)
   472  			c.flush()
   473  		case <-c.quitSendRoutine:
   474  			break FOR_LOOP
   475  		case <-c.send:
   476  			// Send some PacketMsgs
   477  			eof := c.sendSomePacketMsgs()
   478  			if !eof {
   479  				// Keep sendRoutine awake.
   480  				select {
   481  				case c.send <- struct{}{}:
   482  				default:
   483  				}
   484  			}
   485  		}
   486  
   487  		if !c.IsRunning() {
   488  			break FOR_LOOP
   489  		}
   490  		if err != nil {
   491  			c.Logger.Error("Connection failed @ sendRoutine", "conn", c, "err", err)
   492  			c.stopForError(err)
   493  			break FOR_LOOP
   494  		}
   495  	}
   496  
   497  	// Cleanup
   498  	c.stopPongTimer()
   499  	close(c.doneSendRoutine)
   500  }
   501  
   502  // Returns true if messages from channels were exhausted.
   503  // Blocks in accordance with .sendMonitor throttling.
   504  func (c *MConnection) sendSomePacketMsgs() bool {
   505  	// Block until .sendMonitor says we can write.
   506  	// Once we're ready we send more than we asked for,
   507  	// but amortized it should even out.
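	// E.g. with the default SendRate of 512000 B/s and 1024-byte payloads, this
	// lets through on the order of 500 packets per second on average.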
   508  	c.sendMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true)
   509  
   510  	// Now send some PacketMsgs.
   511  	for i := 0; i < numBatchPacketMsgs; i++ {
   512  		if c.sendPacketMsg() {
   513  			return true
   514  		}
   515  	}
   516  	return false
   517  }
   518  
   519  // Returns true if messages from channels were exhausted.
   520  func (c *MConnection) sendPacketMsg() bool {
   521  	// Choose a channel to create a PacketMsg from.
   522  	// The chosen channel will be the one whose recentlySent/priority is the least.
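	// E.g. a channel with priority 10 and recentlySent 1000 has ratio 100, while one
	// with priority 5 and recentlySent 200 has ratio 40, so the latter is chosen.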
   523  	var leastRatio float32 = math.MaxFloat32
   524  	var leastChannel *Channel
   525  	for _, channel := range c.channels {
   526  		// If nothing to send, skip this channel
   527  		if !channel.isSendPending() {
   528  			continue
   529  		}
   530  		// Get ratio, and keep track of lowest ratio.
   531  		ratio := float32(channel.recentlySent) / float32(channel.desc.Priority)
   532  		if ratio < leastRatio {
   533  			leastRatio = ratio
   534  			leastChannel = channel
   535  		}
   536  	}
   537  
   538  	// Nothing to send?
   539  	if leastChannel == nil {
   540  		return true
   541  	}
   542  	// c.Logger.Info("Found a msgPacket to send")
   543  
   544  	// Make & send a PacketMsg from this channel
   545  	_n, err := leastChannel.writePacketMsgTo(c.bufConnWriter)
   546  	if err != nil {
   547  		c.Logger.Error("Failed to write PacketMsg", "err", err)
   548  		c.stopForError(err)
   549  		return true
   550  	}
   551  	c.sendMonitor.Update(_n)
   552  	c.flushTimer.Set()
   553  	return false
   554  }
   555  
   556  // recvRoutine reads PacketMsgs and reconstructs the message using the channels' "recving" buffer.
   557  // After a whole message has been assembled, it's pushed to onReceive().
   558  // It blocks only when reads are throttled by recvMonitor;
   559  // otherwise, it never blocks.
   560  func (c *MConnection) recvRoutine() {
   561  	defer c._recover()
   562  
   563  	protoReader := protoio.NewDelimitedReader(c.bufConnReader, c._maxPacketMsgSize)
   564  
   565  FOR_LOOP:
   566  	for {
   567  		// Block until .recvMonitor says we can read.
   568  		c.recvMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.RecvRate), true)
   569  
   570  		// Peek into bufConnReader for debugging
   571  		/*
   572  			if numBytes := c.bufConnReader.Buffered(); numBytes > 0 {
   573  				bz, err := c.bufConnReader.Peek(tmmath.MinInt(numBytes, 100))
   574  				if err == nil {
   575  					// return
   576  				} else {
   577  					c.Logger.Debug("Error peeking connection buffer", "err", err)
   578  					// return nil
   579  				}
   580  				c.Logger.Info("Peek connection buffer", "numBytes", numBytes, "bz", bz)
   581  			}
   582  		*/
   583  
   584  		// Read packet type
   585  		var packet tmp2p.Packet
   586  
   587  		_n, err := protoReader.ReadMsg(&packet)
   588  		c.recvMonitor.Update(_n)
   589  		if err != nil {
   590  			// stopServices was invoked and we are shutting down
   591  			// receiving is expected to fail since we will close the connection
   592  			select {
   593  			case <-c.quitRecvRoutine:
   594  				break FOR_LOOP
   595  			default:
   596  			}
   597  
   598  			if c.IsRunning() {
   599  				if err == io.EOF {
   600  					c.Logger.Info("Connection is closed @ recvRoutine (likely by the other side)", "conn", c)
   601  				} else {
   602  					c.Logger.Debug("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err)
   603  				}
   604  				c.stopForError(err)
   605  			}
   606  			break FOR_LOOP
   607  		}
   608  
   609  		// Read more depending on packet type.
   610  		switch pkt := packet.Sum.(type) {
   611  		case *tmp2p.Packet_PacketPing:
   612  			// TODO: prevent abuse, as they cause flush()'s.
   613  			// https://github.com/vipernet-xyz/tm/issues/1190
   614  			c.Logger.Debug("Receive Ping")
   615  			select {
   616  			case c.pong <- struct{}{}:
   617  			default:
   618  				// never block
   619  			}
   620  		case *tmp2p.Packet_PacketPong:
   621  			c.Logger.Debug("Receive Pong")
   622  			select {
   623  			case c.pongTimeoutCh <- false:
   624  			default:
   625  				// never block
   626  			}
   627  		case *tmp2p.Packet_PacketMsg:
   628  			channelID := byte(pkt.PacketMsg.ChannelID)
   629  			channel, ok := c.channelsIdx[channelID]
   630  			if pkt.PacketMsg.ChannelID < 0 || pkt.PacketMsg.ChannelID > math.MaxUint8 || !ok || channel == nil {
   631  				err := fmt.Errorf("unknown channel %X", pkt.PacketMsg.ChannelID)
   632  				c.Logger.Debug("Connection failed @ recvRoutine", "conn", c, "err", err)
   633  				c.stopForError(err)
   634  				break FOR_LOOP
   635  			}
   636  
   637  			msgBytes, err := channel.recvPacketMsg(*pkt.PacketMsg)
   638  			if err != nil {
   639  				if c.IsRunning() {
   640  					c.Logger.Debug("Connection failed @ recvRoutine", "conn", c, "err", err)
   641  					c.stopForError(err)
   642  				}
   643  				break FOR_LOOP
   644  			}
   645  			if msgBytes != nil {
   646  				c.Logger.Debug("Received bytes", "chID", channelID, "msgBytes", msgBytes)
   647  				// NOTE: This means the reactor.Receive runs in the same thread as the p2p recv routine
   648  				c.onReceive(channelID, msgBytes)
   649  			}
   650  		default:
   651  			err := fmt.Errorf("unknown message type %v", reflect.TypeOf(packet))
   652  			c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
   653  			c.stopForError(err)
   654  			break FOR_LOOP
   655  		}
   656  	}
   657  
   658  	// Cleanup
   659  	close(c.pong)
   660  	for range c.pong {
   661  		// Drain
   662  	}
   663  }
   664  
   665  // not goroutine-safe
   666  func (c *MConnection) stopPongTimer() {
   667  	if c.pongTimer != nil {
   668  		_ = c.pongTimer.Stop()
   669  		c.pongTimer = nil
   670  	}
   671  }
   672  
   673  // maxPacketMsgSize returns the maximum size of a marshalled PacketMsg (max payload plus Packet envelope overhead).
   674  func (c *MConnection) maxPacketMsgSize() int {
   675  	bz, err := proto.Marshal(mustWrapPacket(&tmp2p.PacketMsg{
   676  		ChannelID: 0x01,
   677  		EOF:       true,
   678  		Data:      make([]byte, c.config.MaxPacketMsgPayloadSize),
   679  	}))
   680  	if err != nil {
   681  		panic(err)
   682  	}
   683  	return len(bz)
   684  }
   685  
   686  type ConnectionStatus struct {
   687  	Duration    time.Duration
   688  	SendMonitor flow.Status
   689  	RecvMonitor flow.Status
   690  	Channels    []ChannelStatus
   691  }
   692  
   693  type ChannelStatus struct {
   694  	ID                byte
   695  	SendQueueCapacity int
   696  	SendQueueSize     int
   697  	Priority          int
   698  	RecentlySent      int64
   699  }
   700  
   701  func (c *MConnection) Status() ConnectionStatus {
   702  	var status ConnectionStatus
   703  	status.Duration = time.Since(c.created)
   704  	status.SendMonitor = c.sendMonitor.Status()
   705  	status.RecvMonitor = c.recvMonitor.Status()
   706  	status.Channels = make([]ChannelStatus, len(c.channels))
   707  	for i, channel := range c.channels {
   708  		status.Channels[i] = ChannelStatus{
   709  			ID:                channel.desc.ID,
   710  			SendQueueCapacity: cap(channel.sendQueue),
   711  			SendQueueSize:     int(atomic.LoadInt32(&channel.sendQueueSize)),
   712  			Priority:          channel.desc.Priority,
   713  			RecentlySent:      atomic.LoadInt64(&channel.recentlySent),
   714  		}
   715  	}
   716  	return status
   717  }
   718  
   719  //-----------------------------------------------------------------------------
   720  
   721  type ChannelDescriptor struct {
   722  	ID                  byte
   723  	Priority            int
   724  	SendQueueCapacity   int
   725  	RecvBufferCapacity  int
   726  	RecvMessageCapacity int
   727  	MessageType         proto.Message
   728  }
   729  
   730  func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) {
   731  	if chDesc.SendQueueCapacity == 0 {
   732  		chDesc.SendQueueCapacity = defaultSendQueueCapacity
   733  	}
   734  	if chDesc.RecvBufferCapacity == 0 {
   735  		chDesc.RecvBufferCapacity = defaultRecvBufferCapacity
   736  	}
   737  	if chDesc.RecvMessageCapacity == 0 {
   738  		chDesc.RecvMessageCapacity = defaultRecvMessageCapacity
   739  	}
   740  	filled = chDesc
   741  	return
   742  }
   743  
   744  // TODO: lowercase.
   745  // NOTE: not goroutine-safe.
   746  type Channel struct {
   747  	conn          *MConnection
   748  	desc          ChannelDescriptor
   749  	sendQueue     chan []byte
   750  	sendQueueSize int32 // atomic.
   751  	recving       []byte
   752  	sending       []byte
   753  	recentlySent  int64 // exponential moving average
   754  
   755  	maxPacketMsgPayloadSize int
   756  
   757  	Logger log.Logger
   758  }
   759  
   760  func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
   761  	desc = desc.FillDefaults()
   762  	if desc.Priority <= 0 {
   763  		panic("Channel default priority must be a positive integer")
   764  	}
   765  	return &Channel{
   766  		conn:                    conn,
   767  		desc:                    desc,
   768  		sendQueue:               make(chan []byte, desc.SendQueueCapacity),
   769  		recving:                 make([]byte, 0, desc.RecvBufferCapacity),
   770  		maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize,
   771  	}
   772  }
   773  
   774  func (ch *Channel) SetLogger(l log.Logger) {
   775  	ch.Logger = l
   776  }
   777  
   778  // Queues a message to be sent to this channel.
   779  // Goroutine-safe
   780  // Times out (and returns false) after defaultSendTimeout
   781  func (ch *Channel) sendBytes(bytes []byte) bool {
   782  	select {
   783  	case ch.sendQueue <- bytes:
   784  		atomic.AddInt32(&ch.sendQueueSize, 1)
   785  		return true
   786  	case <-time.After(defaultSendTimeout):
   787  		return false
   788  	}
   789  }
   790  
   791  // Queues a message to be sent to this channel.
   792  // Nonblocking, returns true if successful.
   793  // Goroutine-safe
   794  func (ch *Channel) trySendBytes(bytes []byte) bool {
   795  	select {
   796  	case ch.sendQueue <- bytes:
   797  		atomic.AddInt32(&ch.sendQueueSize, 1)
   798  		return true
   799  	default:
   800  		return false
   801  	}
   802  }
   803  
   804  // Goroutine-safe
   805  func (ch *Channel) loadSendQueueSize() (size int) {
   806  	return int(atomic.LoadInt32(&ch.sendQueueSize))
   807  }
   808  
   809  // Goroutine-safe
   810  // Use only as a heuristic.
   811  func (ch *Channel) canSend() bool {
   812  	return ch.loadSendQueueSize() < defaultSendQueueCapacity
   813  }
   814  
   815  // Returns true if any PacketMsgs are pending to be sent.
   816  // Call before calling nextPacketMsg()
   817  // Goroutine-safe
   818  func (ch *Channel) isSendPending() bool {
   819  	if len(ch.sending) == 0 {
   820  		if len(ch.sendQueue) == 0 {
   821  			return false
   822  		}
   823  		ch.sending = <-ch.sendQueue
   824  	}
   825  	return true
   826  }
   827  
   828  // Creates a new PacketMsg to send.
   829  // Not goroutine-safe
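// E.g. with maxPacketMsgPayloadSize = 1024, a 2500-byte message is split across
// three packets of 1024, 1024 and 452 bytes; only the last packet has EOF set.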
   830  func (ch *Channel) nextPacketMsg() tmp2p.PacketMsg {
   831  	packet := tmp2p.PacketMsg{ChannelID: int32(ch.desc.ID)}
   832  	maxSize := ch.maxPacketMsgPayloadSize
   833  	packet.Data = ch.sending[:tmmath.MinInt(maxSize, len(ch.sending))]
   834  	if len(ch.sending) <= maxSize {
   835  		packet.EOF = true
   836  		ch.sending = nil
   837  		atomic.AddInt32(&ch.sendQueueSize, -1) // decrement sendQueueSize
   838  	} else {
   839  		packet.EOF = false
   840  		ch.sending = ch.sending[tmmath.MinInt(maxSize, len(ch.sending)):]
   841  	}
   842  	return packet
   843  }
   844  
   845  // Writes the next PacketMsg to w and updates ch.recentlySent.
   846  // Not goroutine-safe
   847  func (ch *Channel) writePacketMsgTo(w io.Writer) (n int, err error) {
   848  	packet := ch.nextPacketMsg()
   849  	n, err = protoio.NewDelimitedWriter(w).WriteMsg(mustWrapPacket(&packet))
   850  	atomic.AddInt64(&ch.recentlySent, int64(n))
   851  	return
   852  }
   853  
   854  // Handles incoming PacketMsgs. It returns the message bytes once the message is
   855  // complete. NOTE: the returned bytes may change on the next call to recvPacketMsg.
   856  // Not goroutine-safe
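// E.g. with the default RecvMessageCapacity of 22020096 bytes (~21 MB), a message
// whose assembled size would exceed that limit is rejected with an error.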
   857  func (ch *Channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) {
   858  	ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet)
   859  	var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Data)
   860  	if recvCap < recvReceived {
   861  		return nil, fmt.Errorf("received message exceeds available capacity: %v < %v", recvCap, recvReceived)
   862  	}
   863  	ch.recving = append(ch.recving, packet.Data...)
   864  	if packet.EOF {
   865  		msgBytes := ch.recving
   866  
   867  		// clear the slice without re-allocating.
   868  		// http://stackoverflow.com/questions/16971741/how-do-you-clear-a-slice-in-go
   869  		//   suggests this could be a memory leak, but we might as well keep the memory for the channel until it closes,
   870  		//	at which point the recving slice stops being used and should be garbage collected
   871  		ch.recving = ch.recving[:0] // make([]byte, 0, ch.desc.RecvBufferCapacity)
   872  		return msgBytes, nil
   873  	}
   874  	return nil, nil
   875  }
   876  
   877  // Call this periodically to update stats for throttling purposes.
   878  // Not goroutine-safe
   879  func (ch *Channel) updateStats() {
   880  	// Exponential decay of stats.
   881  	// TODO: optimize.
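	// E.g. a recentlySent of 1000 decays to 800 after one update and 640 after two.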
   882  	atomic.StoreInt64(&ch.recentlySent, int64(float64(atomic.LoadInt64(&ch.recentlySent))*0.8))
   883  }
   884  
   885  //----------------------------------------
   886  // Packet
   887  
   888  // mustWrapPacket takes a packet kind (oneof) and wraps it in a tmp2p.Packet message.
   889  func mustWrapPacket(pb proto.Message) *tmp2p.Packet {
   890  	var msg tmp2p.Packet
   891  
   892  	switch pb := pb.(type) {
   893  	case *tmp2p.Packet: // already a packet
   894  		msg = *pb
   895  	case *tmp2p.PacketPing:
   896  		msg = tmp2p.Packet{
   897  			Sum: &tmp2p.Packet_PacketPing{
   898  				PacketPing: pb,
   899  			},
   900  		}
   901  	case *tmp2p.PacketPong:
   902  		msg = tmp2p.Packet{
   903  			Sum: &tmp2p.Packet_PacketPong{
   904  				PacketPong: pb,
   905  			},
   906  		}
   907  	case *tmp2p.PacketMsg:
   908  		msg = tmp2p.Packet{
   909  			Sum: &tmp2p.Packet_PacketMsg{
   910  				PacketMsg: pb,
   911  			},
   912  		}
   913  	default:
   914  		panic(fmt.Errorf("unknown packet type %T", pb))
   915  	}
   916  
   917  	return &msg
   918  }