github.com/project-88388/tendermint-v0.34.14-terra.2@v1.0.0/p2p/conn/connection.go (about)

     1  package conn
     2  
     3  import (
     4  	"bufio"
     5  	"errors"
     6  	"fmt"
     7  	"io"
     8  	"math"
     9  	"net"
    10  	"reflect"
    11  	"runtime/debug"
    12  	"sync/atomic"
    13  	"time"
    14  
    15  	"github.com/gogo/protobuf/proto"
    16  
    17  	flow "github.com/tendermint/tendermint/libs/flowrate"
    18  	"github.com/tendermint/tendermint/libs/log"
    19  	tmmath "github.com/tendermint/tendermint/libs/math"
    20  	"github.com/tendermint/tendermint/libs/protoio"
    21  	"github.com/tendermint/tendermint/libs/service"
    22  	tmsync "github.com/tendermint/tendermint/libs/sync"
    23  	"github.com/tendermint/tendermint/libs/timer"
    24  	tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
    25  )
    26  
    27  const (
    28  	defaultMaxPacketMsgPayloadSize = 1024
    29  
    30  	numBatchPacketMsgs = 10
    31  	minReadBufferSize  = 1024
    32  	minWriteBufferSize = 65536
    33  	updateStats        = 2 * time.Second
    34  
    35  	// some of these defaults are written in the user config
    36  	// flushThrottle, sendRate, recvRate
    37  	// TODO: remove values present in config
    38  	defaultFlushThrottle = 100 * time.Millisecond
    39  
    40  	defaultSendQueueCapacity   = 1
    41  	defaultRecvBufferCapacity  = 4096
    42  	defaultRecvMessageCapacity = 22020096      // 21MB
    43  	defaultSendRate            = int64(512000) // 500KB/s
    44  	defaultRecvRate            = int64(512000) // 500KB/s
    45  	defaultSendTimeout         = 10 * time.Second
    46  	defaultPingInterval        = 60 * time.Second
    47  	defaultPongTimeout         = 45 * time.Second
    48  )
    49  
    50  type receiveCbFunc func(chID byte, msgBytes []byte)
    51  type errorCbFunc func(interface{})
    52  
    53  /*
    54  Each peer has one `MConnection` (multiplex connection) instance.
    55  
    56  __multiplex__ *noun* a system or signal involving simultaneous transmission of
    57  several messages along a single channel of communication.
    58  
    59  Each `MConnection` handles message transmission on multiple abstract communication
    60  `Channel`s.  Each channel has a globally unique byte id.
    61  The byte id and the relative priorities of each `Channel` are configured upon
    62  initialization of the connection.
    63  
    64  There are two methods for sending messages:
    65  	func (m MConnection) Send(chID byte, msgBytes []byte) bool {}
     66  	func (m MConnection) TrySend(chID byte, msgBytes []byte) bool {}
    67  
     68  `Send(chID, msgBytes)` is a blocking call that waits until `msgBytes` is
     69  successfully queued for the channel with the given id byte `chID`, or until the
     70  request times out.  The message bytes are wrapped in PacketMsgs, which are serialized using Protobuf.
    71  
    72  `TrySend(chID, msgBytes)` is a nonblocking call that returns false if the
    73  channel's queue is full.
    74  
    75  Inbound message bytes are handled with an onReceive callback function.
    76  */
    77  type MConnection struct {
    78  	service.BaseService
    79  
    80  	conn          net.Conn
    81  	bufConnReader *bufio.Reader
    82  	bufConnWriter *bufio.Writer
    83  	sendMonitor   *flow.Monitor
    84  	recvMonitor   *flow.Monitor
    85  	send          chan struct{}
    86  	pong          chan struct{}
    87  	channels      []*Channel
    88  	channelsIdx   map[byte]*Channel
    89  	onReceive     receiveCbFunc
    90  	onError       errorCbFunc
    91  	errored       uint32
    92  	config        MConnConfig
    93  
    94  	// Closing quitSendRoutine will cause the sendRoutine to eventually quit.
    95  	// doneSendRoutine is closed when the sendRoutine actually quits.
    96  	quitSendRoutine chan struct{}
    97  	doneSendRoutine chan struct{}
    98  
     99  	// Closing quitRecvRoutine will cause the recvRoutine to eventually quit.
   100  	quitRecvRoutine chan struct{}
   101  
   102  	// used to ensure FlushStop and OnStop
   103  	// are safe to call concurrently.
   104  	stopMtx tmsync.Mutex
   105  
   106  	flushTimer *timer.ThrottleTimer // flush writes as necessary but throttled.
   107  	pingTimer  *time.Ticker         // send pings periodically
   108  
   109  	// close conn if pong is not received in pongTimeout
   110  	pongTimer     *time.Timer
   111  	pongTimeoutCh chan bool // true - timeout, false - peer sent pong
   112  
   113  	chStatsTimer *time.Ticker // update channel stats periodically
   114  
   115  	created time.Time // time of creation
   116  
   117  	_maxPacketMsgSize int
   118  }
   119  
   120  // MConnConfig is a MConnection configuration.
   121  type MConnConfig struct {
   122  	SendRate int64 `mapstructure:"send_rate"`
   123  	RecvRate int64 `mapstructure:"recv_rate"`
   124  
   125  	// Maximum payload size
   126  	MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`
   127  
   128  	// Interval to flush writes (throttled)
   129  	FlushThrottle time.Duration `mapstructure:"flush_throttle"`
   130  
   131  	// Interval to send pings
   132  	PingInterval time.Duration `mapstructure:"ping_interval"`
   133  
   134  	// Maximum wait time for pongs
   135  	PongTimeout time.Duration `mapstructure:"pong_timeout"`
   136  }
   137  
   138  // DefaultMConnConfig returns the default config.
   139  func DefaultMConnConfig() MConnConfig {
   140  	return MConnConfig{
   141  		SendRate:                defaultSendRate,
   142  		RecvRate:                defaultRecvRate,
   143  		MaxPacketMsgPayloadSize: defaultMaxPacketMsgPayloadSize,
   144  		FlushThrottle:           defaultFlushThrottle,
   145  		PingInterval:            defaultPingInterval,
   146  		PongTimeout:             defaultPongTimeout,
   147  	}
   148  }
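// Example (illustrative sketch, not part of the original file): adjusting the
// defaults before passing the config to NewMConnectionWithConfig. The rate
// figures are placeholders; note that PongTimeout must stay below PingInterval,
// otherwise NewMConnectionWithConfig panics.
//
//	cfg := DefaultMConnConfig()
//	cfg.SendRate = 1024000 // ~1 MB/s, placeholder
//	cfg.RecvRate = 1024000
//	cfg.PingInterval = 30 * time.Second
//	cfg.PongTimeout = 20 * time.Second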
   149  
    150  // NewMConnection wraps net.Conn and creates a multiplex connection.
   151  func NewMConnection(
   152  	conn net.Conn,
   153  	chDescs []*ChannelDescriptor,
   154  	onReceive receiveCbFunc,
   155  	onError errorCbFunc,
   156  ) *MConnection {
   157  	return NewMConnectionWithConfig(
   158  		conn,
   159  		chDescs,
   160  		onReceive,
   161  		onError,
   162  		DefaultMConnConfig())
   163  }
   164  
    165  // NewMConnectionWithConfig wraps net.Conn and creates a multiplex connection with a config.
   166  func NewMConnectionWithConfig(
   167  	conn net.Conn,
   168  	chDescs []*ChannelDescriptor,
   169  	onReceive receiveCbFunc,
   170  	onError errorCbFunc,
   171  	config MConnConfig,
   172  ) *MConnection {
   173  	if config.PongTimeout >= config.PingInterval {
   174  		panic("pongTimeout must be less than pingInterval (otherwise, next ping will reset pong timer)")
   175  	}
   176  
   177  	mconn := &MConnection{
   178  		conn:          conn,
   179  		bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize),
   180  		bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize),
   181  		sendMonitor:   flow.New(0, 0),
   182  		recvMonitor:   flow.New(0, 0),
   183  		send:          make(chan struct{}, 1),
   184  		pong:          make(chan struct{}, 1),
   185  		onReceive:     onReceive,
   186  		onError:       onError,
   187  		config:        config,
   188  		created:       time.Now(),
   189  	}
   190  
   191  	// Create channels
   192  	var channelsIdx = map[byte]*Channel{}
   193  	var channels = []*Channel{}
   194  
   195  	for _, desc := range chDescs {
   196  		channel := newChannel(mconn, *desc)
   197  		channelsIdx[channel.desc.ID] = channel
   198  		channels = append(channels, channel)
   199  	}
   200  	mconn.channels = channels
   201  	mconn.channelsIdx = channelsIdx
   202  
   203  	mconn.BaseService = *service.NewBaseService(nil, "MConnection", mconn)
   204  
   205  	// maxPacketMsgSize() is a bit heavy, so call just once
   206  	mconn._maxPacketMsgSize = mconn.maxPacketMsgSize()
   207  
   208  	return mconn
   209  }
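// Example (illustrative sketch, not part of the original file): wiring an
// MConnection over a dialed TCP connection. The peer address, channel ID
// (0x20), priority, and callback bodies are placeholders, and logger and
// msgBytes are assumed to be defined by the caller.
//
//	conn, err := net.Dial("tcp", "peer.example.com:26656")
//	if err != nil {
//		return err
//	}
//	chDescs := []*ChannelDescriptor{{ID: 0x20, Priority: 1, SendQueueCapacity: 50}}
//	onReceive := func(chID byte, msgBytes []byte) { /* hand bytes to a reactor */ }
//	onError := func(r interface{}) { /* tear down the peer */ }
//	mconn := NewMConnectionWithConfig(conn, chDescs, onReceive, onError, DefaultMConnConfig())
//	mconn.SetLogger(logger)
//	if err := mconn.Start(); err != nil {
//		return err
//	}
//	// Send blocks until the bytes are queued (or defaultSendTimeout expires);
//	// TrySend returns false immediately if the channel's queue is full.
//	sent := mconn.Send(0x20, msgBytes)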
   210  
   211  func (c *MConnection) SetLogger(l log.Logger) {
   212  	c.BaseService.SetLogger(l)
   213  	for _, ch := range c.channels {
   214  		ch.SetLogger(l)
   215  	}
   216  }
   217  
   218  // OnStart implements BaseService
   219  func (c *MConnection) OnStart() error {
   220  	if err := c.BaseService.OnStart(); err != nil {
   221  		return err
   222  	}
   223  	c.flushTimer = timer.NewThrottleTimer("flush", c.config.FlushThrottle)
   224  	c.pingTimer = time.NewTicker(c.config.PingInterval)
   225  	c.pongTimeoutCh = make(chan bool, 1)
   226  	c.chStatsTimer = time.NewTicker(updateStats)
   227  	c.quitSendRoutine = make(chan struct{})
   228  	c.doneSendRoutine = make(chan struct{})
   229  	c.quitRecvRoutine = make(chan struct{})
   230  	go c.sendRoutine()
   231  	go c.recvRoutine()
   232  	return nil
   233  }
   234  
    235  // stopServices stops the BaseService and timers and closes the quit channels.
    236  // If they were already closed, it returns true; otherwise it returns false.
    237  // It uses the stopMtx to ensure only one of FlushStop and OnStop can do this at a time.
   238  func (c *MConnection) stopServices() (alreadyStopped bool) {
   239  	c.stopMtx.Lock()
   240  	defer c.stopMtx.Unlock()
   241  
   242  	select {
   243  	case <-c.quitSendRoutine:
   244  		// already quit
   245  		return true
   246  	default:
   247  	}
   248  
   249  	select {
   250  	case <-c.quitRecvRoutine:
   251  		// already quit
   252  		return true
   253  	default:
   254  	}
   255  
   256  	c.BaseService.OnStop()
   257  	c.flushTimer.Stop()
   258  	c.pingTimer.Stop()
   259  	c.chStatsTimer.Stop()
   260  
    261  	// inform the recvRoutine that we are shutting down
   262  	close(c.quitRecvRoutine)
   263  	close(c.quitSendRoutine)
   264  	return false
   265  }
   266  
   267  // FlushStop replicates the logic of OnStop.
   268  // It additionally ensures that all successful
   269  // .Send() calls will get flushed before closing
   270  // the connection.
   271  func (c *MConnection) FlushStop() {
   272  	if c.stopServices() {
   273  		return
   274  	}
   275  
   276  	// this block is unique to FlushStop
   277  	{
    278  		// wait until the sendRoutine exits
    279  		// so we don't race on calling sendSomePacketMsgs
   280  		<-c.doneSendRoutine
   281  
   282  		// Send and flush all pending msgs.
   283  		// Since sendRoutine has exited, we can call this
   284  		// safely
   285  		eof := c.sendSomePacketMsgs()
   286  		for !eof {
   287  			eof = c.sendSomePacketMsgs()
   288  		}
   289  		c.flush()
   290  
   291  		// Now we can close the connection
   292  	}
   293  
   294  	c.conn.Close()
   295  
   296  	// We can't close pong safely here because
   297  	// recvRoutine may write to it after we've stopped.
   298  	// Though it doesn't need to get closed at all,
   299  	// we close it @ recvRoutine.
   300  
   301  	// c.Stop()
   302  }
   303  
   304  // OnStop implements BaseService
   305  func (c *MConnection) OnStop() {
   306  	if c.stopServices() {
   307  		return
   308  	}
   309  
   310  	c.conn.Close()
   311  
   312  	// We can't close pong safely here because
   313  	// recvRoutine may write to it after we've stopped.
   314  	// Though it doesn't need to get closed at all,
   315  	// we close it @ recvRoutine.
   316  }
   317  
   318  func (c *MConnection) String() string {
   319  	return fmt.Sprintf("MConn{%v}", c.conn.RemoteAddr())
   320  }
   321  
   322  func (c *MConnection) flush() {
   323  	c.Logger.Debug("Flush", "conn", c)
   324  	err := c.bufConnWriter.Flush()
   325  	if err != nil {
   326  		c.Logger.Debug("MConnection flush failed", "err", err)
   327  	}
   328  }
   329  
   330  // Catch panics, usually caused by remote disconnects.
   331  func (c *MConnection) _recover() {
   332  	if r := recover(); r != nil {
   333  		c.Logger.Error("MConnection panicked", "err", r, "stack", string(debug.Stack()))
   334  		c.stopForError(fmt.Errorf("recovered from panic: %v", r))
   335  	}
   336  }
   337  
   338  func (c *MConnection) stopForError(r interface{}) {
   339  	if err := c.Stop(); err != nil {
   340  		c.Logger.Error("Error stopping connection", "err", err)
   341  	}
   342  	if atomic.CompareAndSwapUint32(&c.errored, 0, 1) {
   343  		if c.onError != nil {
   344  			c.onError(r)
   345  		}
   346  	}
   347  }
   348  
    349  // Send queues a message to be sent to the channel with the given id.
   350  func (c *MConnection) Send(chID byte, msgBytes []byte) bool {
   351  	if !c.IsRunning() {
   352  		return false
   353  	}
   354  
   355  	c.Logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
   356  
   357  	// Send message to channel.
   358  	channel, ok := c.channelsIdx[chID]
   359  	if !ok {
   360  		c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
   361  		return false
   362  	}
   363  
   364  	success := channel.sendBytes(msgBytes)
   365  	if success {
   366  		// Wake up sendRoutine if necessary
   367  		select {
   368  		case c.send <- struct{}{}:
   369  		default:
   370  		}
   371  	} else {
   372  		c.Logger.Debug("Send failed", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
   373  	}
   374  	return success
   375  }
   376  
    377  // TrySend queues a message to be sent to the channel with the given id.
    378  // Nonblocking, returns true if successful.
   379  func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool {
   380  	if !c.IsRunning() {
   381  		return false
   382  	}
   383  
   384  	c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
   385  
   386  	// Send message to channel.
   387  	channel, ok := c.channelsIdx[chID]
   388  	if !ok {
   389  		c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
   390  		return false
   391  	}
   392  
   393  	ok = channel.trySendBytes(msgBytes)
   394  	if ok {
   395  		// Wake up sendRoutine if necessary
   396  		select {
   397  		case c.send <- struct{}{}:
   398  		default:
   399  		}
   400  	}
   401  
   402  	return ok
   403  }
   404  
   405  // CanSend returns true if you can send more data onto the chID, false
   406  // otherwise. Use only as a heuristic.
   407  func (c *MConnection) CanSend(chID byte) bool {
   408  	if !c.IsRunning() {
   409  		return false
   410  	}
   411  
   412  	channel, ok := c.channelsIdx[chID]
   413  	if !ok {
   414  		c.Logger.Error(fmt.Sprintf("Unknown channel %X", chID))
   415  		return false
   416  	}
   417  	return channel.canSend()
   418  }
   419  
   420  // sendRoutine polls for packets to send from channels.
   421  func (c *MConnection) sendRoutine() {
   422  	defer c._recover()
   423  
   424  	protoWriter := protoio.NewDelimitedWriter(c.bufConnWriter)
   425  
   426  FOR_LOOP:
   427  	for {
   428  		var _n int
   429  		var err error
   430  	SELECTION:
   431  		select {
   432  		case <-c.flushTimer.Ch:
   433  			// NOTE: flushTimer.Set() must be called every time
   434  			// something is written to .bufConnWriter.
   435  			c.flush()
   436  		case <-c.chStatsTimer.C:
   437  			for _, channel := range c.channels {
   438  				channel.updateStats()
   439  			}
   440  		case <-c.pingTimer.C:
   441  			c.Logger.Debug("Send Ping")
   442  			_n, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPing{}))
   443  			if err != nil {
   444  				c.Logger.Error("Failed to send PacketPing", "err", err)
   445  				break SELECTION
   446  			}
   447  			c.sendMonitor.Update(_n)
   448  			c.Logger.Debug("Starting pong timer", "dur", c.config.PongTimeout)
   449  			c.pongTimer = time.AfterFunc(c.config.PongTimeout, func() {
   450  				select {
   451  				case c.pongTimeoutCh <- true:
   452  				default:
   453  				}
   454  			})
   455  			c.flush()
   456  		case timeout := <-c.pongTimeoutCh:
   457  			if timeout {
   458  				c.Logger.Debug("Pong timeout")
   459  				err = errors.New("pong timeout")
   460  			} else {
   461  				c.stopPongTimer()
   462  			}
   463  		case <-c.pong:
   464  			c.Logger.Debug("Send Pong")
   465  			_n, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPong{}))
   466  			if err != nil {
   467  				c.Logger.Error("Failed to send PacketPong", "err", err)
   468  				break SELECTION
   469  			}
   470  			c.sendMonitor.Update(_n)
   471  			c.flush()
   472  		case <-c.quitSendRoutine:
   473  			break FOR_LOOP
   474  		case <-c.send:
   475  			// Send some PacketMsgs
   476  			eof := c.sendSomePacketMsgs()
   477  			if !eof {
   478  				// Keep sendRoutine awake.
   479  				select {
   480  				case c.send <- struct{}{}:
   481  				default:
   482  				}
   483  			}
   484  		}
   485  
   486  		if !c.IsRunning() {
   487  			break FOR_LOOP
   488  		}
   489  		if err != nil {
   490  			c.Logger.Error("Connection failed @ sendRoutine", "conn", c, "err", err)
   491  			c.stopForError(err)
   492  			break FOR_LOOP
   493  		}
   494  	}
   495  
   496  	// Cleanup
   497  	c.stopPongTimer()
   498  	close(c.doneSendRoutine)
   499  }
   500  
    501  // Returns true if messages from channels were exhausted.
    502  // Blocks in accordance with .sendMonitor throttling.
   503  func (c *MConnection) sendSomePacketMsgs() bool {
   504  	// Block until .sendMonitor says we can write.
   505  	// Once we're ready we send more than we asked for,
   506  	// but amortized it should even out.
   507  	c.sendMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true)
   508  
   509  	// Now send some PacketMsgs.
   510  	for i := 0; i < numBatchPacketMsgs; i++ {
   511  		if c.sendPacketMsg() {
   512  			return true
   513  		}
   514  	}
   515  	return false
   516  }
   517  
   518  // Returns true if messages from channels were exhausted.
   519  func (c *MConnection) sendPacketMsg() bool {
   520  	// Choose a channel to create a PacketMsg from.
   521  	// The chosen channel will be the one whose recentlySent/priority is the least.
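	// For example (illustrative numbers): a channel with priority 10 and
	// recentlySent 500 has ratio 50, while a channel with priority 1 and
	// recentlySent 100 has ratio 100, so the higher-priority channel is
	// drained first even though it has sent more bytes recently.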
   522  	var leastRatio float32 = math.MaxFloat32
   523  	var leastChannel *Channel
   524  	for _, channel := range c.channels {
   525  		// If nothing to send, skip this channel
   526  		if !channel.isSendPending() {
   527  			continue
   528  		}
   529  		// Get ratio, and keep track of lowest ratio.
   530  		ratio := float32(channel.recentlySent) / float32(channel.desc.Priority)
   531  		if ratio < leastRatio {
   532  			leastRatio = ratio
   533  			leastChannel = channel
   534  		}
   535  	}
   536  
   537  	// Nothing to send?
   538  	if leastChannel == nil {
   539  		return true
   540  	}
   541  	// c.Logger.Info("Found a msgPacket to send")
   542  
   543  	// Make & send a PacketMsg from this channel
   544  	_n, err := leastChannel.writePacketMsgTo(c.bufConnWriter)
   545  	if err != nil {
   546  		c.Logger.Error("Failed to write PacketMsg", "err", err)
   547  		c.stopForError(err)
   548  		return true
   549  	}
   550  	c.sendMonitor.Update(_n)
   551  	c.flushTimer.Set()
   552  	return false
   553  }
   554  
   555  // recvRoutine reads PacketMsgs and reconstructs the message using the channels' "recving" buffer.
   556  // After a whole message has been assembled, it's pushed to onReceive().
   557  // Blocks depending on how the connection is throttled.
   558  // Otherwise, it never blocks.
   559  func (c *MConnection) recvRoutine() {
   560  	defer c._recover()
   561  
   562  	protoReader := protoio.NewDelimitedReader(c.bufConnReader, c._maxPacketMsgSize)
   563  
   564  FOR_LOOP:
   565  	for {
   566  		// Block until .recvMonitor says we can read.
   567  		c.recvMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.RecvRate), true)
   568  
   569  		// Peek into bufConnReader for debugging
   570  		/*
   571  			if numBytes := c.bufConnReader.Buffered(); numBytes > 0 {
   572  				bz, err := c.bufConnReader.Peek(tmmath.MinInt(numBytes, 100))
   573  				if err == nil {
   574  					// return
   575  				} else {
   576  					c.Logger.Debug("Error peeking connection buffer", "err", err)
   577  					// return nil
   578  				}
   579  				c.Logger.Info("Peek connection buffer", "numBytes", numBytes, "bz", bz)
   580  			}
   581  		*/
   582  
   583  		// Read packet type
   584  		var packet tmp2p.Packet
   585  
   586  		_n, err := protoReader.ReadMsg(&packet)
   587  		c.recvMonitor.Update(_n)
   588  		if err != nil {
    589  			// stopServices was invoked and we are shutting down;
    590  			// receiving is expected to fail since we will close the connection
   591  			select {
   592  			case <-c.quitRecvRoutine:
   593  				break FOR_LOOP
   594  			default:
   595  			}
   596  
   597  			if c.IsRunning() {
   598  				if err == io.EOF {
   599  					c.Logger.Info("Connection is closed @ recvRoutine (likely by the other side)", "conn", c)
   600  				} else {
   601  					c.Logger.Debug("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err)
   602  				}
   603  				c.stopForError(err)
   604  			}
   605  			break FOR_LOOP
   606  		}
   607  
   608  		// Read more depending on packet type.
   609  		switch pkt := packet.Sum.(type) {
   610  		case *tmp2p.Packet_PacketPing:
   611  			// TODO: prevent abuse, as they cause flush()'s.
   612  			// https://github.com/tendermint/tendermint/issues/1190
   613  			c.Logger.Debug("Receive Ping")
   614  			select {
   615  			case c.pong <- struct{}{}:
   616  			default:
   617  				// never block
   618  			}
   619  		case *tmp2p.Packet_PacketPong:
   620  			c.Logger.Debug("Receive Pong")
   621  			select {
   622  			case c.pongTimeoutCh <- false:
   623  			default:
   624  				// never block
   625  			}
   626  		case *tmp2p.Packet_PacketMsg:
   627  			channelID := byte(pkt.PacketMsg.ChannelID)
   628  			channel, ok := c.channelsIdx[channelID]
   629  			if pkt.PacketMsg.ChannelID < 0 || pkt.PacketMsg.ChannelID > math.MaxUint8 || !ok || channel == nil {
   630  				err := fmt.Errorf("unknown channel %X", pkt.PacketMsg.ChannelID)
   631  				c.Logger.Debug("Connection failed @ recvRoutine", "conn", c, "err", err)
   632  				c.stopForError(err)
   633  				break FOR_LOOP
   634  			}
   635  
   636  			msgBytes, err := channel.recvPacketMsg(*pkt.PacketMsg)
   637  			if err != nil {
   638  				if c.IsRunning() {
   639  					c.Logger.Debug("Connection failed @ recvRoutine", "conn", c, "err", err)
   640  					c.stopForError(err)
   641  				}
   642  				break FOR_LOOP
   643  			}
   644  			if msgBytes != nil {
   645  				c.Logger.Debug("Received bytes", "chID", channelID, "msgBytes", msgBytes)
   646  				// NOTE: This means the reactor.Receive runs in the same thread as the p2p recv routine
   647  				c.onReceive(channelID, msgBytes)
   648  			}
   649  		default:
   650  			err := fmt.Errorf("unknown message type %v", reflect.TypeOf(packet))
   651  			c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
   652  			c.stopForError(err)
   653  			break FOR_LOOP
   654  		}
   655  	}
   656  
   657  	// Cleanup
   658  	close(c.pong)
   659  	for range c.pong {
   660  		// Drain
   661  	}
   662  }
   663  
   664  // not goroutine-safe
   665  func (c *MConnection) stopPongTimer() {
   666  	if c.pongTimer != nil {
   667  		_ = c.pongTimer.Stop()
   668  		c.pongTimer = nil
   669  	}
   670  }
   671  
    672  // maxPacketMsgSize returns the maximum size of a PacketMsg
   673  func (c *MConnection) maxPacketMsgSize() int {
   674  	bz, err := proto.Marshal(mustWrapPacket(&tmp2p.PacketMsg{
   675  		ChannelID: 0x01,
   676  		EOF:       true,
   677  		Data:      make([]byte, c.config.MaxPacketMsgPayloadSize),
   678  	}))
   679  	if err != nil {
   680  		panic(err)
   681  	}
   682  	return len(bz)
   683  }
   684  
   685  type ConnectionStatus struct {
   686  	Duration    time.Duration
   687  	SendMonitor flow.Status
   688  	RecvMonitor flow.Status
   689  	Channels    []ChannelStatus
   690  }
   691  
   692  type ChannelStatus struct {
   693  	ID                byte
   694  	SendQueueCapacity int
   695  	SendQueueSize     int
   696  	Priority          int
   697  	RecentlySent      int64
   698  }
   699  
   700  func (c *MConnection) Status() ConnectionStatus {
   701  	var status ConnectionStatus
   702  	status.Duration = time.Since(c.created)
   703  	status.SendMonitor = c.sendMonitor.Status()
   704  	status.RecvMonitor = c.recvMonitor.Status()
   705  	status.Channels = make([]ChannelStatus, len(c.channels))
   706  	for i, channel := range c.channels {
   707  		status.Channels[i] = ChannelStatus{
   708  			ID:                channel.desc.ID,
   709  			SendQueueCapacity: cap(channel.sendQueue),
   710  			SendQueueSize:     int(atomic.LoadInt32(&channel.sendQueueSize)),
   711  			Priority:          channel.desc.Priority,
   712  			RecentlySent:      atomic.LoadInt64(&channel.recentlySent),
   713  		}
   714  	}
   715  	return status
   716  }
   717  
   718  //-----------------------------------------------------------------------------
   719  
   720  type ChannelDescriptor struct {
   721  	ID                  byte
   722  	Priority            int
   723  	SendQueueCapacity   int
   724  	RecvBufferCapacity  int
   725  	RecvMessageCapacity int
   726  }
   727  
   728  func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) {
   729  	if chDesc.SendQueueCapacity == 0 {
   730  		chDesc.SendQueueCapacity = defaultSendQueueCapacity
   731  	}
   732  	if chDesc.RecvBufferCapacity == 0 {
   733  		chDesc.RecvBufferCapacity = defaultRecvBufferCapacity
   734  	}
   735  	if chDesc.RecvMessageCapacity == 0 {
   736  		chDesc.RecvMessageCapacity = defaultRecvMessageCapacity
   737  	}
   738  	filled = chDesc
   739  	return
   740  }
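// For example (illustrative values, the ID and Priority are placeholders):
// ChannelDescriptor{ID: 0x20, Priority: 1}.FillDefaults() yields
// SendQueueCapacity 1, RecvBufferCapacity 4096 and RecvMessageCapacity
// 22020096 (the defaults above); non-zero fields are left as provided.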
   741  
   742  // TODO: lowercase.
   743  // NOTE: not goroutine-safe.
   744  type Channel struct {
   745  	conn          *MConnection
   746  	desc          ChannelDescriptor
   747  	sendQueue     chan []byte
   748  	sendQueueSize int32 // atomic.
   749  	recving       []byte
   750  	sending       []byte
   751  	recentlySent  int64 // exponential moving average
   752  
   753  	maxPacketMsgPayloadSize int
   754  
   755  	Logger log.Logger
   756  }
   757  
   758  func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
   759  	desc = desc.FillDefaults()
   760  	if desc.Priority <= 0 {
   761  		panic("Channel default priority must be a positive integer")
   762  	}
   763  	return &Channel{
   764  		conn:                    conn,
   765  		desc:                    desc,
   766  		sendQueue:               make(chan []byte, desc.SendQueueCapacity),
   767  		recving:                 make([]byte, 0, desc.RecvBufferCapacity),
   768  		maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize,
   769  	}
   770  }
   771  
   772  func (ch *Channel) SetLogger(l log.Logger) {
   773  	ch.Logger = l
   774  }
   775  
   776  // Queues message to send to this channel.
   777  // Goroutine-safe
   778  // Times out (and returns false) after defaultSendTimeout
   779  func (ch *Channel) sendBytes(bytes []byte) bool {
   780  	select {
   781  	case ch.sendQueue <- bytes:
   782  		atomic.AddInt32(&ch.sendQueueSize, 1)
   783  		return true
   784  	case <-time.After(defaultSendTimeout):
   785  		return false
   786  	}
   787  }
   788  
   789  // Queues message to send to this channel.
   790  // Nonblocking, returns true if successful.
   791  // Goroutine-safe
   792  func (ch *Channel) trySendBytes(bytes []byte) bool {
   793  	select {
   794  	case ch.sendQueue <- bytes:
   795  		atomic.AddInt32(&ch.sendQueueSize, 1)
   796  		return true
   797  	default:
   798  		return false
   799  	}
   800  }
   801  
   802  // Goroutine-safe
   803  func (ch *Channel) loadSendQueueSize() (size int) {
   804  	return int(atomic.LoadInt32(&ch.sendQueueSize))
   805  }
   806  
   807  // Goroutine-safe
   808  // Use only as a heuristic.
   809  func (ch *Channel) canSend() bool {
   810  	return ch.loadSendQueueSize() < defaultSendQueueCapacity
   811  }
   812  
   813  // Returns true if any PacketMsgs are pending to be sent.
   814  // Call before calling nextPacketMsg()
   815  // Goroutine-safe
   816  func (ch *Channel) isSendPending() bool {
   817  	if len(ch.sending) == 0 {
   818  		if len(ch.sendQueue) == 0 {
   819  			return false
   820  		}
   821  		ch.sending = <-ch.sendQueue
   822  	}
   823  	return true
   824  }
   825  
   826  // Creates a new PacketMsg to send.
   827  // Not goroutine-safe
   828  func (ch *Channel) nextPacketMsg() tmp2p.PacketMsg {
   829  	packet := tmp2p.PacketMsg{ChannelID: int32(ch.desc.ID)}
   830  	maxSize := ch.maxPacketMsgPayloadSize
   831  	packet.Data = ch.sending[:tmmath.MinInt(maxSize, len(ch.sending))]
   832  	if len(ch.sending) <= maxSize {
   833  		packet.EOF = true
   834  		ch.sending = nil
   835  		atomic.AddInt32(&ch.sendQueueSize, -1) // decrement sendQueueSize
   836  	} else {
   837  		packet.EOF = false
   838  		ch.sending = ch.sending[tmmath.MinInt(maxSize, len(ch.sending)):]
   839  	}
   840  	return packet
   841  }
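// For example (illustrative numbers): with maxPacketMsgPayloadSize = 1024, a
// queued 2500-byte message is emitted as three PacketMsgs carrying 1024, 1024
// and 452 bytes of data, with EOF set only on the last one.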
   842  
    843  // Writes the next PacketMsg to w and updates ch.recentlySent.
   844  // Not goroutine-safe
   845  func (ch *Channel) writePacketMsgTo(w io.Writer) (n int, err error) {
   846  	packet := ch.nextPacketMsg()
   847  	n, err = protoio.NewDelimitedWriter(w).WriteMsg(mustWrapPacket(&packet))
   848  	atomic.AddInt64(&ch.recentlySent, int64(n))
   849  	return
   850  }
   851  
    852  // Handles incoming PacketMsgs. It returns the message bytes if the message is
    853  // complete. NOTE: message bytes may change on the next call to recvPacketMsg.
   854  // Not goroutine-safe
   855  func (ch *Channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) {
   856  	ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet)
   857  	var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Data)
   858  	if recvCap < recvReceived {
   859  		return nil, fmt.Errorf("received message exceeds available capacity: %v < %v", recvCap, recvReceived)
   860  	}
   861  	ch.recving = append(ch.recving, packet.Data...)
   862  	if packet.EOF {
   863  		msgBytes := ch.recving
   864  
   865  		// clear the slice without re-allocating.
   866  		// http://stackoverflow.com/questions/16971741/how-do-you-clear-a-slice-in-go
   867  		//   suggests this could be a memory leak, but we might as well keep the memory for the channel until it closes,
   868  		//	at which point the recving slice stops being used and should be garbage collected
   869  		ch.recving = ch.recving[:0] // make([]byte, 0, ch.desc.RecvBufferCapacity)
   870  		return msgBytes, nil
   871  	}
   872  	return nil, nil
   873  }
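// For example (illustrative numbers): the three PacketMsgs from the 2500-byte
// message above produce (nil, nil) for the first two calls; the third call,
// whose packet has EOF = true, returns the reassembled 2500-byte payload,
// assuming RecvMessageCapacity is at least 2500.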
   874  
   875  // Call this periodically to update stats for throttling purposes.
   876  // Not goroutine-safe
   877  func (ch *Channel) updateStats() {
   878  	// Exponential decay of stats.
   879  	// TODO: optimize.
   880  	atomic.StoreInt64(&ch.recentlySent, int64(float64(atomic.LoadInt64(&ch.recentlySent))*0.8))
   881  }
   882  
   883  //----------------------------------------
   884  // Packet
   885  
   886  // mustWrapPacket takes a packet kind (oneof) and wraps it in a tmp2p.Packet message.
   887  func mustWrapPacket(pb proto.Message) *tmp2p.Packet {
   888  	var msg tmp2p.Packet
   889  
   890  	switch pb := pb.(type) {
   891  	case *tmp2p.Packet: // already a packet
   892  		msg = *pb
   893  	case *tmp2p.PacketPing:
   894  		msg = tmp2p.Packet{
   895  			Sum: &tmp2p.Packet_PacketPing{
   896  				PacketPing: pb,
   897  			},
   898  		}
   899  	case *tmp2p.PacketPong:
   900  		msg = tmp2p.Packet{
   901  			Sum: &tmp2p.Packet_PacketPong{
   902  				PacketPong: pb,
   903  			},
   904  		}
   905  	case *tmp2p.PacketMsg:
   906  		msg = tmp2p.Packet{
   907  			Sum: &tmp2p.Packet_PacketMsg{
   908  				PacketMsg: pb,
   909  			},
   910  		}
   911  	default:
   912  		panic(fmt.Errorf("unknown packet type %T", pb))
   913  	}
   914  
   915  	return &msg
   916  }
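// For example: mustWrapPacket(&tmp2p.PacketPing{}) returns a *tmp2p.Packet whose
// Sum is a *tmp2p.Packet_PacketPing; passing an unrecognized message type panics.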