github.com/adoriasoft/tendermint@v0.34.0-dev1.0.20200722151356-96d84601a75a/p2p/conn/connection.go

     1  package conn
     2  
     3  import (
     4  	"bufio"
     5  	"errors"
     6  	"fmt"
     7  	"io"
     8  	"math"
     9  	"net"
    10  	"reflect"
    11  	"runtime/debug"
    12  	"sync/atomic"
    13  	"time"
    14  
    15  	"github.com/gogo/protobuf/proto"
    16  
    17  	flow "github.com/tendermint/tendermint/libs/flowrate"
    18  	"github.com/tendermint/tendermint/libs/log"
    19  	tmmath "github.com/tendermint/tendermint/libs/math"
    20  	"github.com/tendermint/tendermint/libs/protoio"
    21  	"github.com/tendermint/tendermint/libs/service"
    22  	tmsync "github.com/tendermint/tendermint/libs/sync"
    23  	"github.com/tendermint/tendermint/libs/timer"
    24  	tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
    25  )
    26  
    27  const (
    28  	defaultMaxPacketMsgPayloadSize = 1024
    29  
    30  	numBatchPacketMsgs = 10
    31  	minReadBufferSize  = 1024
    32  	minWriteBufferSize = 65536
    33  	updateStats        = 2 * time.Second
    34  
    35  	// some of these defaults are written in the user config
    36  	// flushThrottle, sendRate, recvRate
    37  	// TODO: remove values present in config
    38  	defaultFlushThrottle = 100 * time.Millisecond
    39  
    40  	defaultSendQueueCapacity   = 1
    41  	defaultRecvBufferCapacity  = 4096
    42  	defaultRecvMessageCapacity = 22020096      // 21MB
    43  	defaultSendRate            = int64(512000) // 500KB/s
    44  	defaultRecvRate            = int64(512000) // 500KB/s
    45  	defaultSendTimeout         = 10 * time.Second
    46  	defaultPingInterval        = 60 * time.Second
    47  	defaultPongTimeout         = 45 * time.Second
    48  )
    49  
    50  type receiveCbFunc func(chID byte, msgBytes []byte)
    51  type errorCbFunc func(interface{})
    52  
    53  /*
    54  Each peer has one `MConnection` (multiplex connection) instance.
    55  
    56  __multiplex__ *noun* a system or signal involving simultaneous transmission of
    57  several messages along a single channel of communication.
    58  
    59  Each `MConnection` handles message transmission on multiple abstract communication
    60  `Channel`s.  Each channel has a globally unique byte id.
    61  The byte id and the relative priorities of each `Channel` are configured upon
    62  initialization of the connection.
    63  
    64  There are two methods for sending messages:
    65  	func (m MConnection) Send(chID byte, msgBytes []byte) bool {}
    66  	func (m MConnection) TrySend(chID byte, msgBytes []byte) bool {}
    67  
    68  `Send(chID, msgBytes)` is a blocking call that waits until `msgBytes` is
    69  successfully queued for the channel with the given id byte `chID`, or until the
    70  request times out.  The bytes are sent over the wire framed as Protobuf `PacketMsg`s.
    71  
    72  `TrySend(chID, msgBytes)` is a nonblocking call that returns false if the
    73  channel's queue is full.
    74  
    75  Inbound message bytes are handled with an onReceive callback function.
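
        A minimal usage sketch (the net.Conn value `conn`, channel ID 0x01, and the
        no-op callbacks below are assumed placeholders, not values required by this
        package):

        	chDescs := []*ChannelDescriptor{{ID: 0x01, Priority: 1}}
        	onReceive := func(chID byte, msgBytes []byte) {} // called with each complete message
        	onError := func(r interface{}) {}                // called on connection errors
        	mconn := NewMConnection(conn, chDescs, onReceive, onError)
        	if err := mconn.Start(); err != nil {
        		// handle the start error
        	}
        	if ok := mconn.Send(0x01, []byte("payload")); !ok {
        		// queueing timed out or the connection is not running
        	}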
    76  */
    77  type MConnection struct {
    78  	service.BaseService
    79  
    80  	conn          net.Conn
    81  	bufConnReader *bufio.Reader
    82  	bufConnWriter *bufio.Writer
    83  	sendMonitor   *flow.Monitor
    84  	recvMonitor   *flow.Monitor
    85  	send          chan struct{}
    86  	pong          chan struct{}
    87  	channels      []*Channel
    88  	channelsIdx   map[byte]*Channel
    89  	onReceive     receiveCbFunc
    90  	onError       errorCbFunc
    91  	errored       uint32
    92  	config        MConnConfig
    93  
    94  	// Closing quitSendRoutine will cause the sendRoutine to eventually quit.
    95  	// doneSendRoutine is closed when the sendRoutine actually quits.
    96  	quitSendRoutine chan struct{}
    97  	doneSendRoutine chan struct{}
    98  
    99  	// Closing quitRecvRoutine will cause the recvRoutine to eventually quit.
   100  	quitRecvRoutine chan struct{}
   101  
   102  	// used to ensure FlushStop and OnStop
   103  	// are safe to call concurrently.
   104  	stopMtx tmsync.Mutex
   105  
   106  	flushTimer *timer.ThrottleTimer // flush writes as necessary but throttled.
   107  	pingTimer  *time.Ticker         // send pings periodically
   108  
   109  	// close conn if pong is not received in pongTimeout
   110  	pongTimer     *time.Timer
   111  	pongTimeoutCh chan bool // true - timeout, false - peer sent pong
   112  
   113  	chStatsTimer *time.Ticker // update channel stats periodically
   114  
   115  	created time.Time // time of creation
   116  
   117  	_maxPacketMsgSize int
   118  }
   119  
   120  // MConnConfig is a MConnection configuration.
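        // A caller can start from DefaultMConnConfig (defined below) and override
        // individual fields; the values in this sketch are illustrative only:
        //
        //	cfg := DefaultMConnConfig()
        //	cfg.SendRate = 5120000 // ~5 MB/s
        //	cfg.RecvRate = 5120000
        //	cfg.FlushThrottle = 10 * time.Millisecond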
   121  type MConnConfig struct {
   122  	SendRate int64 `mapstructure:"send_rate"`
   123  	RecvRate int64 `mapstructure:"recv_rate"`
   124  
   125  	// Maximum payload size
   126  	MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`
   127  
   128  	// Interval to flush writes (throttled)
   129  	FlushThrottle time.Duration `mapstructure:"flush_throttle"`
   130  
   131  	// Interval to send pings
   132  	PingInterval time.Duration `mapstructure:"ping_interval"`
   133  
   134  	// Maximum wait time for pongs
   135  	PongTimeout time.Duration `mapstructure:"pong_timeout"`
   136  }
   137  
   138  // DefaultMConnConfig returns the default config.
   139  func DefaultMConnConfig() MConnConfig {
   140  	return MConnConfig{
   141  		SendRate:                defaultSendRate,
   142  		RecvRate:                defaultRecvRate,
   143  		MaxPacketMsgPayloadSize: defaultMaxPacketMsgPayloadSize,
   144  		FlushThrottle:           defaultFlushThrottle,
   145  		PingInterval:            defaultPingInterval,
   146  		PongTimeout:             defaultPongTimeout,
   147  	}
   148  }
   149  
   150  // NewMConnection wraps net.Conn and creates a multiplex connection using DefaultMConnConfig.
   151  func NewMConnection(
   152  	conn net.Conn,
   153  	chDescs []*ChannelDescriptor,
   154  	onReceive receiveCbFunc,
   155  	onError errorCbFunc,
   156  ) *MConnection {
   157  	return NewMConnectionWithConfig(
   158  		conn,
   159  		chDescs,
   160  		onReceive,
   161  		onError,
   162  		DefaultMConnConfig())
   163  }
   164  
   165  // NewMConnectionWithConfig wraps net.Conn and creates a multiplex connection with the given config.
   166  func NewMConnectionWithConfig(
   167  	conn net.Conn,
   168  	chDescs []*ChannelDescriptor,
   169  	onReceive receiveCbFunc,
   170  	onError errorCbFunc,
   171  	config MConnConfig,
   172  ) *MConnection {
   173  	if config.PongTimeout >= config.PingInterval {
   174  		panic("pongTimeout must be less than pingInterval (otherwise, next ping will reset pong timer)")
   175  	}
   176  
   177  	mconn := &MConnection{
   178  		conn:          conn,
   179  		bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize),
   180  		bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize),
   181  		sendMonitor:   flow.New(0, 0),
   182  		recvMonitor:   flow.New(0, 0),
   183  		send:          make(chan struct{}, 1),
   184  		pong:          make(chan struct{}, 1),
   185  		onReceive:     onReceive,
   186  		onError:       onError,
   187  		config:        config,
   188  		created:       time.Now(),
   189  	}
   190  
   191  	// Create channels
   192  	var channelsIdx = map[byte]*Channel{}
   193  	var channels = []*Channel{}
   194  
   195  	for _, desc := range chDescs {
   196  		channel := newChannel(mconn, *desc)
   197  		channelsIdx[channel.desc.ID] = channel
   198  		channels = append(channels, channel)
   199  	}
   200  	mconn.channels = channels
   201  	mconn.channelsIdx = channelsIdx
   202  
   203  	mconn.BaseService = *service.NewBaseService(nil, "MConnection", mconn)
   204  
   205  	// maxPacketMsgSize() is a bit heavy, so call just once
   206  	mconn._maxPacketMsgSize = mconn.maxPacketMsgSize()
   207  
   208  	return mconn
   209  }
   210  
   211  func (c *MConnection) SetLogger(l log.Logger) {
   212  	c.BaseService.SetLogger(l)
   213  	for _, ch := range c.channels {
   214  		ch.SetLogger(l)
   215  	}
   216  }
   217  
   218  // OnStart implements BaseService
   219  func (c *MConnection) OnStart() error {
   220  	if err := c.BaseService.OnStart(); err != nil {
   221  		return err
   222  	}
   223  	c.flushTimer = timer.NewThrottleTimer("flush", c.config.FlushThrottle)
   224  	c.pingTimer = time.NewTicker(c.config.PingInterval)
   225  	c.pongTimeoutCh = make(chan bool, 1)
   226  	c.chStatsTimer = time.NewTicker(updateStats)
   227  	c.quitSendRoutine = make(chan struct{})
   228  	c.doneSendRoutine = make(chan struct{})
   229  	c.quitRecvRoutine = make(chan struct{})
   230  	go c.sendRoutine()
   231  	go c.recvRoutine()
   232  	return nil
   233  }
   234  
   235  // stopServices stops the BaseService and timers and closes the quitSendRoutine and quitRecvRoutine channels.
   236  // If either channel was already closed, it returns true; otherwise it returns false.
   237  // It uses the stopMtx to ensure only one of FlushStop and OnStop can do this at a time.
   238  func (c *MConnection) stopServices() (alreadyStopped bool) {
   239  	c.stopMtx.Lock()
   240  	defer c.stopMtx.Unlock()
   241  
   242  	select {
   243  	case <-c.quitSendRoutine:
   244  		// already quit
   245  		return true
   246  	default:
   247  	}
   248  
   249  	select {
   250  	case <-c.quitRecvRoutine:
   251  		// already quit
   252  		return true
   253  	default:
   254  	}
   255  
   256  	c.BaseService.OnStop()
   257  	c.flushTimer.Stop()
   258  	c.pingTimer.Stop()
   259  	c.chStatsTimer.Stop()
   260  
   261  	// inform the recvRoutine that we are shutting down
   262  	close(c.quitRecvRoutine)
   263  	close(c.quitSendRoutine)
   264  	return false
   265  }
   266  
   267  // FlushStop replicates the logic of OnStop.
   268  // It additionally ensures that all successful
   269  // .Send() calls will get flushed before closing
   270  // the connection.
   271  func (c *MConnection) FlushStop() {
   272  	if c.stopServices() {
   273  		return
   274  	}
   275  
   276  	// this block is unique to FlushStop
   277  	{
   278  		// wait until the sendRoutine exits
   279  		// so we don't race on calling sendSomePacketMsgs
   280  		<-c.doneSendRoutine
   281  
   282  		// Send and flush all pending msgs.
   283  		// Since sendRoutine has exited, we can call this
   284  		// safely
   285  		eof := c.sendSomePacketMsgs()
   286  		for !eof {
   287  			eof = c.sendSomePacketMsgs()
   288  		}
   289  		c.flush()
   290  
   291  		// Now we can close the connection
   292  	}
   293  
   294  	c.conn.Close()
   295  
   296  	// We can't close pong safely here because
   297  	// recvRoutine may write to it after we've stopped.
   298  	// Though it doesn't need to get closed at all,
   299  	// we close it @ recvRoutine.
   300  
   301  	// c.Stop()
   302  }
   303  
   304  // OnStop implements BaseService
   305  func (c *MConnection) OnStop() {
   306  	if c.stopServices() {
   307  		return
   308  	}
   309  
   310  	c.conn.Close()
   311  
   312  	// We can't close pong safely here because
   313  	// recvRoutine may write to it after we've stopped.
   314  	// Though it doesn't need to get closed at all,
   315  	// we close it @ recvRoutine.
   316  }
   317  
   318  func (c *MConnection) String() string {
   319  	return fmt.Sprintf("MConn{%v}", c.conn.RemoteAddr())
   320  }
   321  
   322  func (c *MConnection) flush() {
   323  	c.Logger.Debug("Flush", "conn", c)
   324  	err := c.bufConnWriter.Flush()
   325  	if err != nil {
   326  		c.Logger.Error("MConnection flush failed", "err", err)
   327  	}
   328  }
   329  
   330  // Catch panics, usually caused by remote disconnects.
   331  func (c *MConnection) _recover() {
   332  	if r := recover(); r != nil {
   333  		c.Logger.Error("MConnection panicked", "err", r, "stack", string(debug.Stack()))
   334  		c.stopForError(fmt.Errorf("recovered from panic: %v", r))
   335  	}
   336  }
   337  
   338  func (c *MConnection) stopForError(r interface{}) {
   339  	c.Stop()
   340  	if atomic.CompareAndSwapUint32(&c.errored, 0, 1) {
   341  		if c.onError != nil {
   342  			c.onError(r)
   343  		}
   344  	}
   345  }
   346  
   347  // Send queues a message to be sent to the given channel, blocking until it is queued or the send times out.
   348  func (c *MConnection) Send(chID byte, msgBytes []byte) bool {
   349  	if !c.IsRunning() {
   350  		return false
   351  	}
   352  
   353  	c.Logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
   354  
   355  	// Send message to channel.
   356  	channel, ok := c.channelsIdx[chID]
   357  	if !ok {
   358  		c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
   359  		return false
   360  	}
   361  
   362  	success := channel.sendBytes(msgBytes)
   363  	if success {
   364  		// Wake up sendRoutine if necessary
   365  		select {
   366  		case c.send <- struct{}{}:
   367  		default:
   368  		}
   369  	} else {
   370  		c.Logger.Debug("Send failed", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
   371  	}
   372  	return success
   373  }
   374  
   375  // TrySend queues a message to be sent to the given channel.
   376  // Nonblocking, returns true if successful.
   377  func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool {
   378  	if !c.IsRunning() {
   379  		return false
   380  	}
   381  
   382  	c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
   383  
   384  	// Send message to channel.
   385  	channel, ok := c.channelsIdx[chID]
   386  	if !ok {
   387  		c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
   388  		return false
   389  	}
   390  
   391  	ok = channel.trySendBytes(msgBytes)
   392  	if ok {
   393  		// Wake up sendRoutine if necessary
   394  		select {
   395  		case c.send <- struct{}{}:
   396  		default:
   397  		}
   398  	}
   399  
   400  	return ok
   401  }
   402  
   403  // CanSend returns true if you can send more data on the channel with the
   404  // given id, false otherwise. Use only as a heuristic.
   405  func (c *MConnection) CanSend(chID byte) bool {
   406  	if !c.IsRunning() {
   407  		return false
   408  	}
   409  
   410  	channel, ok := c.channelsIdx[chID]
   411  	if !ok {
   412  		c.Logger.Error(fmt.Sprintf("Unknown channel %X", chID))
   413  		return false
   414  	}
   415  	return channel.canSend()
   416  }
   417  
   418  // sendRoutine polls for packets to send from channels.
   419  func (c *MConnection) sendRoutine() {
   420  	defer c._recover()
   421  
   422  	protoWriter := protoio.NewDelimitedWriter(c.bufConnWriter)
   423  
   424  FOR_LOOP:
   425  	for {
   426  		var _n int
   427  		var err error
   428  	SELECTION:
   429  		select {
   430  		case <-c.flushTimer.Ch:
   431  			// NOTE: flushTimer.Set() must be called every time
   432  			// something is written to .bufConnWriter.
   433  			c.flush()
   434  		case <-c.chStatsTimer.C:
   435  			for _, channel := range c.channels {
   436  				channel.updateStats()
   437  			}
   438  		case <-c.pingTimer.C:
   439  			c.Logger.Debug("Send Ping")
   440  			_n, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPing{}))
   441  			if err != nil {
   442  				c.Logger.Error("Failed to send PacketPing", "err", err)
   443  				break SELECTION
   444  			}
   445  			c.sendMonitor.Update(_n)
   446  			c.Logger.Debug("Starting pong timer", "dur", c.config.PongTimeout)
   447  			c.pongTimer = time.AfterFunc(c.config.PongTimeout, func() {
   448  				select {
   449  				case c.pongTimeoutCh <- true:
   450  				default:
   451  				}
   452  			})
   453  			c.flush()
   454  		case timeout := <-c.pongTimeoutCh:
   455  			if timeout {
   456  				c.Logger.Debug("Pong timeout")
   457  				err = errors.New("pong timeout")
   458  			} else {
   459  				c.stopPongTimer()
   460  			}
   461  		case <-c.pong:
   462  			c.Logger.Debug("Send Pong")
   463  			_n, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPong{}))
   464  			if err != nil {
   465  				c.Logger.Error("Failed to send PacketPong", "err", err)
   466  				break SELECTION
   467  			}
   468  			c.sendMonitor.Update(_n)
   469  			c.flush()
   470  		case <-c.quitSendRoutine:
   471  			break FOR_LOOP
   472  		case <-c.send:
   473  			// Send some PacketMsgs
   474  			eof := c.sendSomePacketMsgs()
   475  			if !eof {
   476  				// Keep sendRoutine awake.
   477  				select {
   478  				case c.send <- struct{}{}:
   479  				default:
   480  				}
   481  			}
   482  		}
   483  
   484  		if !c.IsRunning() {
   485  			break FOR_LOOP
   486  		}
   487  		if err != nil {
   488  			c.Logger.Error("Connection failed @ sendRoutine", "conn", c, "err", err)
   489  			c.stopForError(err)
   490  			break FOR_LOOP
   491  		}
   492  	}
   493  
   494  	// Cleanup
   495  	c.stopPongTimer()
   496  	close(c.doneSendRoutine)
   497  }
   498  
   499  // Returns true if messages from channels were exhausted.
   500  // Blocks in accordance with .sendMonitor throttling.
   501  func (c *MConnection) sendSomePacketMsgs() bool {
   502  	// Block until .sendMonitor says we can write.
   503  	// Once we're ready we send more than we asked for,
   504  	// but amortized it should even out.
   505  	c.sendMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true)
   506  
   507  	// Now send some PacketMsgs.
   508  	for i := 0; i < numBatchPacketMsgs; i++ {
   509  		if c.sendPacketMsg() {
   510  			return true
   511  		}
   512  	}
   513  	return false
   514  }
   515  
   516  // Returns true if messages from channels were exhausted.
   517  func (c *MConnection) sendPacketMsg() bool {
   518  	// Choose a channel to create a PacketMsg from.
   519  	// The chosen channel will be the one whose recentlySent/priority is the least.
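        	// For example (illustrative numbers): a channel with priority 10 and
        	// recentlySent 1000 has ratio 100, while one with priority 1 and
        	// recentlySent 500 has ratio 500, so the priority-10 channel is chosen.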
   520  	var leastRatio float32 = math.MaxFloat32
   521  	var leastChannel *Channel
   522  	for _, channel := range c.channels {
   523  		// If nothing to send, skip this channel
   524  		if !channel.isSendPending() {
   525  			continue
   526  		}
   527  		// Get ratio, and keep track of lowest ratio.
   528  		ratio := float32(channel.recentlySent) / float32(channel.desc.Priority)
   529  		if ratio < leastRatio {
   530  			leastRatio = ratio
   531  			leastChannel = channel
   532  		}
   533  	}
   534  
   535  	// Nothing to send?
   536  	if leastChannel == nil {
   537  		return true
   538  	}
   539  	// c.Logger.Info("Found a msgPacket to send")
   540  
   541  	// Make & send a PacketMsg from this channel
   542  	_n, err := leastChannel.writePacketMsgTo(c.bufConnWriter)
   543  	if err != nil {
   544  		c.Logger.Error("Failed to write PacketMsg", "err", err)
   545  		c.stopForError(err)
   546  		return true
   547  	}
   548  	c.sendMonitor.Update(_n)
   549  	c.flushTimer.Set()
   550  	return false
   551  }
   552  
   553  // recvRoutine reads PacketMsgs and reconstructs the message using the channels' "recving" buffer.
   554  // After a whole message has been assembled, it's pushed to onReceive().
   555  // Blocks while the connection is throttled (see recvMonitor);
   556  // otherwise, it never blocks.
   557  func (c *MConnection) recvRoutine() {
   558  	defer c._recover()
   559  
   560  	protoReader := protoio.NewDelimitedReader(c.bufConnReader, c._maxPacketMsgSize)
   561  
   562  FOR_LOOP:
   563  	for {
   564  		// Block until .recvMonitor says we can read.
   565  		c.recvMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.RecvRate), true)
   566  
   567  		// Peek into bufConnReader for debugging
   568  		/*
   569  			if numBytes := c.bufConnReader.Buffered(); numBytes > 0 {
   570  				bz, err := c.bufConnReader.Peek(tmmath.MinInt(numBytes, 100))
   571  				if err == nil {
   572  					// return
   573  				} else {
   574  					c.Logger.Debug("Error peeking connection buffer", "err", err)
   575  					// return nil
   576  				}
   577  				c.Logger.Info("Peek connection buffer", "numBytes", numBytes, "bz", bz)
   578  			}
   579  		*/
   580  
   581  		// Read packet type
   582  		var packet tmp2p.Packet
   583  
   584  		err := protoReader.ReadMsg(&packet)
   585  		if err != nil {
   586  			// stopServices was invoked and we are shutting down;
   587  			// receiving is expected to fail since we will close the connection
   588  			select {
   589  			case <-c.quitRecvRoutine:
   590  				break FOR_LOOP
   591  			default:
   592  			}
   593  
   594  			if c.IsRunning() {
   595  				if err == io.EOF {
   596  					c.Logger.Info("Connection is closed @ recvRoutine (likely by the other side)", "conn", c)
   597  				} else {
   598  					c.Logger.Error("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err)
   599  				}
   600  				c.stopForError(err)
   601  			}
   602  			break FOR_LOOP
   603  		}
   604  
   605  		// Read more depending on packet type.
   606  		switch pkt := packet.Sum.(type) {
   607  		case *tmp2p.Packet_PacketPing:
   608  			// TODO: prevent abuse, as they cause flush()'s.
   609  			// https://github.com/tendermint/tendermint/issues/1190
   610  			c.Logger.Debug("Receive Ping")
   611  			select {
   612  			case c.pong <- struct{}{}:
   613  			default:
   614  				// never block
   615  			}
   616  		case *tmp2p.Packet_PacketPong:
   617  			c.Logger.Debug("Receive Pong")
   618  			select {
   619  			case c.pongTimeoutCh <- false:
   620  			default:
   621  				// never block
   622  			}
   623  		case *tmp2p.Packet_PacketMsg:
   624  			channel, ok := c.channelsIdx[byte(pkt.PacketMsg.ChannelID)]
   625  			if !ok || channel == nil {
   626  				err := fmt.Errorf("unknown channel %X", pkt.PacketMsg.ChannelID)
   627  				c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
   628  				c.stopForError(err)
   629  				break FOR_LOOP
   630  			}
   631  
   632  			msgBytes, err := channel.recvPacketMsg(*pkt.PacketMsg)
   633  			if err != nil {
   634  				if c.IsRunning() {
   635  					c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
   636  					c.stopForError(err)
   637  				}
   638  				break FOR_LOOP
   639  			}
   640  			if msgBytes != nil {
   641  				c.Logger.Debug("Received bytes", "chID", pkt.PacketMsg.ChannelID, "msgBytes", fmt.Sprintf("%X", msgBytes))
   642  				// NOTE: This means the reactor.Receive runs in the same thread as the p2p recv routine
   643  				c.onReceive(byte(pkt.PacketMsg.ChannelID), msgBytes)
   644  			}
   645  		default:
   646  			err := fmt.Errorf("unknown message type %v", reflect.TypeOf(packet))
   647  			c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
   648  			c.stopForError(err)
   649  			break FOR_LOOP
   650  		}
   651  	}
   652  
   653  	// Cleanup
   654  	close(c.pong)
   655  	for range c.pong {
   656  		// Drain
   657  	}
   658  }
   659  
   660  // not goroutine-safe
   661  func (c *MConnection) stopPongTimer() {
   662  	if c.pongTimer != nil {
   663  		_ = c.pongTimer.Stop()
   664  		c.pongTimer = nil
   665  	}
   666  }
   667  
   668  // maxPacketMsgSize returns the maximum size of a marshaled PacketMsg (wrapped in a Packet) given the configured max payload size.
   669  func (c *MConnection) maxPacketMsgSize() int {
   670  	bz, err := proto.Marshal(mustWrapPacket(&tmp2p.PacketMsg{
   671  		ChannelID: 0x01,
   672  		EOF:       true,
   673  		Data:      make([]byte, c.config.MaxPacketMsgPayloadSize),
   674  	}))
   675  	if err != nil {
   676  		panic(err)
   677  	}
   678  	return len(bz)
   679  }
   680  
   681  type ConnectionStatus struct {
   682  	Duration    time.Duration
   683  	SendMonitor flow.Status
   684  	RecvMonitor flow.Status
   685  	Channels    []ChannelStatus
   686  }
   687  
   688  type ChannelStatus struct {
   689  	ID                byte
   690  	SendQueueCapacity int
   691  	SendQueueSize     int
   692  	Priority          int
   693  	RecentlySent      int64
   694  }
   695  
   696  func (c *MConnection) Status() ConnectionStatus {
   697  	var status ConnectionStatus
   698  	status.Duration = time.Since(c.created)
   699  	status.SendMonitor = c.sendMonitor.Status()
   700  	status.RecvMonitor = c.recvMonitor.Status()
   701  	status.Channels = make([]ChannelStatus, len(c.channels))
   702  	for i, channel := range c.channels {
   703  		status.Channels[i] = ChannelStatus{
   704  			ID:                channel.desc.ID,
   705  			SendQueueCapacity: cap(channel.sendQueue),
   706  			SendQueueSize:     int(atomic.LoadInt32(&channel.sendQueueSize)),
   707  			Priority:          channel.desc.Priority,
   708  			RecentlySent:      atomic.LoadInt64(&channel.recentlySent),
   709  		}
   710  	}
   711  	return status
   712  }
   713  
   714  //-----------------------------------------------------------------------------
   715  
   716  type ChannelDescriptor struct {
   717  	ID                  byte
   718  	Priority            int
   719  	SendQueueCapacity   int
   720  	RecvBufferCapacity  int
   721  	RecvMessageCapacity int
   722  }
   723  
   724  func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) {
   725  	if chDesc.SendQueueCapacity == 0 {
   726  		chDesc.SendQueueCapacity = defaultSendQueueCapacity
   727  	}
   728  	if chDesc.RecvBufferCapacity == 0 {
   729  		chDesc.RecvBufferCapacity = defaultRecvBufferCapacity
   730  	}
   731  	if chDesc.RecvMessageCapacity == 0 {
   732  		chDesc.RecvMessageCapacity = defaultRecvMessageCapacity
   733  	}
   734  	filled = chDesc
   735  	return
   736  }
   737  
   738  // TODO: lowercase.
   739  // NOTE: not goroutine-safe.
   740  type Channel struct {
   741  	conn          *MConnection
   742  	desc          ChannelDescriptor
   743  	sendQueue     chan []byte
   744  	sendQueueSize int32 // atomic.
   745  	recving       []byte
   746  	sending       []byte
   747  	recentlySent  int64 // exponential moving average
   748  
   749  	maxPacketMsgPayloadSize int
   750  
   751  	Logger log.Logger
   752  }
   753  
   754  func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
   755  	desc = desc.FillDefaults()
   756  	if desc.Priority <= 0 {
   757  		panic("Channel default priority must be a positive integer")
   758  	}
   759  	return &Channel{
   760  		conn:                    conn,
   761  		desc:                    desc,
   762  		sendQueue:               make(chan []byte, desc.SendQueueCapacity),
   763  		recving:                 make([]byte, 0, desc.RecvBufferCapacity),
   764  		maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize,
   765  	}
   766  }
   767  
   768  func (ch *Channel) SetLogger(l log.Logger) {
   769  	ch.Logger = l
   770  }
   771  
   772  // Queues a message to be sent to this channel.
   773  // Goroutine-safe
   774  // Times out (and returns false) after defaultSendTimeout
   775  func (ch *Channel) sendBytes(bytes []byte) bool {
   776  	select {
   777  	case ch.sendQueue <- bytes:
   778  		atomic.AddInt32(&ch.sendQueueSize, 1)
   779  		return true
   780  	case <-time.After(defaultSendTimeout):
   781  		return false
   782  	}
   783  }
   784  
   785  // Queues a message to be sent to this channel.
   786  // Nonblocking, returns true if successful.
   787  // Goroutine-safe
   788  func (ch *Channel) trySendBytes(bytes []byte) bool {
   789  	select {
   790  	case ch.sendQueue <- bytes:
   791  		atomic.AddInt32(&ch.sendQueueSize, 1)
   792  		return true
   793  	default:
   794  		return false
   795  	}
   796  }
   797  
   798  // Goroutine-safe
   799  func (ch *Channel) loadSendQueueSize() (size int) {
   800  	return int(atomic.LoadInt32(&ch.sendQueueSize))
   801  }
   802  
   803  // Goroutine-safe
   804  // Use only as a heuristic.
   805  func (ch *Channel) canSend() bool {
   806  	return ch.loadSendQueueSize() < defaultSendQueueCapacity
   807  }
   808  
   809  // Returns true if any PacketMsgs are pending to be sent.
   810  // Call before calling nextPacketMsg()
   811  // Goroutine-safe
   812  func (ch *Channel) isSendPending() bool {
   813  	if len(ch.sending) == 0 {
   814  		if len(ch.sendQueue) == 0 {
   815  			return false
   816  		}
   817  		ch.sending = <-ch.sendQueue
   818  	}
   819  	return true
   820  }
   821  
   822  // Creates a new PacketMsg to send.
   823  // Not goroutine-safe
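        // For example (illustrative numbers): 2500 bytes of pending data with a
        // 1024-byte maxPacketMsgPayloadSize are emitted as packets of 1024, 1024 and
        // 452 bytes, and only the last packet has EOF set.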
   824  func (ch *Channel) nextPacketMsg() tmp2p.PacketMsg {
   825  	packet := tmp2p.PacketMsg{ChannelID: int32(ch.desc.ID)}
   826  	maxSize := ch.maxPacketMsgPayloadSize
   827  	packet.Data = ch.sending[:tmmath.MinInt(maxSize, len(ch.sending))]
   828  	if len(ch.sending) <= maxSize {
   829  		packet.EOF = true
   830  		ch.sending = nil
   831  		atomic.AddInt32(&ch.sendQueueSize, -1) // decrement sendQueueSize
   832  	} else {
   833  		packet.EOF = false
   834  		ch.sending = ch.sending[tmmath.MinInt(maxSize, len(ch.sending)):]
   835  	}
   836  	return packet
   837  }
   838  
   839  // Writes the next PacketMsg to w and updates ch.recentlySent.
   840  // Not goroutine-safe
   841  func (ch *Channel) writePacketMsgTo(w io.Writer) (n int, err error) {
   842  	packet := ch.nextPacketMsg()
   843  	n, err = protoio.NewDelimitedWriter(w).WriteMsg(mustWrapPacket(&packet))
   844  	atomic.AddInt64(&ch.recentlySent, int64(n))
   845  	return
   846  }
   847  
   848  // Handles incoming PacketMsgs. It returns the message bytes once the message is
   849  // complete. NOTE: the returned bytes may change on the next call to recvPacketMsg.
   850  // Not goroutine-safe
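        // For example (illustrative numbers): three packets carrying 1024, 1024 and
        // 452 bytes, with EOF set only on the last, return nil twice and then a single
        // 2500-byte message on the third call.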
   851  func (ch *Channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) {
   852  	ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet)
   853  	var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Data)
   854  	if recvCap < recvReceived {
   855  		return nil, fmt.Errorf("received message exceeds available capacity: %v < %v", recvCap, recvReceived)
   856  	}
   857  	ch.recving = append(ch.recving, packet.Data...)
   858  	if packet.EOF {
   859  		msgBytes := ch.recving
   860  
   861  		// clear the slice without re-allocating.
   862  		// http://stackoverflow.com/questions/16971741/how-do-you-clear-a-slice-in-go
   863  		//   suggests this could be a memory leak, but we might as well keep the memory for the channel until it closes,
   864  		//	at which point the recving slice stops being used and should be garbage collected
   865  		ch.recving = ch.recving[:0] // make([]byte, 0, ch.desc.RecvBufferCapacity)
   866  		return msgBytes, nil
   867  	}
   868  	return nil, nil
   869  }
   870  
   871  // Call this periodically to update stats for throttling purposes.
   872  // Not goroutine-safe
   873  func (ch *Channel) updateStats() {
   874  	// Exponential decay of stats.
   875  	// TODO: optimize.
   876  	atomic.StoreInt64(&ch.recentlySent, int64(float64(atomic.LoadInt64(&ch.recentlySent))*0.8))
   877  }
   878  
   879  //----------------------------------------
   880  // Packet
   881  
   882  // mustWrapPacket takes a packet kind (oneof) and wraps it in a tmp2p.Packet message. It panics on unknown packet types.
   883  func mustWrapPacket(pb proto.Message) *tmp2p.Packet {
   884  	var msg tmp2p.Packet
   885  
   886  	switch pb := pb.(type) {
   887  	case *tmp2p.Packet: // already a packet
   888  		msg = *pb
   889  	case *tmp2p.PacketPing:
   890  		msg = tmp2p.Packet{
   891  			Sum: &tmp2p.Packet_PacketPing{
   892  				PacketPing: pb,
   893  			},
   894  		}
   895  	case *tmp2p.PacketPong:
   896  		msg = tmp2p.Packet{
   897  			Sum: &tmp2p.Packet_PacketPong{
   898  				PacketPong: pb,
   899  			},
   900  		}
   901  	case *tmp2p.PacketMsg:
   902  		msg = tmp2p.Packet{
   903  			Sum: &tmp2p.Packet_PacketMsg{
   904  				PacketMsg: pb,
   905  			},
   906  		}
   907  	default:
   908  		panic(fmt.Errorf("unknown packet type %T", pb))
   909  	}
   910  
   911  	return &msg
   912  }