github.com/soomindae/tendermint@v0.0.5-0.20210528140126-84a0c70c8162/p2p/conn/connection.go

     1  package conn
     2  
     3  import (
     4  	"bufio"
     5  	"errors"
     6  	"fmt"
     7  	"io"
     8  	"math"
     9  	"net"
    10  	"reflect"
    11  	"runtime/debug"
    12  	"sync/atomic"
    13  	"time"
    14  
    15  	"github.com/gogo/protobuf/proto"
    16  
    17  	flow "github.com/soomindae/tendermint/libs/flowrate"
    18  	"github.com/soomindae/tendermint/libs/log"
    19  	tmmath "github.com/soomindae/tendermint/libs/math"
    20  	"github.com/soomindae/tendermint/libs/protoio"
    21  	"github.com/soomindae/tendermint/libs/service"
    22  	tmsync "github.com/soomindae/tendermint/libs/sync"
    23  	"github.com/soomindae/tendermint/libs/timer"
    24  	tmp2p "github.com/soomindae/tendermint/proto/tendermint/p2p"
    25  )
    26  
    27  const (
    28  	defaultMaxPacketMsgPayloadSize = 1024
    29  
    30  	numBatchPacketMsgs = 10
    31  	minReadBufferSize  = 1024
    32  	minWriteBufferSize = 65536
    33  	updateStats        = 2 * time.Second
    34  
    35  	// some of these defaults are written in the user config
    36  	// flushThrottle, sendRate, recvRate
    37  	// TODO: remove values present in config
    38  	defaultFlushThrottle = 100 * time.Millisecond
    39  
    40  	defaultSendQueueCapacity   = 1
    41  	defaultRecvBufferCapacity  = 4096
    42  	defaultRecvMessageCapacity = 22020096      // 21MB
    43  	defaultSendRate            = int64(512000) // 500KB/s
    44  	defaultRecvRate            = int64(512000) // 500KB/s
    45  	defaultSendTimeout         = 10 * time.Second
    46  	defaultPingInterval        = 60 * time.Second
    47  	defaultPongTimeout         = 45 * time.Second
    48  )
    49  
    50  type receiveCbFunc func(chID byte, msgBytes []byte)
    51  type errorCbFunc func(interface{})
    52  
    53  /*
    54  Each peer has one `MConnection` (multiplex connection) instance.
    55  
    56  __multiplex__ *noun* a system or signal involving simultaneous transmission of
    57  several messages along a single channel of communication.
    58  
    59  Each `MConnection` handles message transmission on multiple abstract communication
    60  `Channel`s.  Each channel has a globally unique byte id.
    61  The byte id and the relative priorities of each `Channel` are configured upon
    62  initialization of the connection.
    63  
    64  There are two methods for sending messages:
    65  	func (m MConnection) Send(chID byte, msgBytes []byte) bool {}
    66  	func (m MConnection) TrySend(chID byte, msgBytes []byte) bool {}
    67  
    68  `Send(chID, msgBytes)` is a blocking call that waits until `msgBytes` is
    69  successfully queued for the channel with the given id byte `chID`, or until the
    70  request times out.  The message bytes are sent in one or more Protobuf-encoded PacketMsgs.
    71  
    72  `TrySend(chID, msgBytes)` is a nonblocking call that returns false if the
    73  channel's queue is full.
    74  
    75  Inbound message bytes are handled with an onReceive callback function.
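
        A minimal usage sketch (the dial target, channel ID, and handler bodies below
        are illustrative assumptions, not part of this package):

        	conn, err := net.Dial("tcp", "127.0.0.1:26656") // placeholder address
        	if err != nil {
        		return err
        	}
        	chDescs := []*ChannelDescriptor{{ID: 0x01, Priority: 1}}
        	onReceive := func(chID byte, msgBytes []byte) {
        		fmt.Printf("received on channel %X: %X\n", chID, msgBytes)
        	}
        	onError := func(r interface{}) {
        		fmt.Println("connection error:", r)
        	}
        	mconn := NewMConnection(conn, chDescs, onReceive, onError)
        	if err := mconn.Start(); err != nil { // Start comes from the embedded service.BaseService
        		return err
        	}
        	if ok := mconn.Send(0x01, []byte("hello")); !ok { // blocks for up to defaultSendTimeout
        		// message was not queued (timeout or unknown channel)
        	}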
    76  */
    77  type MConnection struct {
    78  	service.BaseService
    79  
    80  	conn          net.Conn
    81  	bufConnReader *bufio.Reader
    82  	bufConnWriter *bufio.Writer
    83  	sendMonitor   *flow.Monitor
    84  	recvMonitor   *flow.Monitor
    85  	send          chan struct{}
    86  	pong          chan struct{}
    87  	channels      []*Channel
    88  	channelsIdx   map[byte]*Channel
    89  	onReceive     receiveCbFunc
    90  	onError       errorCbFunc
    91  	errored       uint32
    92  	config        MConnConfig
    93  
    94  	// Closing quitSendRoutine will cause the sendRoutine to eventually quit.
    95  	// doneSendRoutine is closed when the sendRoutine actually quits.
    96  	quitSendRoutine chan struct{}
    97  	doneSendRoutine chan struct{}
    98  
    99  	// Closing quitRecvRoutine will cause the recvRoutine to eventually quit.
   100  	quitRecvRoutine chan struct{}
   101  
   102  	// used to ensure FlushStop and OnStop
   103  	// are safe to call concurrently.
   104  	stopMtx tmsync.Mutex
   105  
   106  	flushTimer *timer.ThrottleTimer // flush writes as necessary but throttled.
   107  	pingTimer  *time.Ticker         // send pings periodically
   108  
   109  	// close conn if pong is not received in pongTimeout
   110  	pongTimer     *time.Timer
   111  	pongTimeoutCh chan bool // true - timeout, false - peer sent pong
   112  
   113  	chStatsTimer *time.Ticker // update channel stats periodically
   114  
   115  	created time.Time // time of creation
   116  
   117  	_maxPacketMsgSize int
   118  }
   119  
   120  // MConnConfig is a MConnection configuration.
   121  type MConnConfig struct {
   122  	SendRate int64 `mapstructure:"send_rate"`
   123  	RecvRate int64 `mapstructure:"recv_rate"`
   124  
   125  	// Maximum payload size
   126  	MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`
   127  
   128  	// Interval to flush writes (throttled)
   129  	FlushThrottle time.Duration `mapstructure:"flush_throttle"`
   130  
   131  	// Interval to send pings
   132  	PingInterval time.Duration `mapstructure:"ping_interval"`
   133  
   134  	// Maximum wait time for pongs
   135  	PongTimeout time.Duration `mapstructure:"pong_timeout"`
   136  }
   137  
   138  // DefaultMConnConfig returns the default config.
   139  func DefaultMConnConfig() MConnConfig {
   140  	return MConnConfig{
   141  		SendRate:                defaultSendRate,
   142  		RecvRate:                defaultRecvRate,
   143  		MaxPacketMsgPayloadSize: defaultMaxPacketMsgPayloadSize,
   144  		FlushThrottle:           defaultFlushThrottle,
   145  		PingInterval:            defaultPingInterval,
   146  		PongTimeout:             defaultPongTimeout,
   147  	}
   148  }
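
        // Example (a sketch with illustrative values; conn, chDescs, onReceive and
        // onError are assumed to be set up as in the usage sketch above): when
        // overriding the defaults, PongTimeout must stay strictly below PingInterval,
        // otherwise NewMConnectionWithConfig panics.
        //
        //	cfg := DefaultMConnConfig()
        //	cfg.SendRate = 1024000             // ~1000 KB/s
        //	cfg.RecvRate = 1024000             // ~1000 KB/s
        //	cfg.PingInterval = 30 * time.Second
        //	cfg.PongTimeout = 20 * time.Second // must be < PingInterval
        //	mconn := NewMConnectionWithConfig(conn, chDescs, onReceive, onError, cfg)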
   149  
   150  // NewMConnection wraps net.Conn and creates a multiplex connection using the default config.
   151  func NewMConnection(
   152  	conn net.Conn,
   153  	chDescs []*ChannelDescriptor,
   154  	onReceive receiveCbFunc,
   155  	onError errorCbFunc,
   156  ) *MConnection {
   157  	return NewMConnectionWithConfig(
   158  		conn,
   159  		chDescs,
   160  		onReceive,
   161  		onError,
   162  		DefaultMConnConfig())
   163  }
   164  
   165  // NewMConnectionWithConfig wraps net.Conn and creates a multiplex connection with the given config
   166  func NewMConnectionWithConfig(
   167  	conn net.Conn,
   168  	chDescs []*ChannelDescriptor,
   169  	onReceive receiveCbFunc,
   170  	onError errorCbFunc,
   171  	config MConnConfig,
   172  ) *MConnection {
   173  	if config.PongTimeout >= config.PingInterval {
   174  		panic("pongTimeout must be less than pingInterval (otherwise, next ping will reset pong timer)")
   175  	}
   176  
   177  	mconn := &MConnection{
   178  		conn:          conn,
   179  		bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize),
   180  		bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize),
   181  		sendMonitor:   flow.New(0, 0),
   182  		recvMonitor:   flow.New(0, 0),
   183  		send:          make(chan struct{}, 1),
   184  		pong:          make(chan struct{}, 1),
   185  		onReceive:     onReceive,
   186  		onError:       onError,
   187  		config:        config,
   188  		created:       time.Now(),
   189  	}
   190  
   191  	// Create channels
   192  	var channelsIdx = map[byte]*Channel{}
   193  	var channels = []*Channel{}
   194  
   195  	for _, desc := range chDescs {
   196  		channel := newChannel(mconn, *desc)
   197  		channelsIdx[channel.desc.ID] = channel
   198  		channels = append(channels, channel)
   199  	}
   200  	mconn.channels = channels
   201  	mconn.channelsIdx = channelsIdx
   202  
   203  	mconn.BaseService = *service.NewBaseService(nil, "MConnection", mconn)
   204  
   205  	// maxPacketMsgSize() is a bit heavy, so call just once
   206  	mconn._maxPacketMsgSize = mconn.maxPacketMsgSize()
   207  
   208  	return mconn
   209  }
   210  
   211  func (c *MConnection) SetLogger(l log.Logger) {
   212  	c.BaseService.SetLogger(l)
   213  	for _, ch := range c.channels {
   214  		ch.SetLogger(l)
   215  	}
   216  }
   217  
   218  // OnStart implements BaseService
   219  func (c *MConnection) OnStart() error {
   220  	if err := c.BaseService.OnStart(); err != nil {
   221  		return err
   222  	}
   223  	c.flushTimer = timer.NewThrottleTimer("flush", c.config.FlushThrottle)
   224  	c.pingTimer = time.NewTicker(c.config.PingInterval)
   225  	c.pongTimeoutCh = make(chan bool, 1)
   226  	c.chStatsTimer = time.NewTicker(updateStats)
   227  	c.quitSendRoutine = make(chan struct{})
   228  	c.doneSendRoutine = make(chan struct{})
   229  	c.quitRecvRoutine = make(chan struct{})
   230  	go c.sendRoutine()
   231  	go c.recvRoutine()
   232  	return nil
   233  }
   234  
   235  // stopServices stops the BaseService and timers and closes the quitSendRoutine
   236  // and quitRecvRoutine channels. If they were already closed, it returns true; otherwise false.
   237  // It uses the stopMtx to ensure only one of FlushStop and OnStop can do this at a time.
   238  func (c *MConnection) stopServices() (alreadyStopped bool) {
   239  	c.stopMtx.Lock()
   240  	defer c.stopMtx.Unlock()
   241  
   242  	select {
   243  	case <-c.quitSendRoutine:
   244  		// already quit
   245  		return true
   246  	default:
   247  	}
   248  
   249  	select {
   250  	case <-c.quitRecvRoutine:
   251  		// already quit
   252  		return true
   253  	default:
   254  	}
   255  
   256  	c.BaseService.OnStop()
   257  	c.flushTimer.Stop()
   258  	c.pingTimer.Stop()
   259  	c.chStatsTimer.Stop()
   260  
   261  	// inform the recvRoutine that we are shutting down
   262  	close(c.quitRecvRoutine)
   263  	close(c.quitSendRoutine)
   264  	return false
   265  }
   266  
   267  // FlushStop replicates the logic of OnStop.
   268  // It additionally ensures that all successful
   269  // .Send() calls will get flushed before closing
   270  // the connection.
   271  func (c *MConnection) FlushStop() {
   272  	if c.stopServices() {
   273  		return
   274  	}
   275  
   276  	// this block is unique to FlushStop
   277  	{
   278  		// wait until the sendRoutine exits
   279  		// so we don't race on calling sendSomePacketMsgs
   280  		<-c.doneSendRoutine
   281  
   282  		// Send and flush all pending msgs.
   283  		// Since sendRoutine has exited, we can call this
   284  		// safely
   285  		eof := c.sendSomePacketMsgs()
   286  		for !eof {
   287  			eof = c.sendSomePacketMsgs()
   288  		}
   289  		c.flush()
   290  
   291  		// Now we can close the connection
   292  	}
   293  
   294  	c.conn.Close()
   295  
   296  	// We can't close pong safely here because
   297  	// recvRoutine may write to it after we've stopped.
   298  	// Though it doesn't need to get closed at all,
   299  	// we close it @ recvRoutine.
   300  
   301  	// c.Stop()
   302  }
   303  
   304  // OnStop implements BaseService
   305  func (c *MConnection) OnStop() {
   306  	if c.stopServices() {
   307  		return
   308  	}
   309  
   310  	c.conn.Close()
   311  
   312  	// We can't close pong safely here because
   313  	// recvRoutine may write to it after we've stopped.
   314  	// Though it doesn't need to get closed at all,
   315  	// we close it @ recvRoutine.
   316  }
   317  
   318  func (c *MConnection) String() string {
   319  	return fmt.Sprintf("MConn{%v}", c.conn.RemoteAddr())
   320  }
   321  
   322  func (c *MConnection) flush() {
   323  	c.Logger.Debug("Flush", "conn", c)
   324  	err := c.bufConnWriter.Flush()
   325  	if err != nil {
   326  		c.Logger.Debug("MConnection flush failed", "err", err)
   327  	}
   328  }
   329  
   330  // Catch panics, usually caused by remote disconnects.
   331  func (c *MConnection) _recover() {
   332  	if r := recover(); r != nil {
   333  		c.Logger.Error("MConnection panicked", "err", r, "stack", string(debug.Stack()))
   334  		c.stopForError(fmt.Errorf("recovered from panic: %v", r))
   335  	}
   336  }
   337  
   338  func (c *MConnection) stopForError(r interface{}) {
   339  	if err := c.Stop(); err != nil {
   340  		c.Logger.Error("Error stopping connection", "err", err)
   341  	}
   342  	if atomic.CompareAndSwapUint32(&c.errored, 0, 1) {
   343  		if c.onError != nil {
   344  			c.onError(r)
   345  		}
   346  	}
   347  }
   348  
   349  // Send queues a message to be sent to the given channel.
   350  func (c *MConnection) Send(chID byte, msgBytes []byte) bool {
   351  	if !c.IsRunning() {
   352  		return false
   353  	}
   354  
   355  	c.Logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
   356  
   357  	// Send message to channel.
   358  	channel, ok := c.channelsIdx[chID]
   359  	if !ok {
   360  		c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
   361  		return false
   362  	}
   363  
   364  	success := channel.sendBytes(msgBytes)
   365  	if success {
   366  		// Wake up sendRoutine if necessary
   367  		select {
   368  		case c.send <- struct{}{}:
   369  		default:
   370  		}
   371  	} else {
   372  		c.Logger.Debug("Send failed", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
   373  	}
   374  	return success
   375  }
   376  
   377  // TrySend queues a message to be sent to the given channel.
   378  // Nonblocking, returns true if successful.
   379  func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool {
   380  	if !c.IsRunning() {
   381  		return false
   382  	}
   383  
   384  	c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
   385  
   386  	// Send message to channel.
   387  	channel, ok := c.channelsIdx[chID]
   388  	if !ok {
   389  		c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
   390  		return false
   391  	}
   392  
   393  	ok = channel.trySendBytes(msgBytes)
   394  	if ok {
   395  		// Wake up sendRoutine if necessary
   396  		select {
   397  		case c.send <- struct{}{}:
   398  		default:
   399  		}
   400  	}
   401  
   402  	return ok
   403  }
   404  
   405  // CanSend returns true if you can send more data onto the channel chID, false
   406  // otherwise. Use only as a heuristic.
   407  func (c *MConnection) CanSend(chID byte) bool {
   408  	if !c.IsRunning() {
   409  		return false
   410  	}
   411  
   412  	channel, ok := c.channelsIdx[chID]
   413  	if !ok {
   414  		c.Logger.Error(fmt.Sprintf("Unknown channel %X", chID))
   415  		return false
   416  	}
   417  	return channel.canSend()
   418  }
   419  
   420  // sendRoutine polls for packets to send from channels.
   421  func (c *MConnection) sendRoutine() {
   422  	defer c._recover()
   423  
   424  	protoWriter := protoio.NewDelimitedWriter(c.bufConnWriter)
   425  
   426  FOR_LOOP:
   427  	for {
   428  		var _n int
   429  		var err error
   430  	SELECTION:
   431  		select {
   432  		case <-c.flushTimer.Ch:
   433  			// NOTE: flushTimer.Set() must be called every time
   434  			// something is written to .bufConnWriter.
   435  			c.flush()
   436  		case <-c.chStatsTimer.C:
   437  			for _, channel := range c.channels {
   438  				channel.updateStats()
   439  			}
   440  		case <-c.pingTimer.C:
   441  			c.Logger.Debug("Send Ping")
   442  			_n, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPing{}))
   443  			if err != nil {
   444  				c.Logger.Error("Failed to send PacketPing", "err", err)
   445  				break SELECTION
   446  			}
   447  			c.sendMonitor.Update(_n)
   448  			c.Logger.Debug("Starting pong timer", "dur", c.config.PongTimeout)
   449  			c.pongTimer = time.AfterFunc(c.config.PongTimeout, func() {
   450  				select {
   451  				case c.pongTimeoutCh <- true:
   452  				default:
   453  				}
   454  			})
   455  			c.flush()
   456  		case timeout := <-c.pongTimeoutCh:
   457  			if timeout {
   458  				c.Logger.Debug("Pong timeout")
   459  				err = errors.New("pong timeout")
   460  			} else {
   461  				c.stopPongTimer()
   462  			}
   463  		case <-c.pong:
   464  			c.Logger.Debug("Send Pong")
   465  			_n, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPong{}))
   466  			if err != nil {
   467  				c.Logger.Error("Failed to send PacketPong", "err", err)
   468  				break SELECTION
   469  			}
   470  			c.sendMonitor.Update(_n)
   471  			c.flush()
   472  		case <-c.quitSendRoutine:
   473  			break FOR_LOOP
   474  		case <-c.send:
   475  			// Send some PacketMsgs
   476  			eof := c.sendSomePacketMsgs()
   477  			if !eof {
   478  				// Keep sendRoutine awake.
   479  				select {
   480  				case c.send <- struct{}{}:
   481  				default:
   482  				}
   483  			}
   484  		}
   485  
   486  		if !c.IsRunning() {
   487  			break FOR_LOOP
   488  		}
   489  		if err != nil {
   490  			c.Logger.Error("Connection failed @ sendRoutine", "conn", c, "err", err)
   491  			c.stopForError(err)
   492  			break FOR_LOOP
   493  		}
   494  	}
   495  
   496  	// Cleanup
   497  	c.stopPongTimer()
   498  	close(c.doneSendRoutine)
   499  }
   500  
   501  // Returns true if messages from channels were exhausted.
   502  // Blocks in accordance with .sendMonitor throttling.
   503  func (c *MConnection) sendSomePacketMsgs() bool {
   504  	// Block until .sendMonitor says we can write.
   505  	// Once we're ready we send more than we asked for,
   506  	// but amortized it should even out.
   507  	c.sendMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true)
   508  
   509  	// Now send some PacketMsgs.
   510  	for i := 0; i < numBatchPacketMsgs; i++ {
   511  		if c.sendPacketMsg() {
   512  			return true
   513  		}
   514  	}
   515  	return false
   516  }
   517  
   518  // Returns true if messages from channels were exhausted.
   519  func (c *MConnection) sendPacketMsg() bool {
   520  	// Choose a channel to create a PacketMsg from.
   521  	// The chosen channel will be the one whose recentlySent/priority is the least.
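        	// For example (illustrative numbers): a channel with priority 10 and
        	// recentlySent 1000 has ratio 100, while a channel with priority 1 and
        	// recentlySent 50 has ratio 50, so the second channel is chosen next.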
   522  	var leastRatio float32 = math.MaxFloat32
   523  	var leastChannel *Channel
   524  	for _, channel := range c.channels {
   525  		// If nothing to send, skip this channel
   526  		if !channel.isSendPending() {
   527  			continue
   528  		}
   529  		// Get ratio, and keep track of lowest ratio.
   530  		ratio := float32(channel.recentlySent) / float32(channel.desc.Priority)
   531  		if ratio < leastRatio {
   532  			leastRatio = ratio
   533  			leastChannel = channel
   534  		}
   535  	}
   536  
   537  	// Nothing to send?
   538  	if leastChannel == nil {
   539  		return true
   540  	}
   541  	// c.Logger.Info("Found a msgPacket to send")
   542  
   543  	// Make & send a PacketMsg from this channel
   544  	_n, err := leastChannel.writePacketMsgTo(c.bufConnWriter)
   545  	if err != nil {
   546  		c.Logger.Error("Failed to write PacketMsg", "err", err)
   547  		c.stopForError(err)
   548  		return true
   549  	}
   550  	c.sendMonitor.Update(_n)
   551  	c.flushTimer.Set()
   552  	return false
   553  }
   554  
   555  // recvRoutine reads PacketMsgs and reconstructs the message using the channels' "recving" buffer.
   556  // After a whole message has been assembled, it's pushed to onReceive().
   557  // It blocks only as required by the connection's receive-rate throttling;
   558  // otherwise, it never blocks.
   559  func (c *MConnection) recvRoutine() {
   560  	defer c._recover()
   561  
   562  	protoReader := protoio.NewDelimitedReader(c.bufConnReader, c._maxPacketMsgSize)
   563  
   564  FOR_LOOP:
   565  	for {
   566  		// Block until .recvMonitor says we can read.
   567  		c.recvMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.RecvRate), true)
   568  
   569  		// Peek into bufConnReader for debugging
   570  		/*
   571  			if numBytes := c.bufConnReader.Buffered(); numBytes > 0 {
   572  				bz, err := c.bufConnReader.Peek(tmmath.MinInt(numBytes, 100))
   573  				if err == nil {
   574  					// return
   575  				} else {
   576  					c.Logger.Debug("Error peeking connection buffer", "err", err)
   577  					// return nil
   578  				}
   579  				c.Logger.Info("Peek connection buffer", "numBytes", numBytes, "bz", bz)
   580  			}
   581  		*/
   582  
   583  		// Read packet type
   584  		var packet tmp2p.Packet
   585  
   586  		_n, err := protoReader.ReadMsg(&packet)
   587  		c.recvMonitor.Update(_n)
   588  		if err != nil {
   589  			// stopServices was invoked and we are shutting down
   590  			// receiving is expected to fail since we will close the connection
   591  			select {
   592  			case <-c.quitRecvRoutine:
   593  				break FOR_LOOP
   594  			default:
   595  			}
   596  
   597  			if c.IsRunning() {
   598  				if err == io.EOF {
   599  					c.Logger.Info("Connection is closed @ recvRoutine (likely by the other side)", "conn", c)
   600  				} else {
   601  					c.Logger.Debug("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err)
   602  				}
   603  				c.stopForError(err)
   604  			}
   605  			break FOR_LOOP
   606  		}
   607  
   608  		// Read more depending on packet type.
   609  		switch pkt := packet.Sum.(type) {
   610  		case *tmp2p.Packet_PacketPing:
   611  			// TODO: prevent abuse, as they cause flush()'s.
   612  			// https://github.com/soomindae/tendermint/issues/1190
   613  			c.Logger.Debug("Receive Ping")
   614  			select {
   615  			case c.pong <- struct{}{}:
   616  			default:
   617  				// never block
   618  			}
   619  		case *tmp2p.Packet_PacketPong:
   620  			c.Logger.Debug("Receive Pong")
   621  			select {
   622  			case c.pongTimeoutCh <- false:
   623  			default:
   624  				// never block
   625  			}
   626  		case *tmp2p.Packet_PacketMsg:
   627  			channel, ok := c.channelsIdx[byte(pkt.PacketMsg.ChannelID)]
   628  			if !ok || channel == nil {
   629  				err := fmt.Errorf("unknown channel %X", pkt.PacketMsg.ChannelID)
   630  				c.Logger.Debug("Connection failed @ recvRoutine", "conn", c, "err", err)
   631  				c.stopForError(err)
   632  				break FOR_LOOP
   633  			}
   634  
   635  			msgBytes, err := channel.recvPacketMsg(*pkt.PacketMsg)
   636  			if err != nil {
   637  				if c.IsRunning() {
   638  					c.Logger.Debug("Connection failed @ recvRoutine", "conn", c, "err", err)
   639  					c.stopForError(err)
   640  				}
   641  				break FOR_LOOP
   642  			}
   643  			if msgBytes != nil {
   644  				c.Logger.Debug("Received bytes", "chID", pkt.PacketMsg.ChannelID, "msgBytes", fmt.Sprintf("%X", msgBytes))
   645  				// NOTE: This means the reactor.Receive runs in the same thread as the p2p recv routine
   646  				c.onReceive(byte(pkt.PacketMsg.ChannelID), msgBytes)
   647  			}
   648  		default:
   649  			err := fmt.Errorf("unknown message type %v", reflect.TypeOf(packet))
   650  			c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
   651  			c.stopForError(err)
   652  			break FOR_LOOP
   653  		}
   654  	}
   655  
   656  	// Cleanup
   657  	close(c.pong)
   658  	for range c.pong {
   659  		// Drain
   660  	}
   661  }
   662  
   663  // not goroutine-safe
   664  func (c *MConnection) stopPongTimer() {
   665  	if c.pongTimer != nil {
   666  		_ = c.pongTimer.Stop()
   667  		c.pongTimer = nil
   668  	}
   669  }
   670  
   671  // maxPacketMsgSize returns the maximum size of a PacketMsg
   672  func (c *MConnection) maxPacketMsgSize() int {
   673  	bz, err := proto.Marshal(mustWrapPacket(&tmp2p.PacketMsg{
   674  		ChannelID: 0x01,
   675  		EOF:       true,
   676  		Data:      make([]byte, c.config.MaxPacketMsgPayloadSize),
   677  	}))
   678  	if err != nil {
   679  		panic(err)
   680  	}
   681  	return len(bz)
   682  }
   683  
   684  type ConnectionStatus struct {
   685  	Duration    time.Duration
   686  	SendMonitor flow.Status
   687  	RecvMonitor flow.Status
   688  	Channels    []ChannelStatus
   689  }
   690  
   691  type ChannelStatus struct {
   692  	ID                byte
   693  	SendQueueCapacity int
   694  	SendQueueSize     int
   695  	Priority          int
   696  	RecentlySent      int64
   697  }
   698  
   699  func (c *MConnection) Status() ConnectionStatus {
   700  	var status ConnectionStatus
   701  	status.Duration = time.Since(c.created)
   702  	status.SendMonitor = c.sendMonitor.Status()
   703  	status.RecvMonitor = c.recvMonitor.Status()
   704  	status.Channels = make([]ChannelStatus, len(c.channels))
   705  	for i, channel := range c.channels {
   706  		status.Channels[i] = ChannelStatus{
   707  			ID:                channel.desc.ID,
   708  			SendQueueCapacity: cap(channel.sendQueue),
   709  			SendQueueSize:     int(atomic.LoadInt32(&channel.sendQueueSize)),
   710  			Priority:          channel.desc.Priority,
   711  			RecentlySent:      atomic.LoadInt64(&channel.recentlySent),
   712  		}
   713  	}
   714  	return status
   715  }
   716  
   717  //-----------------------------------------------------------------------------
   718  
   719  type ChannelDescriptor struct {
   720  	ID                  byte
   721  	Priority            int
   722  	SendQueueCapacity   int
   723  	RecvBufferCapacity  int
   724  	RecvMessageCapacity int
   725  }
   726  
   727  func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) {
   728  	if chDesc.SendQueueCapacity == 0 {
   729  		chDesc.SendQueueCapacity = defaultSendQueueCapacity
   730  	}
   731  	if chDesc.RecvBufferCapacity == 0 {
   732  		chDesc.RecvBufferCapacity = defaultRecvBufferCapacity
   733  	}
   734  	if chDesc.RecvMessageCapacity == 0 {
   735  		chDesc.RecvMessageCapacity = defaultRecvMessageCapacity
   736  	}
   737  	filled = chDesc
   738  	return
   739  }
   740  
   741  // TODO: lowercase.
   742  // NOTE: not goroutine-safe.
   743  type Channel struct {
   744  	conn          *MConnection
   745  	desc          ChannelDescriptor
   746  	sendQueue     chan []byte
   747  	sendQueueSize int32 // atomic.
   748  	recving       []byte
   749  	sending       []byte
   750  	recentlySent  int64 // exponential moving average
   751  
   752  	maxPacketMsgPayloadSize int
   753  
   754  	Logger log.Logger
   755  }
   756  
   757  func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
   758  	desc = desc.FillDefaults()
   759  	if desc.Priority <= 0 {
   760  		panic("Channel default priority must be a positive integer")
   761  	}
   762  	return &Channel{
   763  		conn:                    conn,
   764  		desc:                    desc,
   765  		sendQueue:               make(chan []byte, desc.SendQueueCapacity),
   766  		recving:                 make([]byte, 0, desc.RecvBufferCapacity),
   767  		maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize,
   768  	}
   769  }
   770  
   771  func (ch *Channel) SetLogger(l log.Logger) {
   772  	ch.Logger = l
   773  }
   774  
   775  // Queues message to send to this channel.
   776  // Goroutine-safe
   777  // Times out (and returns false) after defaultSendTimeout
   778  func (ch *Channel) sendBytes(bytes []byte) bool {
   779  	select {
   780  	case ch.sendQueue <- bytes:
   781  		atomic.AddInt32(&ch.sendQueueSize, 1)
   782  		return true
   783  	case <-time.After(defaultSendTimeout):
   784  		return false
   785  	}
   786  }
   787  
   788  // Queues message to send to this channel.
   789  // Nonblocking, returns true if successful.
   790  // Goroutine-safe
   791  func (ch *Channel) trySendBytes(bytes []byte) bool {
   792  	select {
   793  	case ch.sendQueue <- bytes:
   794  		atomic.AddInt32(&ch.sendQueueSize, 1)
   795  		return true
   796  	default:
   797  		return false
   798  	}
   799  }
   800  
   801  // Goroutine-safe
   802  func (ch *Channel) loadSendQueueSize() (size int) {
   803  	return int(atomic.LoadInt32(&ch.sendQueueSize))
   804  }
   805  
   806  // Goroutine-safe
   807  // Use only as a heuristic.
   808  func (ch *Channel) canSend() bool {
   809  	return ch.loadSendQueueSize() < defaultSendQueueCapacity
   810  }
   811  
   812  // Returns true if any PacketMsgs are pending to be sent.
   813  // Call before calling nextPacketMsg()
   814  // Goroutine-safe
   815  func (ch *Channel) isSendPending() bool {
   816  	if len(ch.sending) == 0 {
   817  		if len(ch.sendQueue) == 0 {
   818  			return false
   819  		}
   820  		ch.sending = <-ch.sendQueue
   821  	}
   822  	return true
   823  }
   824  
   825  // Creates a new PacketMsg to send.
   826  // Not goroutine-safe
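        // For example (illustrative sizes): with maxPacketMsgPayloadSize = 1024, a
        // 2500-byte message is split into packets of 1024, 1024 and 452 bytes, and
        // only the last packet has EOF set.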
   827  func (ch *Channel) nextPacketMsg() tmp2p.PacketMsg {
   828  	packet := tmp2p.PacketMsg{ChannelID: int32(ch.desc.ID)}
   829  	maxSize := ch.maxPacketMsgPayloadSize
   830  	packet.Data = ch.sending[:tmmath.MinInt(maxSize, len(ch.sending))]
   831  	if len(ch.sending) <= maxSize {
   832  		packet.EOF = true
   833  		ch.sending = nil
   834  		atomic.AddInt32(&ch.sendQueueSize, -1) // decrement sendQueueSize
   835  	} else {
   836  		packet.EOF = false
   837  		ch.sending = ch.sending[tmmath.MinInt(maxSize, len(ch.sending)):]
   838  	}
   839  	return packet
   840  }
   841  
   842  // Writes the next PacketMsg to w and updates ch.recentlySent.
   843  // Not goroutine-safe
   844  func (ch *Channel) writePacketMsgTo(w io.Writer) (n int, err error) {
   845  	packet := ch.nextPacketMsg()
   846  	n, err = protoio.NewDelimitedWriter(w).WriteMsg(mustWrapPacket(&packet))
   847  	atomic.AddInt64(&ch.recentlySent, int64(n))
   848  	return
   849  }
   850  
   851  // Handles incoming PacketMsgs. It returns the message bytes once the message is complete.
   852  // NOTE: the returned bytes may change on the next call to recvPacketMsg, as the recving buffer is reused.
   853  // Not goroutine-safe
   854  func (ch *Channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) {
   855  	ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet)
   856  	var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Data)
   857  	if recvCap < recvReceived {
   858  		return nil, fmt.Errorf("received message exceeds available capacity: %v < %v", recvCap, recvReceived)
   859  	}
   860  	ch.recving = append(ch.recving, packet.Data...)
   861  	if packet.EOF {
   862  		msgBytes := ch.recving
   863  
   864  		// clear the slice without re-allocating.
   865  		// http://stackoverflow.com/questions/16971741/how-do-you-clear-a-slice-in-go
   866  		//   suggests this could be a memory leak, but we might as well keep the memory for the channel until it closes,
   867  		//	at which point the recving slice stops being used and should be garbage collected
   868  		ch.recving = ch.recving[:0] // make([]byte, 0, ch.desc.RecvBufferCapacity)
   869  		return msgBytes, nil
   870  	}
   871  	return nil, nil
   872  }
   873  
   874  // Call this periodically to update stats for throttling purposes.
   875  // Not goroutine-safe
   876  func (ch *Channel) updateStats() {
   877  	// Exponential decay of stats.
   878  	// TODO: optimize.
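        	// With the 0.8 factor and the 2-second updateStats tick, recentlySent decays
        	// to roughly half its value after about 3 ticks (0.8^3 ≈ 0.51), i.e. ~6s.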
   879  	atomic.StoreInt64(&ch.recentlySent, int64(float64(atomic.LoadInt64(&ch.recentlySent))*0.8))
   880  }
   881  
   882  //----------------------------------------
   883  // Packet
   884  
   885  // mustWrapPacket takes a packet kind (oneof) and wraps it in a tmp2p.Packet message.
   886  func mustWrapPacket(pb proto.Message) *tmp2p.Packet {
   887  	var msg tmp2p.Packet
   888  
   889  	switch pb := pb.(type) {
   890  	case *tmp2p.Packet: // already a packet
   891  		msg = *pb
   892  	case *tmp2p.PacketPing:
   893  		msg = tmp2p.Packet{
   894  			Sum: &tmp2p.Packet_PacketPing{
   895  				PacketPing: pb,
   896  			},
   897  		}
   898  	case *tmp2p.PacketPong:
   899  		msg = tmp2p.Packet{
   900  			Sum: &tmp2p.Packet_PacketPong{
   901  				PacketPong: pb,
   902  			},
   903  		}
   904  	case *tmp2p.PacketMsg:
   905  		msg = tmp2p.Packet{
   906  			Sum: &tmp2p.Packet_PacketMsg{
   907  				PacketMsg: pb,
   908  			},
   909  		}
   910  	default:
   911  		panic(fmt.Errorf("unknown packet type %T", pb))
   912  	}
   913  
   914  	return &msg
   915  }