github.com/supragya/TendermintConnector@v0.0.0-20210619045051-113e32b84fb1/_deprecated_chains/irisnet/conn/connection.go

     1  package conn
     2  
     3  import (
     4  	"bufio"
     5  	"errors"
     6  	"fmt"
     7  	"io"
     8  	"math"
     9  	"net"
    10  	"reflect"
    11  	"sync"
    12  	"sync/atomic"
    13  	"time"
    14  
    15  	cmn "github.com/supragya/TendermintConnector/chains/irisnet/libs/common"
    16  	flow "github.com/supragya/TendermintConnector/chains/irisnet/libs/flowrate"
    17  	amino "github.com/tendermint/go-amino"
    18  	"github.com/tendermint/tendermint/libs/log"
    19  )
    20  
    21  const (
    22  	defaultMaxPacketMsgPayloadSize = 1024
    23  
    24  	numBatchPacketMsgs = 10
    25  	minReadBufferSize  = 1024
    26  	minWriteBufferSize = 65536
    27  	updateStats        = 2 * time.Second
    28  
    29  	// Some of these defaults can be overridden by the user config:
    30  	// flushThrottle, sendRate, recvRate.
    31  	defaultFlushThrottle = 100 * time.Millisecond
    32  
    33  	defaultSendQueueCapacity   = 1
    34  	defaultRecvBufferCapacity  = 4096
    35  	defaultRecvMessageCapacity = 22020096      // 21MB
    36  	defaultSendRate            = int64(512000) // 500KB/s
    37  	defaultRecvRate            = int64(512000) // 500KB/s
    38  	defaultSendTimeout         = 10 * time.Second
    39  	defaultPingInterval        = 60 * time.Second
    40  	defaultPongTimeout         = 45 * time.Second
    41  )
    42  
    43  type receiveCbFunc func(chID byte, msgBytes []byte)
    44  type errorCbFunc func(interface{})
    45  
    46  /*
    47  Each peer has one `MConnection` (multiplex connection) instance.
    48  
    49  __multiplex__ *noun* a system or signal involving simultaneous transmission of
    50  several messages along a single channel of communication.
    51  
    52  Each `MConnection` handles message transmission on multiple abstract communication
    53  `Channel`s.  Each channel has a globally unique byte id.
    54  The byte id and the relative priorities of each `Channel` are configured upon
    55  initialization of the connection.
    56  
    57  There are two methods for sending messages:
    58  	func (m MConnection) Send(chID byte, msgBytes []byte) bool {}
    59  	func (m MConnection) TrySend(chID byte, msgBytes []byte) bool {}
    60  
    61  `Send(chID, msgBytes)` is a blocking call that waits until `msgBytes` is
    62  successfully queued for the channel with the given id byte `chID`, or until the
    63  request times out. The message `msgBytes` is serialized using Go-Amino.
    64  
    65  `TrySend(chID, msgBytes)` is a nonblocking call that returns false if the
    66  channel's queue is full.
    67  
    68  Inbound message bytes are handled with an onReceive callback function.
    69  */
    70  type MConnection struct {
    71  	cmn.BaseService
    72  
    73  	conn          net.Conn
    74  	bufConnReader *bufio.Reader
    75  	bufConnWriter *bufio.Writer
    76  	sendMonitor   *flow.Monitor
    77  	recvMonitor   *flow.Monitor
    78  	send          chan struct{}
    79  	pong          chan struct{}
    80  	channels      []*Channel
    81  	channelsIdx   map[byte]*Channel
    82  	onReceive     receiveCbFunc
    83  	onError       errorCbFunc
    84  	errored       uint32
    85  	config        MConnConfig
    86  
    87  	// Closing quitSendRoutine will cause the sendRoutine to eventually quit.
    88  	// doneSendRoutine is closed when the sendRoutine actually quits.
    89  	quitSendRoutine chan struct{}
    90  	doneSendRoutine chan struct{}
    91  
    92  	// Closing quitRecvRoutine will cause the recvRoutine to eventually quit.
    93  	quitRecvRoutine chan struct{}
    94  
    95  	// used to ensure FlushStop and OnStop
    96  	// are safe to call concurrently.
    97  	stopMtx sync.Mutex
    98  
    99  	flushTimer *cmn.ThrottleTimer // flush writes as necessary but throttled.
   100  	pingTimer  *time.Ticker       // send pings periodically
   101  
   102  	// close conn if pong is not received in pongTimeout
   103  	pongTimer     *time.Timer
   104  	pongTimeoutCh chan bool // true - timeout, false - peer sent pong
   105  
   106  	chStatsTimer *time.Ticker // update channel stats periodically
   107  
   108  	created time.Time // time of creation
   109  
   110  	_maxPacketMsgSize int
   111  }
   112  
   113  // MConnConfig is a MConnection configuration.
   114  type MConnConfig struct {
   115  	SendRate int64 `mapstructure:"send_rate"`
   116  	RecvRate int64 `mapstructure:"recv_rate"`
   117  
   118  	// Maximum payload size
   119  	MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`
   120  
   121  	// Interval to flush writes (throttled)
   122  	FlushThrottle time.Duration `mapstructure:"flush_throttle"`
   123  
   124  	// Interval to send pings
   125  	PingInterval time.Duration `mapstructure:"ping_interval"`
   126  
   127  	// Maximum wait time for pongs
   128  	PongTimeout time.Duration `mapstructure:"pong_timeout"`
   129  }
   130  
   131  // DefaultMConnConfig returns the default config.
   132  func DefaultMConnConfig() MConnConfig {
   133  	return MConnConfig{
   134  		SendRate:                defaultSendRate,
   135  		RecvRate:                defaultRecvRate,
   136  		MaxPacketMsgPayloadSize: defaultMaxPacketMsgPayloadSize,
   137  		FlushThrottle:           defaultFlushThrottle,
   138  		PingInterval:            defaultPingInterval,
   139  		PongTimeout:             defaultPongTimeout,
   140  	}
   141  }
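        // Illustrative sketch (a hypothetical helper, not used elsewhere in this
        // package): a caller can start from DefaultMConnConfig and override selected
        // fields before handing the result to NewMConnectionWithConfig below. The
        // rates and throttle chosen here are illustrative values only.
        func exampleCustomConfig() MConnConfig {
        	cfg := DefaultMConnConfig()
        	cfg.SendRate = 5120000 // ~5 MB/s, hypothetical
        	cfg.RecvRate = 5120000 // ~5 MB/s, hypothetical
        	cfg.FlushThrottle = 10 * time.Millisecond
        	return cfg
        }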
   142  
   143  // NewMConnection wraps net.Conn and creates a multiplex connection.
   144  func NewMConnection(conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc) *MConnection {
   145  	return NewMConnectionWithConfig(
   146  		conn,
   147  		chDescs,
   148  		onReceive,
   149  		onError,
   150  		DefaultMConnConfig())
   151  }
   152  
   153  // NewMConnectionWithConfig wraps net.Conn and creates a multiplex connection with the given config.
   154  func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc, config MConnConfig) *MConnection {
   155  	if config.PongTimeout >= config.PingInterval {
   156  		panic("pongTimeout must be less than pingInterval (otherwise, next ping will reset pong timer)")
   157  	}
   158  
   159  	mconn := &MConnection{
   160  		conn:          conn,
   161  		bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize),
   162  		bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize),
   163  		sendMonitor:   flow.New(0, 0),
   164  		recvMonitor:   flow.New(0, 0),
   165  		send:          make(chan struct{}, 1),
   166  		pong:          make(chan struct{}, 1),
   167  		onReceive:     onReceive,
   168  		onError:       onError,
   169  		config:        config,
   170  		created:       time.Now(),
   171  	}
   172  
   173  	// Create channels
   174  	var channelsIdx = map[byte]*Channel{}
   175  	var channels = []*Channel{}
   176  
   177  	for _, desc := range chDescs {
   178  		channel := newChannel(mconn, *desc)
   179  		channelsIdx[channel.desc.ID] = channel
   180  		channels = append(channels, channel)
   181  	}
   182  	mconn.channels = channels
   183  	mconn.channelsIdx = channelsIdx
   184  
   185  	mconn.BaseService = *cmn.NewBaseService(nil, "MConnection", mconn)
   186  
   187  	// maxPacketMsgSize() is a bit heavy, so call just once
   188  	mconn._maxPacketMsgSize = mconn.maxPacketMsgSize()
   189  
   190  	return mconn
   191  }
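        // Illustrative sketch (a hypothetical helper, not used elsewhere in this
        // package): minimal end-to-end usage of an MConnection. The peer address,
        // channel ID and priority are made-up values; Start and Stop come from the
        // embedded cmn.BaseService. Note the constructor's requirement above that
        // PongTimeout be less than PingInterval when supplying a custom config.
        func exampleMConnectionUsage() {
        	conn, err := net.Dial("tcp", "127.0.0.1:26656") // hypothetical peer
        	if err != nil {
        		panic(err)
        	}
        	chDescs := []*ChannelDescriptor{{ID: 0x01, Priority: 1}}
        	onReceive := func(chID byte, msgBytes []byte) {
        		fmt.Printf("received %X on channel %X\n", msgBytes, chID)
        	}
        	onError := func(r interface{}) {
        		fmt.Println("connection error:", r)
        	}
        	mconn := NewMConnection(conn, chDescs, onReceive, onError)
        	if err := mconn.Start(); err != nil {
        		panic(err)
        	}
        	defer mconn.Stop()

        	// Send blocks until the bytes are queued on channel 0x01 (or times out);
        	// TrySend would return false immediately if that channel's queue were full.
        	mconn.Send(0x01, []byte("hello"))
        }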
   192  
   193  func (c *MConnection) SetLogger(l log.Logger) {
   194  	c.BaseService.SetLogger(l)
   195  	for _, ch := range c.channels {
   196  		ch.SetLogger(l)
   197  	}
   198  }
   199  
   200  // OnStart implements BaseService
   201  func (c *MConnection) OnStart() error {
   202  	if err := c.BaseService.OnStart(); err != nil {
   203  		return err
   204  	}
   205  	c.flushTimer = cmn.NewThrottleTimer("flush", c.config.FlushThrottle)
   206  	c.pingTimer = time.NewTicker(c.config.PingInterval)
   207  	c.pongTimeoutCh = make(chan bool, 1)
   208  	c.chStatsTimer = time.NewTicker(updateStats)
   209  	c.quitSendRoutine = make(chan struct{})
   210  	c.doneSendRoutine = make(chan struct{})
   211  	c.quitRecvRoutine = make(chan struct{})
   212  	go c.sendRoutine()
   213  	go c.recvRoutine()
   214  	return nil
   215  }
   216  
   217  // stopServices stops the BaseService and timers and closes quitSendRoutine and quitRecvRoutine.
   218  // If either quit channel was already closed, it returns true; otherwise it returns false.
   219  // It uses the stopMtx to ensure only one of FlushStop and OnStop can do this at a time.
   220  func (c *MConnection) stopServices() (alreadyStopped bool) {
   221  	c.stopMtx.Lock()
   222  	defer c.stopMtx.Unlock()
   223  
   224  	select {
   225  	case <-c.quitSendRoutine:
   226  		// already quit
   227  		return true
   228  	default:
   229  	}
   230  
   231  	select {
   232  	case <-c.quitRecvRoutine:
   233  		// already quit
   234  		return true
   235  	default:
   236  	}
   237  
   238  	c.BaseService.OnStop()
   239  	c.flushTimer.Stop()
   240  	c.pingTimer.Stop()
   241  	c.chStatsTimer.Stop()
   242  
   243  	// inform the recvRoutine that we are shutting down
   244  	close(c.quitRecvRoutine)
   245  	close(c.quitSendRoutine)
   246  	return false
   247  }
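        // Illustrative sketch (a hypothetical helper, not used elsewhere in this
        // package) of the close-once idiom used above: a non-blocking receive on a quit
        // channel reports whether it has already been closed, and the surrounding
        // stopMtx makes the check-then-close atomic so FlushStop and OnStop cannot
        // both close the channels.
        func quitChannelClosed(quit chan struct{}) bool {
        	select {
        	case <-quit:
        		return true // a receive succeeds immediately once the channel is closed
        	default:
        		return false // still open: nothing to receive, take the default branch
        	}
        }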
   248  
   249  // FlushStop replicates the logic of OnStop.
   250  // It additionally ensures that all successful
   251  // .Send() calls will get flushed before closing
   252  // the connection.
   253  func (c *MConnection) FlushStop() {
   254  	if c.stopServices() {
   255  		return
   256  	}
   257  
   258  	// this block is unique to FlushStop
   259  	{
   260  		// wait until the sendRoutine exits
   261  		// so we don't race on calling sendSomePacketMsgs
   262  		<-c.doneSendRoutine
   263  
   264  		// Send and flush all pending msgs.
   265  		// Since sendRoutine has exited, we can call this
   266  		// safely
   267  		eof := c.sendSomePacketMsgs()
   268  		for !eof {
   269  			eof = c.sendSomePacketMsgs()
   270  		}
   271  		c.flush()
   272  
   273  		// Now we can close the connection
   274  	}
   275  
   276  	c.conn.Close() // nolint: errcheck
   277  
   278  	// We can't close pong safely here because
   279  	// recvRoutine may write to it after we've stopped.
   280  	// Though it doesn't need to get closed at all,
   281  	// we close it @ recvRoutine.
   282  
   283  	// c.Stop()
   284  }
   285  
   286  // OnStop implements BaseService
   287  func (c *MConnection) OnStop() {
   288  	if c.stopServices() {
   289  		return
   290  	}
   291  
   292  	c.conn.Close() // nolint: errcheck
   293  
   294  	// We can't close pong safely here because
   295  	// recvRoutine may write to it after we've stopped.
   296  	// Though it doesn't need to get closed at all,
   297  	// we close it @ recvRoutine.
   298  }
   299  
   300  func (c *MConnection) String() string {
   301  	return fmt.Sprintf("MConn{%v}", c.conn.RemoteAddr())
   302  }
   303  
   304  func (c *MConnection) flush() {
   305  	c.Logger.Debug("Flush", "conn", c)
   306  	err := c.bufConnWriter.Flush()
   307  	if err != nil {
   308  		c.Logger.Error("MConnection flush failed", "err", err)
   309  	}
   310  }
   311  
   312  // Catch panics, usually caused by remote disconnects.
   313  func (c *MConnection) _recover() {
   314  	if r := recover(); r != nil {
   315  		err := cmn.ErrorWrap(r, "recovered panic in MConnection")
   316  		c.stopForError(err)
   317  	}
   318  }
   319  
   320  func (c *MConnection) stopForError(r interface{}) {
   321  	c.Stop()
   322  	if atomic.CompareAndSwapUint32(&c.errored, 0, 1) {
   323  		if c.onError != nil {
   324  			c.onError(r)
   325  		}
   326  	}
   327  }
   328  
   329  // Queues a message to be sent to the given channel. Blocks until queued or the send times out.
   330  func (c *MConnection) Send(chID byte, msgBytes []byte) bool {
   331  	if !c.IsRunning() {
   332  		return false
   333  	}
   334  
   335  	c.Logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
   336  
   337  	// Send message to channel.
   338  	channel, ok := c.channelsIdx[chID]
   339  	if !ok {
   340  		c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
   341  		return false
   342  	}
   343  
   344  	success := channel.sendBytes(msgBytes)
   345  	if success {
   346  		// Wake up sendRoutine if necessary
   347  		select {
   348  		case c.send <- struct{}{}:
   349  		default:
   350  		}
   351  	} else {
   352  		c.Logger.Debug("Send failed", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
   353  	}
   354  	return success
   355  }
   356  
   357  // Queues a message to be sent to the given channel.
   358  // Nonblocking, returns true if successful.
   359  func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool {
   360  	if !c.IsRunning() {
   361  		return false
   362  	}
   363  
   364  	c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
   365  
   366  	// Send message to channel.
   367  	channel, ok := c.channelsIdx[chID]
   368  	if !ok {
   369  		c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
   370  		return false
   371  	}
   372  
   373  	ok = channel.trySendBytes(msgBytes)
   374  	if ok {
   375  		// Wake up sendRoutine if necessary
   376  		select {
   377  		case c.send <- struct{}{}:
   378  		default:
   379  		}
   380  	}
   381  
   382  	return ok
   383  }
   384  
   385  // CanSend returns true if you can send more data onto the chID, false
   386  // otherwise. Use only as a heuristic.
   387  func (c *MConnection) CanSend(chID byte) bool {
   388  	if !c.IsRunning() {
   389  		return false
   390  	}
   391  
   392  	channel, ok := c.channelsIdx[chID]
   393  	if !ok {
   394  		c.Logger.Error(fmt.Sprintf("Unknown channel %X", chID))
   395  		return false
   396  	}
   397  	return channel.canSend()
   398  }
   399  
   400  // sendRoutine polls for packets to send from channels.
   401  func (c *MConnection) sendRoutine() {
   402  	defer c._recover()
   403  
   404  FOR_LOOP:
   405  	for {
   406  		var _n int64
   407  		var err error
   408  	SELECTION:
   409  		select {
   410  		case <-c.flushTimer.Ch:
   411  			// NOTE: flushTimer.Set() must be called every time
   412  			// something is written to .bufConnWriter.
   413  			c.flush()
   414  		case <-c.chStatsTimer.C:
   415  			for _, channel := range c.channels {
   416  				channel.updateStats()
   417  			}
   418  		case <-c.pingTimer.C:
   419  			c.Logger.Debug("Send Ping")
   420  			_n, err = cdc.MarshalBinaryLengthPrefixedWriter(c.bufConnWriter, PacketPing{})
   421  			if err != nil {
   422  				break SELECTION
   423  			}
   424  			c.sendMonitor.Update(int(_n))
   425  			c.Logger.Debug("Starting pong timer", "dur", c.config.PongTimeout)
   426  			c.pongTimer = time.AfterFunc(c.config.PongTimeout, func() {
   427  				select {
   428  				case c.pongTimeoutCh <- true:
   429  				default:
   430  				}
   431  			})
   432  			c.flush()
   433  		case timeout := <-c.pongTimeoutCh:
   434  			if timeout {
   435  				c.Logger.Debug("Pong timeout")
   436  				err = errors.New("pong timeout")
   437  			} else {
   438  				c.stopPongTimer()
   439  			}
   440  		case <-c.pong:
   441  			c.Logger.Debug("Send Pong")
   442  			_n, err = cdc.MarshalBinaryLengthPrefixedWriter(c.bufConnWriter, PacketPong{})
   443  			if err != nil {
   444  				break SELECTION
   445  			}
   446  			c.sendMonitor.Update(int(_n))
   447  			c.flush()
   448  		case <-c.quitSendRoutine:
   449  			break FOR_LOOP
   450  		case <-c.send:
   451  			// Send some PacketMsgs
   452  			eof := c.sendSomePacketMsgs()
   453  			if !eof {
   454  				// Keep sendRoutine awake.
   455  				select {
   456  				case c.send <- struct{}{}:
   457  				default:
   458  				}
   459  			}
   460  		}
   461  
   462  		if !c.IsRunning() {
   463  			break FOR_LOOP
   464  		}
   465  		if err != nil {
   466  			c.Logger.Error("Connection failed @ sendRoutine", "conn", c, "err", err)
   467  			c.stopForError(err)
   468  			break FOR_LOOP
   469  		}
   470  	}
   471  
   472  	// Cleanup
   473  	c.stopPongTimer()
   474  	close(c.doneSendRoutine)
   475  }
   476  
   477  // Returns true if messages from channels were exhausted.
   478  // Blocks in accordance with .sendMonitor throttling.
   479  func (c *MConnection) sendSomePacketMsgs() bool {
   480  	// Block until .sendMonitor says we can write.
   481  	// Once we're ready we send more than we asked for,
   482  	// but amortized it should even out.
   483  	c.sendMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true)
   484  
   485  	// Now send some PacketMsgs.
   486  	for i := 0; i < numBatchPacketMsgs; i++ {
   487  		if c.sendPacketMsg() {
   488  			return true
   489  		}
   490  	}
   491  	return false
   492  }
   493  
   494  // Returns true if messages from channels were exhausted.
   495  func (c *MConnection) sendPacketMsg() bool {
   496  	// Choose a channel to create a PacketMsg from.
   497  	// The chosen channel will be the one whose recentlySent/priority is the least.
   498  	var leastRatio float32 = math.MaxFloat32
   499  	var leastChannel *Channel
   500  	for _, channel := range c.channels {
   501  		// If nothing to send, skip this channel
   502  		if !channel.isSendPending() {
   503  			continue
   504  		}
   505  		// Get ratio, and keep track of lowest ratio.
   506  		ratio := float32(channel.recentlySent) / float32(channel.desc.Priority)
   507  		if ratio < leastRatio {
   508  			leastRatio = ratio
   509  			leastChannel = channel
   510  		}
   511  	}
   512  
   513  	// Nothing to send?
   514  	if leastChannel == nil {
   515  		return true
   516  	}
   517  	// c.Logger.Info("Found a msgPacket to send")
   518  
   519  	// Make & send a PacketMsg from this channel
   520  	_n, err := leastChannel.writePacketMsgTo(c.bufConnWriter)
   521  	if err != nil {
   522  		c.Logger.Error("Failed to write PacketMsg", "err", err)
   523  		c.stopForError(err)
   524  		return true
   525  	}
   526  	c.sendMonitor.Update(int(_n))
   527  	c.flushTimer.Set()
   528  	return false
   529  }
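        // Illustrative sketch (a hypothetical helper, not used elsewhere in this
        // package): the scheduling rule of sendPacketMsg in isolation. The channel with
        // the lowest recentlySent/priority ratio wins; for example, a priority-10
        // channel that recently sent 1000 bytes (ratio 100) loses to a priority-1
        // channel that sent only 50 bytes (ratio 50), so busy high-priority channels
        // cannot starve idle low-priority ones.
        func pickLeastRatioChannel(channels []*Channel) *Channel {
        	var least *Channel
        	var leastRatio float32 = math.MaxFloat32
        	for _, ch := range channels {
        		if !ch.isSendPending() { // skip channels with nothing queued
        			continue
        		}
        		ratio := float32(ch.recentlySent) / float32(ch.desc.Priority)
        		if ratio < leastRatio {
        			leastRatio = ratio
        			least = ch
        		}
        	}
        	return least
        }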
   530  
   531  // recvRoutine reads PacketMsgs and reconstructs the message using the channels' "recving" buffer.
   532  // After a whole message has been assembled, it's pushed to onReceive().
   533  // It blocks only while reads are throttled by .recvMonitor;
   534  // otherwise, it never blocks.
   535  func (c *MConnection) recvRoutine() {
   536  	defer c._recover()
   537  
   538  FOR_LOOP:
   539  	for {
   540  		// Block until .recvMonitor says we can read.
   541  		c.recvMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.RecvRate), true)
   542  
   543  		// Peek into bufConnReader for debugging
   544  		/*
   545  			if numBytes := c.bufConnReader.Buffered(); numBytes > 0 {
   546  				bz, err := c.bufConnReader.Peek(cmn.MinInt(numBytes, 100))
   547  				if err == nil {
   548  					// return
   549  				} else {
   550  					c.Logger.Debug("Error peeking connection buffer", "err", err)
   551  					// return nil
   552  				}
   553  				c.Logger.Info("Peek connection buffer", "numBytes", numBytes, "bz", bz)
   554  			}
   555  		*/
   556  
   557  		// Read packet type
   558  		var packet Packet
   559  		var _n int64
   560  		var err error
   561  		_n, err = cdc.UnmarshalBinaryLengthPrefixedReader(c.bufConnReader, &packet, int64(c._maxPacketMsgSize))
   562  		c.recvMonitor.Update(int(_n))
   563  
   564  		if err != nil {
   565  			// stopServices was invoked and we are shutting down
   566  			// receiving is expected to fail since we will close the connection
   567  			select {
   568  			case <-c.quitRecvRoutine:
   569  				break FOR_LOOP
   570  			default:
   571  			}
   572  
   573  			if c.IsRunning() {
   574  				if err == io.EOF {
   575  					c.Logger.Info("Connection is closed @ recvRoutine (likely by the other side)", "conn", c)
   576  				} else {
   577  					c.Logger.Error("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err)
   578  				}
   579  				c.stopForError(err)
   580  			}
   581  			break FOR_LOOP
   582  		}
   583  
   584  		// Read more depending on packet type.
   585  		switch pkt := packet.(type) {
   586  		case PacketPing:
   587  			c.Logger.Debug("Receive Ping")
   588  			select {
   589  			case c.pong <- struct{}{}:
   590  			default:
   591  				// never block
   592  			}
   593  		case PacketPong:
   594  			c.Logger.Debug("Receive Pong")
   595  			select {
   596  			case c.pongTimeoutCh <- false:
   597  			default:
   598  				// never block
   599  			}
   600  		case PacketMsg:
   601  			channel, ok := c.channelsIdx[pkt.ChannelID]
   602  			if !ok || channel == nil {
   603  				err := fmt.Errorf("Unknown channel %X", pkt.ChannelID)
   604  				c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
   605  				c.stopForError(err)
   606  				break FOR_LOOP
   607  			}
   608  
   609  			msgBytes, err := channel.recvPacketMsg(pkt)
   610  			if err != nil {
   611  				if c.IsRunning() {
   612  					c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
   613  					c.stopForError(err)
   614  				}
   615  				break FOR_LOOP
   616  			}
   617  			if msgBytes != nil {
   618  				c.Logger.Debug("Received bytes", "chID", pkt.ChannelID, "msgBytes", fmt.Sprintf("%X", msgBytes))
   619  				// NOTE: This means the reactor.Receive runs in the same thread as the p2p recv routine
   620  				c.onReceive(pkt.ChannelID, msgBytes)
   621  			}
   622  		default:
   623  			err := fmt.Errorf("Unknown message type %v", reflect.TypeOf(packet))
   624  			c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
   625  			c.stopForError(err)
   626  			break FOR_LOOP
   627  		}
   628  	}
   629  
   630  	// Cleanup
   631  	close(c.pong)
   632  	for range c.pong {
   633  		// Drain
   634  	}
   635  }
   636  
   637  // not goroutine-safe
   638  func (c *MConnection) stopPongTimer() {
   639  	if c.pongTimer != nil {
   640  		_ = c.pongTimer.Stop()
   641  		c.pongTimer = nil
   642  	}
   643  }
   644  
   645  // maxPacketMsgSize returns the maximum size of a PacketMsg, including the overhead
   646  // of amino encoding.
   647  func (c *MConnection) maxPacketMsgSize() int {
   648  	return len(cdc.MustMarshalBinaryLengthPrefixed(PacketMsg{
   649  		ChannelID: 0x01,
   650  		EOF:       1,
   651  		Bytes:     make([]byte, c.config.MaxPacketMsgPayloadSize),
   652  	})) + 10 // leave room for changes in amino
   653  }
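        // With the default 1024-byte payload the expression above is the amino
        // length-prefixed size of a worst-case packet (channel ID, EOF flag and a full
        // payload) plus a 10-byte safety margin; the constructor caches the result in
        // _maxPacketMsgSize so this marshalling runs only once per connection.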
   654  
   655  type ConnectionStatus struct {
   656  	Duration    time.Duration
   657  	SendMonitor flow.Status
   658  	RecvMonitor flow.Status
   659  	Channels    []ChannelStatus
   660  }
   661  
   662  type ChannelStatus struct {
   663  	ID                byte
   664  	SendQueueCapacity int
   665  	SendQueueSize     int
   666  	Priority          int
   667  	RecentlySent      int64
   668  }
   669  
   670  func (c *MConnection) Status() ConnectionStatus {
   671  	var status ConnectionStatus
   672  	status.Duration = time.Since(c.created)
   673  	status.SendMonitor = c.sendMonitor.Status()
   674  	status.RecvMonitor = c.recvMonitor.Status()
   675  	status.Channels = make([]ChannelStatus, len(c.channels))
   676  	for i, channel := range c.channels {
   677  		status.Channels[i] = ChannelStatus{
   678  			ID:                channel.desc.ID,
   679  			SendQueueCapacity: cap(channel.sendQueue),
   680  			SendQueueSize:     int(atomic.LoadInt32(&channel.sendQueueSize)),
   681  			Priority:          channel.desc.Priority,
   682  			RecentlySent:      atomic.LoadInt64(&channel.recentlySent),
   683  		}
   684  	}
   685  	return status
   686  }
   687  
   688  //-----------------------------------------------------------------------------
   689  
   690  type ChannelDescriptor struct {
   691  	ID                  byte
   692  	Priority            int
   693  	SendQueueCapacity   int
   694  	RecvBufferCapacity  int
   695  	RecvMessageCapacity int
   696  }
   697  
   698  func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) {
   699  	if chDesc.SendQueueCapacity == 0 {
   700  		chDesc.SendQueueCapacity = defaultSendQueueCapacity
   701  	}
   702  	if chDesc.RecvBufferCapacity == 0 {
   703  		chDesc.RecvBufferCapacity = defaultRecvBufferCapacity
   704  	}
   705  	if chDesc.RecvMessageCapacity == 0 {
   706  		chDesc.RecvMessageCapacity = defaultRecvMessageCapacity
   707  	}
   708  	filled = chDesc
   709  	return
   710  }
   711  
   712  type Channel struct {
   713  	conn          *MConnection
   714  	desc          ChannelDescriptor
   715  	sendQueue     chan []byte
   716  	sendQueueSize int32 // atomic.
   717  	recving       []byte
   718  	sending       []byte
   719  	recentlySent  int64 // exponential moving average
   720  
   721  	maxPacketMsgPayloadSize int
   722  
   723  	Logger log.Logger
   724  }
   725  
   726  func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
   727  	desc = desc.FillDefaults()
   728  	if desc.Priority <= 0 {
   729  		panic("Channel default priority must be a positive integer")
   730  	}
   731  	return &Channel{
   732  		conn:                    conn,
   733  		desc:                    desc,
   734  		sendQueue:               make(chan []byte, desc.SendQueueCapacity),
   735  		recving:                 make([]byte, 0, desc.RecvBufferCapacity),
   736  		maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize,
   737  	}
   738  }
   739  
   740  func (ch *Channel) SetLogger(l log.Logger) {
   741  	ch.Logger = l
   742  }
   743  
   744  // Queues message to send to this channel.
   745  // Goroutine-safe
   746  // Times out (and returns false) after defaultSendTimeout
   747  func (ch *Channel) sendBytes(bytes []byte) bool {
   748  	select {
   749  	case ch.sendQueue <- bytes:
   750  		atomic.AddInt32(&ch.sendQueueSize, 1)
   751  		return true
   752  	case <-time.After(defaultSendTimeout):
   753  		return false
   754  	}
   755  }
   756  
   757  // Queues message to send to this channel.
   758  // Nonblocking, returns true if successful.
   759  // Goroutine-safe
   760  func (ch *Channel) trySendBytes(bytes []byte) bool {
   761  	select {
   762  	case ch.sendQueue <- bytes:
   763  		atomic.AddInt32(&ch.sendQueueSize, 1)
   764  		return true
   765  	default:
   766  		return false
   767  	}
   768  }
   769  
   770  // Goroutine-safe
   771  func (ch *Channel) loadSendQueueSize() (size int) {
   772  	return int(atomic.LoadInt32(&ch.sendQueueSize))
   773  }
   774  
   775  // Goroutine-safe
   776  // Use only as a heuristic.
   777  func (ch *Channel) canSend() bool {
   778  	return ch.loadSendQueueSize() < defaultSendQueueCapacity
   779  }
   780  
   781  // Returns true if any PacketMsgs are pending to be sent.
   782  // Call before calling nextPacketMsg()
   783  // Goroutine-safe
   784  func (ch *Channel) isSendPending() bool {
   785  	if len(ch.sending) == 0 {
   786  		if len(ch.sendQueue) == 0 {
   787  			return false
   788  		}
   789  		ch.sending = <-ch.sendQueue
   790  	}
   791  	return true
   792  }
   793  
   794  // Creates a new PacketMsg to send.
   795  // Not goroutine-safe
   796  func (ch *Channel) nextPacketMsg() PacketMsg {
   797  	packet := PacketMsg{}
   798  	packet.ChannelID = byte(ch.desc.ID)
   799  	maxSize := ch.maxPacketMsgPayloadSize
   800  	packet.Bytes = ch.sending[:cmn.MinInt(maxSize, len(ch.sending))]
   801  	if len(ch.sending) <= maxSize {
   802  		packet.EOF = byte(0x01)
   803  		ch.sending = nil
   804  		atomic.AddInt32(&ch.sendQueueSize, -1) // decrement sendQueueSize
   805  	} else {
   806  		packet.EOF = byte(0x00)
   807  		ch.sending = ch.sending[cmn.MinInt(maxSize, len(ch.sending)):]
   808  	}
   809  	return packet
   810  }
   811  
   812  // Writes next PacketMsg to w and updates c.recentlySent.
   813  // Not goroutine-safe
   814  func (ch *Channel) writePacketMsgTo(w io.Writer) (n int64, err error) {
   815  	var packet = ch.nextPacketMsg()
   816  	n, err = cdc.MarshalBinaryLengthPrefixedWriter(w, packet)
   817  	atomic.AddInt64(&ch.recentlySent, n)
   818  	return
   819  }
   820  
   821  // Handles incoming PacketMsgs. It returns the message bytes if the message is
   822  // complete. NOTE: the returned bytes may change on the next call to recvPacketMsg.
   823  // Not goroutine-safe
   824  func (ch *Channel) recvPacketMsg(packet PacketMsg) ([]byte, error) {
   825  	ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet)
   826  	var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Bytes)
   827  	if recvCap < recvReceived {
   828  		return nil, fmt.Errorf("Received message exceeds available capacity: %v < %v", recvCap, recvReceived)
   829  	}
   830  	ch.recving = append(ch.recving, packet.Bytes...)
   831  	if packet.EOF == byte(0x01) {
   832  		msgBytes := ch.recving
   833  
   834  		// clear the slice without re-allocating.
   835  		// http://stackoverflow.com/questions/16971741/how-do-you-clear-a-slice-in-go
   836  		//   suggests this could be a memory leak, but we might as well keep the memory for the channel until it closes,
   837  		//	at which point the recving slice stops being used and should be garbage collected
   838  		ch.recving = ch.recving[:0] // make([]byte, 0, ch.desc.RecvBufferCapacity)
   839  		return msgBytes, nil
   840  	}
   841  	return nil, nil
   842  }
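        // Illustrative sketch (a hypothetical helper, not used elsewhere in this
        // package): how fragmentation pairs with the reassembly above. With the default
        // 1024-byte payload limit, a 2500-byte message leaves nextPacketMsg as three
        // PacketMsgs of 1024, 1024 and 452 bytes, and only the last carries EOF=1, at
        // which point recvPacketMsg returns the reassembled bytes. The splitting below
        // mirrors that arithmetic; channel ID 0x01 is a made-up value.
        func exampleFragment(msg []byte, maxPayload int) []PacketMsg {
        	var packets []PacketMsg
        	for len(msg) > maxPayload {
        		packets = append(packets, PacketMsg{ChannelID: 0x01, EOF: 0x00, Bytes: msg[:maxPayload]})
        		msg = msg[maxPayload:]
        	}
        	packets = append(packets, PacketMsg{ChannelID: 0x01, EOF: 0x01, Bytes: msg})
        	return packets
        }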
   843  
   844  // Call this periodically to update stats for throttling purposes.
   845  // Not goroutine-safe
   846  func (ch *Channel) updateStats() {
   847  	// Exponential decay of stats.
   848  	atomic.StoreInt64(&ch.recentlySent, int64(float64(atomic.LoadInt64(&ch.recentlySent))*0.8))
   849  }
   850  
   851  //----------------------------------------
   852  // Packet
   853  
   854  type Packet interface {
   855  	AssertIsPacket()
   856  }
   857  
   858  func RegisterPacket(cdc *amino.Codec) {
   859  	cdc.RegisterInterface((*Packet)(nil), nil)
   860  	cdc.RegisterConcrete(PacketPing{}, "tendermint/p2p/PacketPing", nil)
   861  	cdc.RegisterConcrete(PacketPong{}, "tendermint/p2p/PacketPong", nil)
   862  	cdc.RegisterConcrete(PacketMsg{}, "tendermint/p2p/PacketMsg", nil)
   863  }
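        // The package-level `cdc` codec used throughout this file is expected to live
        // in a sibling file of this package. A minimal wiring, shown only as a comment
        // so as not to clash with that declaration, could look like:
        //
        //	var cdc = amino.NewCodec()
        //
        //	func init() {
        //		RegisterPacket(cdc)
        //	}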
   864  
   865  func (_ PacketPing) AssertIsPacket() {}
   866  func (_ PacketPong) AssertIsPacket() {}
   867  func (_ PacketMsg) AssertIsPacket()  {}
   868  
   869  type PacketPing struct {
   870  }
   871  
   872  type PacketPong struct {
   873  }
   874  
   875  type PacketMsg struct {
   876  	ChannelID byte
   877  	EOF       byte // 1 means message ends here.
   878  	Bytes     []byte
   879  }
   880  
   881  func (mp PacketMsg) String() string {
   882  	return fmt.Sprintf("PacketMsg{%X:%X T:%X}", mp.ChannelID, mp.Bytes, mp.EOF)
   883  }