github.com/vipernet-xyz/tendermint-core@v0.32.0/p2p/conn/connection.go

     1  package conn
     2  
     3  import (
     4  	"bufio"
     5  	"runtime/debug"
     6  
     7  	"fmt"
     8  	"io"
     9  	"math"
    10  	"net"
    11  	"reflect"
    12  	"sync"
    13  	"sync/atomic"
    14  	"time"
    15  
    16  	"github.com/pkg/errors"
    17  
    18  	amino "github.com/tendermint/go-amino"
    19  
    20  	flow "github.com/tendermint/tendermint/libs/flowrate"
    21  	"github.com/tendermint/tendermint/libs/log"
    22  	tmmath "github.com/tendermint/tendermint/libs/math"
    23  	"github.com/tendermint/tendermint/libs/service"
    24  	"github.com/tendermint/tendermint/libs/timer"
    25  )
    26  
    27  const (
    28  	defaultMaxPacketMsgPayloadSize = 1024
    29  
    30  	numBatchPacketMsgs = 10
    31  	minReadBufferSize  = 1024
    32  	minWriteBufferSize = 65536
    33  	updateStats        = 2 * time.Second
    34  
    35  	// some of these defaults are written in the user config
    36  	// flushThrottle, sendRate, recvRate
    37  	// TODO: remove values present in config
    38  	defaultFlushThrottle = 100 * time.Millisecond
    39  
    40  	defaultSendQueueCapacity   = 1
    41  	defaultRecvBufferCapacity  = 4096
    42  	defaultRecvMessageCapacity = 22020096      // 21MB
    43  	defaultSendRate            = int64(512000) // 500KB/s
    44  	defaultRecvRate            = int64(512000) // 500KB/s
    45  	defaultSendTimeout         = 10 * time.Second
    46  	defaultPingInterval        = 60 * time.Second
    47  	defaultPongTimeout         = 45 * time.Second
    48  )
    49  
    50  type receiveCbFunc func(chID byte, msgBytes []byte)
    51  type errorCbFunc func(interface{})
    52  
    53  /*
    54  Each peer has one `MConnection` (multiplex connection) instance.
    55  
    56  __multiplex__ *noun* a system or signal involving simultaneous transmission of
    57  several messages along a single channel of communication.
    58  
    59  Each `MConnection` handles message transmission on multiple abstract communication
    60  `Channel`s.  Each channel has a globally unique byte id.
    61  The byte id and the relative priorities of each `Channel` are configured upon
    62  initialization of the connection.
    63  
    64  There are two methods for sending messages:
    65  	func (m MConnection) Send(chID byte, msgBytes []byte) bool {}
    66  	func (m MConnection) TrySend(chID byte, msgBytes []byte) bool {}
    67  
    68  `Send(chID, msgBytes)` is a blocking call that waits until `msgBytes` is
    69  successfully queued for the channel with the given id byte `chID`, or until the
    70  request times out.  The message bytes are sent as amino-encoded `PacketMsg`s.
    71  
    72  `TrySend(chID, msgBytes)` is a nonblocking call that returns false if the
    73  channel's queue is full.
    74  
    75  Inbound message bytes are handled with an onReceive callback function.
    76  */
    77  type MConnection struct {
    78  	service.BaseService
    79  
    80  	conn          net.Conn
    81  	bufConnReader *bufio.Reader
    82  	bufConnWriter *bufio.Writer
    83  	sendMonitor   *flow.Monitor
    84  	recvMonitor   *flow.Monitor
    85  	send          chan struct{}
    86  	pong          chan struct{}
    87  	channels      []*Channel
    88  	channelsIdx   map[byte]*Channel
    89  	onReceive     receiveCbFunc
    90  	onError       errorCbFunc
    91  	errored       uint32
    92  	config        MConnConfig
    93  
    94  	// Closing quitSendRoutine will cause the sendRoutine to eventually quit.
    95  	// doneSendRoutine is closed when the sendRoutine actually quits.
    96  	quitSendRoutine chan struct{}
    97  	doneSendRoutine chan struct{}
    98  
    99  	// Closing quitRecvRoutine will cause the recvRoutine to eventually quit.
   100  	quitRecvRoutine chan struct{}
   101  
   102  	// used to ensure FlushStop and OnStop
   103  	// are safe to call concurrently.
   104  	stopMtx sync.Mutex
   105  
   106  	flushTimer *timer.ThrottleTimer // flush writes as necessary but throttled.
   107  	pingTimer  *time.Ticker         // send pings periodically
   108  
   109  	// close conn if pong is not received in pongTimeout
   110  	pongTimer     *time.Timer
   111  	pongTimeoutCh chan bool // true - timeout, false - peer sent pong
   112  
   113  	chStatsTimer *time.Ticker // update channel stats periodically
   114  
   115  	created time.Time // time of creation
   116  
   117  	_maxPacketMsgSize int
   118  }
   119  
   120  // MConnConfig is a MConnection configuration.
   121  type MConnConfig struct {
   122  	SendRate int64 `mapstructure:"send_rate"`
   123  	RecvRate int64 `mapstructure:"recv_rate"`
   124  
   125  	// Maximum payload size
   126  	MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`
   127  
   128  	// Interval to flush writes (throttled)
   129  	FlushThrottle time.Duration `mapstructure:"flush_throttle"`
   130  
   131  	// Interval to send pings
   132  	PingInterval time.Duration `mapstructure:"ping_interval"`
   133  
   134  	// Maximum wait time for pongs
   135  	PongTimeout time.Duration `mapstructure:"pong_timeout"`
   136  }
   137  
   138  // DefaultMConnConfig returns the default config.
   139  func DefaultMConnConfig() MConnConfig {
   140  	return MConnConfig{
   141  		SendRate:                defaultSendRate,
   142  		RecvRate:                defaultRecvRate,
   143  		MaxPacketMsgPayloadSize: defaultMaxPacketMsgPayloadSize,
   144  		FlushThrottle:           defaultFlushThrottle,
   145  		PingInterval:            defaultPingInterval,
   146  		PongTimeout:             defaultPongTimeout,
   147  	}
   148  }
   149  
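// exampleCustomMConnConfig is an illustrative sketch (not part of the original
// file) showing how a caller might derive a custom config from the defaults.
// The rate, payload, and timing values below are hypothetical, not
// recommendations.
func exampleCustomMConnConfig() MConnConfig {
	cfg := DefaultMConnConfig()
	cfg.SendRate = 102400 // 100 KB/s
	cfg.RecvRate = 102400 // 100 KB/s
	cfg.MaxPacketMsgPayloadSize = 2048
	// PongTimeout must stay strictly below PingInterval, otherwise
	// NewMConnectionWithConfig panics.
	cfg.PingInterval = 30 * time.Second
	cfg.PongTimeout = 20 * time.Second
	return cfg
}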
    150  // NewMConnection wraps net.Conn and creates a multiplex connection using the default config.
   151  func NewMConnection(
   152  	conn net.Conn,
   153  	chDescs []*ChannelDescriptor,
   154  	onReceive receiveCbFunc,
   155  	onError errorCbFunc,
   156  ) *MConnection {
   157  	return NewMConnectionWithConfig(
   158  		conn,
   159  		chDescs,
   160  		onReceive,
   161  		onError,
   162  		DefaultMConnConfig())
   163  }
   164  
    165  // NewMConnectionWithConfig wraps net.Conn and creates a multiplex connection with the given config.
   166  func NewMConnectionWithConfig(
   167  	conn net.Conn,
   168  	chDescs []*ChannelDescriptor,
   169  	onReceive receiveCbFunc,
   170  	onError errorCbFunc,
   171  	config MConnConfig,
   172  ) *MConnection {
   173  	if config.PongTimeout >= config.PingInterval {
   174  		panic("pongTimeout must be less than pingInterval (otherwise, next ping will reset pong timer)")
   175  	}
   176  
   177  	mconn := &MConnection{
   178  		conn:          conn,
   179  		bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize),
   180  		bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize),
   181  		sendMonitor:   flow.New(0, 0),
   182  		recvMonitor:   flow.New(0, 0),
   183  		send:          make(chan struct{}, 1),
   184  		pong:          make(chan struct{}, 1),
   185  		onReceive:     onReceive,
   186  		onError:       onError,
   187  		config:        config,
   188  		created:       time.Now(),
   189  	}
   190  
   191  	// Create channels
   192  	var channelsIdx = map[byte]*Channel{}
   193  	var channels = []*Channel{}
   194  
   195  	for _, desc := range chDescs {
   196  		channel := newChannel(mconn, *desc)
   197  		channelsIdx[channel.desc.ID] = channel
   198  		channels = append(channels, channel)
   199  	}
   200  	mconn.channels = channels
   201  	mconn.channelsIdx = channelsIdx
   202  
   203  	mconn.BaseService = *service.NewBaseService(nil, "MConnection", mconn)
   204  
   205  	// maxPacketMsgSize() is a bit heavy, so call just once
   206  	mconn._maxPacketMsgSize = mconn.maxPacketMsgSize()
   207  
   208  	return mconn
   209  }
   210  
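// exampleNewMConnection is an illustrative sketch (not part of the original
// file) of wiring an MConnection on top of an existing net.Conn. The channel
// id 0x01, the queue capacity, and the callback bodies are hypothetical.
func exampleNewMConnection(netConn net.Conn, logger log.Logger) (*MConnection, error) {
	chDescs := []*ChannelDescriptor{
		{ID: 0x01, Priority: 1, SendQueueCapacity: 10},
	}
	onReceive := func(chID byte, msgBytes []byte) {
		logger.Info("received message", "chID", chID, "len", len(msgBytes))
	}
	onError := func(r interface{}) {
		logger.Error("connection error", "err", r)
	}
	mconn := NewMConnection(netConn, chDescs, onReceive, onError)
	mconn.SetLogger(logger)
	// Start spawns sendRoutine and recvRoutine; shut down later with
	// mconn.Stop() or mconn.FlushStop().
	return mconn, mconn.Start()
}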
   211  func (c *MConnection) SetLogger(l log.Logger) {
   212  	c.BaseService.SetLogger(l)
   213  	for _, ch := range c.channels {
   214  		ch.SetLogger(l)
   215  	}
   216  }
   217  
   218  // OnStart implements BaseService
   219  func (c *MConnection) OnStart() error {
   220  	if err := c.BaseService.OnStart(); err != nil {
   221  		return err
   222  	}
   223  	c.flushTimer = timer.NewThrottleTimer("flush", c.config.FlushThrottle)
   224  	c.pingTimer = time.NewTicker(c.config.PingInterval)
   225  	c.pongTimeoutCh = make(chan bool, 1)
   226  	c.chStatsTimer = time.NewTicker(updateStats)
   227  	c.quitSendRoutine = make(chan struct{})
   228  	c.doneSendRoutine = make(chan struct{})
   229  	c.quitRecvRoutine = make(chan struct{})
   230  	go c.sendRoutine()
   231  	go c.recvRoutine()
   232  	return nil
   233  }
   234  
    235  // stopServices stops the BaseService and timers and closes both quit channels.
    236  // If either quit channel was already closed, it returns true; otherwise it returns false.
   237  // It uses the stopMtx to ensure only one of FlushStop and OnStop can do this at a time.
   238  func (c *MConnection) stopServices() (alreadyStopped bool) {
   239  	c.stopMtx.Lock()
   240  	defer c.stopMtx.Unlock()
   241  
   242  	select {
   243  	case <-c.quitSendRoutine:
   244  		// already quit
   245  		return true
   246  	default:
   247  	}
   248  
   249  	select {
   250  	case <-c.quitRecvRoutine:
   251  		// already quit
   252  		return true
   253  	default:
   254  	}
   255  
   256  	c.Logger.Debug("Starting BaseService.OnStop()")
   257  	c.BaseService.OnStop()
   258  	c.Logger.Debug("Ending BaseService.OnStop")
   259  	c.Logger.Debug("Starting flushTimer.Stop()")
   260  	c.flushTimer.Stop()
   261  	c.Logger.Debug("Ending flushTimer.Stop()")
   262  	c.Logger.Debug("Starting pingTimer.Stop()")
   263  	c.pingTimer.Stop()
   264  	c.Logger.Debug("Ending pingTimer.Stop()")
   265  	c.Logger.Debug("Starting chStatsTimer.Stop()")
   266  	c.chStatsTimer.Stop()
   267  	c.Logger.Debug("Ending chStatsTimer.Stop()")
   268  
    269  	// inform the recvRoutine that we are shutting down
   270  	close(c.quitRecvRoutine)
   271  	close(c.quitSendRoutine)
   272  	return false
   273  }
   274  
   275  // FlushStop replicates the logic of OnStop.
   276  // It additionally ensures that all successful
   277  // .Send() calls will get flushed before closing
   278  // the connection.
   279  func (c *MConnection) FlushStop() {
   280  	if c.stopServices() {
   281  		return
   282  	}
   283  
   284  	// this block is unique to FlushStop
   285  	{
   286  		// wait until the sendRoutine exits
    287  		// so we don't race on calling sendSomePacketMsgs
   288  		<-c.doneSendRoutine
   289  
   290  		// Send and flush all pending msgs.
   291  		// Since sendRoutine has exited, we can call this
   292  		// safely
   293  		eof := c.sendSomePacketMsgs()
   294  		for !eof {
   295  			eof = c.sendSomePacketMsgs()
   296  		}
   297  		c.flush()
   298  
   299  		// Now we can close the connection
   300  	}
   301  
   302  	c.conn.Close() // nolint: errcheck
   303  
   304  	// We can't close pong safely here because
   305  	// recvRoutine may write to it after we've stopped.
   306  	// Though it doesn't need to get closed at all,
   307  	// we close it @ recvRoutine.
   308  
   309  	// c.Stop()
   310  }
   311  
   312  // OnStop implements BaseService
   313  func (c *MConnection) OnStop() {
   314  	if c.stopServices() {
   315  		return
   316  	}
   317  
   318  	c.conn.Close() // nolint: errcheck
   319  
   320  	// We can't close pong safely here because
   321  	// recvRoutine may write to it after we've stopped.
   322  	// Though it doesn't need to get closed at all,
   323  	// we close it @ recvRoutine.
   324  }
   325  
   326  func (c *MConnection) String() string {
   327  	return fmt.Sprintf("MConn{%v}", c.conn.RemoteAddr())
   328  }
   329  
   330  func (c *MConnection) flush() {
   331  	c.Logger.Debug("Flush", "conn", c)
   332  	err := c.bufConnWriter.Flush()
   333  	if err != nil {
   334  		c.Logger.Error("MConnection flush failed", "err", err)
   335  	}
   336  }
   337  
   338  // Catch panics, usually caused by remote disconnects.
   339  func (c *MConnection) _recover() {
   340  	if r := recover(); r != nil {
   341  		c.Logger.Error("MConnection panicked", "err", r, "stack", string(debug.Stack()))
   342  		c.stopForError(errors.Errorf("recovered from panic: %v", r))
   343  	}
   344  }
   345  
   346  func (c *MConnection) stopForError(r interface{}) {
   347  	c.Stop()
   348  	if atomic.CompareAndSwapUint32(&c.errored, 0, 1) {
   349  		if c.onError != nil {
   350  			c.onError(r)
   351  		}
   352  	}
   353  }
   354  
    355  // Send queues a message to be sent to the given channel. It blocks until the message is queued or the send times out.
   356  func (c *MConnection) Send(chID byte, msgBytes []byte) bool {
   357  	if !c.IsRunning() {
   358  		return false
   359  	}
   360  
   361  	c.Logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
   362  
   363  	// Send message to channel.
   364  	channel, ok := c.channelsIdx[chID]
   365  	if !ok {
   366  		c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
   367  		return false
   368  	}
   369  
   370  	success := channel.sendBytes(msgBytes)
   371  	if success {
   372  		// Wake up sendRoutine if necessary
   373  		select {
   374  		case c.send <- struct{}{}:
   375  		default:
   376  		}
   377  	} else {
   378  		c.Logger.Debug("Send failed", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
   379  	}
   380  	return success
   381  }
   382  
    383  // TrySend queues a message to be sent to the given channel.
   384  // Nonblocking, returns true if successful.
   385  func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool {
   386  	if !c.IsRunning() {
   387  		return false
   388  	}
   389  
   390  	c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
   391  
   392  	// Send message to channel.
   393  	channel, ok := c.channelsIdx[chID]
   394  	if !ok {
   395  		c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
   396  		return false
   397  	}
   398  
   399  	ok = channel.trySendBytes(msgBytes)
   400  	if ok {
   401  		// Wake up sendRoutine if necessary
   402  		select {
   403  		case c.send <- struct{}{}:
   404  		default:
   405  		}
   406  	}
   407  
   408  	return ok
   409  }
   410  
    411  // CanSend returns true if more data can be sent on the channel with the given
    412  // id, false otherwise. Use only as a heuristic.
   413  func (c *MConnection) CanSend(chID byte) bool {
   414  	if !c.IsRunning() {
   415  		return false
   416  	}
   417  
   418  	channel, ok := c.channelsIdx[chID]
   419  	if !ok {
   420  		c.Logger.Error(fmt.Sprintf("Unknown channel %X", chID))
   421  		return false
   422  	}
   423  	return channel.canSend()
   424  }
   425  
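// exampleSendOrBlock is an illustrative sketch (not part of the original file)
// contrasting TrySend and Send on a running MConnection. The channel id 0x01
// is hypothetical.
func exampleSendOrBlock(mconn *MConnection, msgBytes []byte) bool {
	const chID = byte(0x01)
	// TrySend never blocks: it returns false right away if the channel's
	// send queue is full.
	if mconn.TrySend(chID, msgBytes) {
		return true
	}
	// CanSend is only a heuristic; the queue may drain at any moment.
	if !mconn.CanSend(chID) {
		mconn.Logger.Debug("channel queue full; falling back to blocking Send", "chID", chID)
	}
	// Send blocks until the message is queued or defaultSendTimeout elapses.
	return mconn.Send(chID, msgBytes)
}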
   426  // sendRoutine polls for packets to send from channels.
   427  func (c *MConnection) sendRoutine() {
   428  	defer c._recover()
   429  
   430  FOR_LOOP:
   431  	for {
   432  		var _n int64
   433  		var err error
   434  	SELECTION:
   435  		select {
   436  		case <-c.flushTimer.Ch:
   437  			// NOTE: flushTimer.Set() must be called every time
   438  			// something is written to .bufConnWriter.
   439  			c.flush()
   440  		case <-c.chStatsTimer.C:
   441  			for _, channel := range c.channels {
   442  				channel.updateStats()
   443  			}
   444  		case <-c.pingTimer.C:
   445  			c.Logger.Debug("Send Ping")
   446  			_n, err = cdc.MarshalBinaryLengthPrefixedWriter(c.bufConnWriter, PacketPing{})
   447  			if err != nil {
   448  				break SELECTION
   449  			}
   450  			c.sendMonitor.Update(int(_n))
   451  			c.Logger.Debug("Starting pong timer", "dur", c.config.PongTimeout)
   452  			c.pongTimer = time.AfterFunc(c.config.PongTimeout, func() {
   453  				select {
   454  				case c.pongTimeoutCh <- true:
   455  				default:
   456  				}
   457  			})
   458  			c.flush()
   459  		case timeout := <-c.pongTimeoutCh:
   460  			if timeout {
   461  				c.Logger.Debug("Pong timeout")
   462  				err = errors.New("pong timeout")
   463  			} else {
   464  				c.stopPongTimer()
   465  			}
   466  		case <-c.pong:
   467  			c.Logger.Debug("Send Pong")
   468  			_n, err = cdc.MarshalBinaryLengthPrefixedWriter(c.bufConnWriter, PacketPong{})
   469  			if err != nil {
   470  				break SELECTION
   471  			}
   472  			c.sendMonitor.Update(int(_n))
   473  			c.flush()
   474  		case <-c.quitSendRoutine:
   475  			break FOR_LOOP
   476  		case <-c.send:
   477  			// Send some PacketMsgs
   478  			eof := c.sendSomePacketMsgs()
   479  			if !eof {
   480  				// Keep sendRoutine awake.
   481  				select {
   482  				case c.send <- struct{}{}:
   483  				default:
   484  				}
   485  			}
   486  		}
   487  
   488  		if !c.IsRunning() {
   489  			break FOR_LOOP
   490  		}
   491  		if err != nil {
   492  			c.Logger.Error("Connection failed @ sendRoutine", "conn", c, "err", err)
   493  			c.stopForError(err)
   494  			break FOR_LOOP
   495  		}
   496  	}
   497  
   498  	// Cleanup
   499  	c.stopPongTimer()
   500  	close(c.doneSendRoutine)
   501  }
   502  
   503  // Returns true if messages from channels were exhausted.
    504  // Blocks in accordance with .sendMonitor throttling.
   505  func (c *MConnection) sendSomePacketMsgs() bool {
   506  	// Block until .sendMonitor says we can write.
   507  	// Once we're ready we send more than we asked for,
   508  	// but amortized it should even out.
   509  	c.sendMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true)
   510  
   511  	// Now send some PacketMsgs.
   512  	for i := 0; i < numBatchPacketMsgs; i++ {
   513  		if c.sendPacketMsg() {
   514  			return true
   515  		}
   516  	}
   517  	return false
   518  }
   519  
   520  // Returns true if messages from channels were exhausted.
   521  func (c *MConnection) sendPacketMsg() bool {
   522  	// Choose a channel to create a PacketMsg from.
    523  	// The chosen channel will be the one whose recentlySent/priority ratio is the lowest.
   524  	var leastRatio float32 = math.MaxFloat32
   525  	var leastChannel *Channel
   526  	for _, channel := range c.channels {
   527  		// If nothing to send, skip this channel
   528  		if !channel.isSendPending() {
   529  			continue
   530  		}
   531  		// Get ratio, and keep track of lowest ratio.
   532  		ratio := float32(channel.recentlySent) / float32(channel.desc.Priority)
   533  		if ratio < leastRatio {
   534  			leastRatio = ratio
   535  			leastChannel = channel
   536  		}
   537  	}
   538  
   539  	// Nothing to send?
   540  	if leastChannel == nil {
   541  		return true
   542  	}
   543  	// c.Logger.Info("Found a msgPacket to send")
   544  
   545  	// Make & send a PacketMsg from this channel
   546  	_n, err := leastChannel.writePacketMsgTo(c.bufConnWriter)
   547  	if err != nil {
   548  		c.Logger.Error("Failed to write PacketMsg", "err", err)
   549  		c.stopForError(err)
   550  		return true
   551  	}
   552  	c.sendMonitor.Update(int(_n))
   553  	c.flushTimer.Set()
   554  	return false
   555  }
   556  
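// exampleLeastRatioSelection is an illustrative sketch (not part of the
// original file) of the selection rule used by sendPacketMsg above: among
// channels with pending data, pick the one with the lowest
// recentlySent/priority ratio. The two channel stats below are hypothetical.
func exampleLeastRatioSelection() byte {
	type chStat struct {
		id           byte
		priority     int
		recentlySent int64
	}
	stats := []chStat{
		{id: 0x01, priority: 5, recentlySent: 1000}, // ratio 1000/5 = 200
		{id: 0x02, priority: 1, recentlySent: 100},  // ratio 100/1 = 100 (chosen)
	}
	var leastRatio float32 = math.MaxFloat32
	var chosen byte
	for _, s := range stats {
		ratio := float32(s.recentlySent) / float32(s.priority)
		if ratio < leastRatio {
			leastRatio = ratio
			chosen = s.id
		}
	}
	// A lower ratio means the channel is underserved relative to its priority.
	return chosen // 0x02
}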
   557  // recvRoutine reads PacketMsgs and reconstructs the message using the channels' "recving" buffer.
   558  // After a whole message has been assembled, it's pushed to onReceive().
    559  // It blocks only when the connection is throttled by .recvMonitor;
    560  // otherwise, it never blocks.
   561  func (c *MConnection) recvRoutine() {
   562  	defer c._recover()
   563  
   564  FOR_LOOP:
   565  	for {
   566  		// Block until .recvMonitor says we can read.
   567  		c.recvMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.RecvRate), true)
   568  
    569  		// Peek into bufConnReader for debugging.
    570  		// TODO: proper peeking
    571  		if numBytes := c.bufConnReader.Buffered(); numBytes > 0 {
    572  			// Peek at up to the first 100 buffered bytes without consuming them.
    573  			bz, err := c.bufConnReader.Peek(tmmath.MinInt(numBytes, 100))
    574  			if err != nil {
    575  				// Peeking failed; log the error and continue with the read below.
    576  				c.Logger.Debug("Error peeking connection buffer", "err", err)
    577  			} else {
    578  				c.Logger.Debug("Peek connection buffer", "numBytes", numBytes, "bz", bz)
    579  			}
    580  		}
   581  
   582  		// Read packet type
   583  		var packet Packet
   584  		var _n int64
   585  		var err error
   586  		_n, err = cdc.UnmarshalBinaryLengthPrefixedReader(c.bufConnReader, &packet, int64(c._maxPacketMsgSize))
   587  		c.recvMonitor.Update(int(_n))
   588  
   589  		if err != nil {
    590  			// stopServices was invoked and we are shutting down;
    591  			// receiving is expected to fail since we will close the connection
   592  			select {
   593  			case <-c.quitRecvRoutine:
   594  				break FOR_LOOP
   595  			default:
   596  			}
   597  
   598  			if c.IsRunning() {
   599  				if err == io.EOF {
   600  					c.Logger.Debug("Connection is closed @ recvRoutine (likely by the other side)", "conn", c)
   601  				} else {
   602  					c.Logger.Debug("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err)
   603  				}
   604  				c.stopForError(err)
   605  			}
   606  			break FOR_LOOP
   607  		}
   608  
   609  		// Read more depending on packet type.
   610  		switch pkt := packet.(type) {
   611  		case PacketPing:
   612  			// TODO: prevent abuse, as they cause flush()'s.
   613  			// https://github.com/tendermint/tendermint/issues/1190
   614  			c.Logger.Debug("Receive Ping")
   615  			select {
   616  			case c.pong <- struct{}{}:
   617  			default:
   618  				// never block
   619  			}
   620  		case PacketPong:
   621  			c.Logger.Debug("Receive Pong")
   622  			select {
   623  			case c.pongTimeoutCh <- false:
   624  			default:
   625  				// never block
   626  			}
   627  		case PacketMsg:
   628  			channel, ok := c.channelsIdx[pkt.ChannelID]
   629  			if !ok || channel == nil {
   630  				err := fmt.Errorf("unknown channel %X", pkt.ChannelID)
   631  				c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
   632  				c.stopForError(err)
   633  				break FOR_LOOP
   634  			}
   635  
   636  			msgBytes, err := channel.recvPacketMsg(pkt)
   637  			if err != nil {
   638  				if c.IsRunning() {
   639  					c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
   640  					c.stopForError(err)
   641  				}
   642  				break FOR_LOOP
   643  			}
   644  			if msgBytes != nil {
   645  				c.Logger.Debug("Received bytes", "chID", pkt.ChannelID, "msgBytes", fmt.Sprintf("%X", msgBytes))
    646  				// NOTE: This means the reactor.Receive runs in the same goroutine as the p2p recv routine
   647  				c.onReceive(pkt.ChannelID, msgBytes)
   648  			}
   649  		default:
   650  			err := fmt.Errorf("unknown message type %v", reflect.TypeOf(packet))
   651  			c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
   652  			c.stopForError(err)
   653  			break FOR_LOOP
   654  		}
   655  	}
   656  
   657  	// Cleanup
   658  	close(c.pong)
   659  	for range c.pong {
   660  		// Drain
   661  	}
   662  }
   663  
   664  // not goroutine-safe
   665  func (c *MConnection) stopPongTimer() {
   666  	if c.pongTimer != nil {
   667  		_ = c.pongTimer.Stop()
   668  		c.pongTimer = nil
   669  	}
   670  }
   671  
    672  // maxPacketMsgSize returns the maximum size of a PacketMsg, including the
    673  // overhead of amino encoding.
   674  func (c *MConnection) maxPacketMsgSize() int {
   675  	return len(cdc.MustMarshalBinaryLengthPrefixed(PacketMsg{
   676  		ChannelID: 0x01,
   677  		EOF:       1,
   678  		Bytes:     make([]byte, c.config.MaxPacketMsgPayloadSize),
   679  	})) + 10 // leave room for changes in amino
   680  }
   681  
   682  type ConnectionStatus struct {
   683  	Duration    time.Duration
   684  	SendMonitor flow.Status
   685  	RecvMonitor flow.Status
   686  	Channels    []ChannelStatus
   687  }
   688  
   689  type ChannelStatus struct {
   690  	ID                byte
   691  	SendQueueCapacity int
   692  	SendQueueSize     int
   693  	Priority          int
   694  	RecentlySent      int64
   695  }
   696  
   697  func (c *MConnection) Status() ConnectionStatus {
   698  	var status ConnectionStatus
   699  	status.Duration = time.Since(c.created)
   700  	status.SendMonitor = c.sendMonitor.Status()
   701  	status.RecvMonitor = c.recvMonitor.Status()
   702  	status.Channels = make([]ChannelStatus, len(c.channels))
   703  	for i, channel := range c.channels {
   704  		status.Channels[i] = ChannelStatus{
   705  			ID:                channel.desc.ID,
   706  			SendQueueCapacity: cap(channel.sendQueue),
   707  			SendQueueSize:     int(atomic.LoadInt32(&channel.sendQueueSize)),
   708  			Priority:          channel.desc.Priority,
   709  			RecentlySent:      atomic.LoadInt64(&channel.recentlySent),
   710  		}
   711  	}
   712  	return status
   713  }
   714  
   715  //-----------------------------------------------------------------------------
   716  
   717  type ChannelDescriptor struct {
   718  	ID                  byte
   719  	Priority            int
   720  	SendQueueCapacity   int
   721  	RecvBufferCapacity  int
   722  	RecvMessageCapacity int
   723  }
   724  
   725  func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) {
   726  	if chDesc.SendQueueCapacity == 0 {
   727  		chDesc.SendQueueCapacity = defaultSendQueueCapacity
   728  	}
   729  	if chDesc.RecvBufferCapacity == 0 {
   730  		chDesc.RecvBufferCapacity = defaultRecvBufferCapacity
   731  	}
   732  	if chDesc.RecvMessageCapacity == 0 {
   733  		chDesc.RecvMessageCapacity = defaultRecvMessageCapacity
   734  	}
   735  	filled = chDesc
   736  	return
   737  }
   738  
   739  // TODO: lowercase.
   740  // NOTE: not goroutine-safe.
   741  type Channel struct {
   742  	conn          *MConnection
   743  	desc          ChannelDescriptor
   744  	sendQueue     chan []byte
   745  	sendQueueSize int32 // atomic.
   746  	recving       []byte
   747  	sending       []byte
   748  	recentlySent  int64 // exponential moving average
   749  
   750  	maxPacketMsgPayloadSize int
   751  
   752  	Logger log.Logger
   753  }
   754  
   755  func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
   756  	desc = desc.FillDefaults()
   757  	if desc.Priority <= 0 {
   758  		panic("Channel default priority must be a positive integer")
   759  	}
   760  	return &Channel{
   761  		conn:                    conn,
   762  		desc:                    desc,
   763  		sendQueue:               make(chan []byte, desc.SendQueueCapacity),
   764  		recving:                 make([]byte, 0, desc.RecvBufferCapacity),
   765  		maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize,
   766  	}
   767  }
   768  
   769  func (ch *Channel) SetLogger(l log.Logger) {
   770  	ch.Logger = l
   771  }
   772  
   773  // Queues message to send to this channel.
   774  // Goroutine-safe
   775  // Times out (and returns false) after defaultSendTimeout
   776  func (ch *Channel) sendBytes(bytes []byte) bool {
   777  	select {
   778  	case ch.sendQueue <- bytes:
   779  		atomic.AddInt32(&ch.sendQueueSize, 1)
   780  		return true
   781  	case <-time.After(defaultSendTimeout):
   782  		return false
   783  	}
   784  }
   785  
   786  // Queues message to send to this channel.
   787  // Nonblocking, returns true if successful.
   788  // Goroutine-safe
   789  func (ch *Channel) trySendBytes(bytes []byte) bool {
   790  	select {
   791  	case ch.sendQueue <- bytes:
   792  		atomic.AddInt32(&ch.sendQueueSize, 1)
   793  		return true
   794  	default:
   795  		return false
   796  	}
   797  }
   798  
   799  // Goroutine-safe
   800  func (ch *Channel) loadSendQueueSize() (size int) {
   801  	return int(atomic.LoadInt32(&ch.sendQueueSize))
   802  }
   803  
   804  // Goroutine-safe
   805  // Use only as a heuristic.
   806  func (ch *Channel) canSend() bool {
   807  	return ch.loadSendQueueSize() < defaultSendQueueCapacity
   808  }
   809  
   810  // Returns true if any PacketMsgs are pending to be sent.
   811  // Call before calling nextPacketMsg()
   812  // Goroutine-safe
   813  func (ch *Channel) isSendPending() bool {
   814  	if len(ch.sending) == 0 {
   815  		if len(ch.sendQueue) == 0 {
   816  			return false
   817  		}
   818  		ch.sending = <-ch.sendQueue
   819  	}
   820  	return true
   821  }
   822  
   823  // Creates a new PacketMsg to send.
   824  // Not goroutine-safe
   825  func (ch *Channel) nextPacketMsg() PacketMsg {
   826  	packet := PacketMsg{}
   827  	packet.ChannelID = ch.desc.ID
   828  	maxSize := ch.maxPacketMsgPayloadSize
   829  	packet.Bytes = ch.sending[:tmmath.MinInt(maxSize, len(ch.sending))]
   830  	if len(ch.sending) <= maxSize {
   831  		packet.EOF = byte(0x01)
   832  		ch.sending = nil
   833  		atomic.AddInt32(&ch.sendQueueSize, -1) // decrement sendQueueSize
   834  	} else {
   835  		packet.EOF = byte(0x00)
   836  		ch.sending = ch.sending[tmmath.MinInt(maxSize, len(ch.sending)):]
   837  	}
   838  	return packet
   839  }
   840  
    841  // Writes the next PacketMsg to w and updates ch.recentlySent.
   842  // Not goroutine-safe
   843  func (ch *Channel) writePacketMsgTo(w io.Writer) (n int64, err error) {
   844  	var packet = ch.nextPacketMsg()
   845  	n, err = cdc.MarshalBinaryLengthPrefixedWriter(w, packet)
   846  	atomic.AddInt64(&ch.recentlySent, n)
   847  	return
   848  }
   849  
    850  // Handles incoming PacketMsgs. It returns the message bytes if the message is
    851  // complete. NOTE: message bytes may change on the next call to recvPacketMsg.
   852  // Not goroutine-safe
   853  func (ch *Channel) recvPacketMsg(packet PacketMsg) ([]byte, error) {
   854  	ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet)
   855  	var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Bytes)
   856  	if recvCap < recvReceived {
   857  		return nil, fmt.Errorf("received message exceeds available capacity: %v < %v", recvCap, recvReceived)
   858  	}
   859  	ch.recving = append(ch.recving, packet.Bytes...)
   860  	if packet.EOF == byte(0x01) {
   861  		msgBytes := ch.recving
   862  
   863  		// clear the slice without re-allocating.
   864  		// http://stackoverflow.com/questions/16971741/how-do-you-clear-a-slice-in-go
   865  		//   suggests this could be a memory leak, but we might as well keep the memory for the channel until it closes,
    866  		//   at which point the recving slice stops being used and should be garbage collected
   867  		ch.recving = ch.recving[:0] // make([]byte, 0, ch.desc.RecvBufferCapacity)
   868  		return msgBytes, nil
   869  	}
   870  	return nil, nil
   871  }
   872  
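// examplePacketRoundTrip is an illustrative sketch (not part of the original
// file) showing how a message larger than maxPacketMsgPayloadSize is split
// into PacketMsgs by nextPacketMsg and reassembled by recvPacketMsg. The
// channel id 0x7f and the payload size are hypothetical.
func examplePacketRoundTrip(mconn *MConnection, logger log.Logger) ([]byte, error) {
	desc := ChannelDescriptor{ID: 0x7f, Priority: 1}
	sendCh := newChannel(mconn, desc)
	recvCh := newChannel(mconn, desc)
	sendCh.SetLogger(logger)
	recvCh.SetLogger(logger)

	// A payload 1.5x the max packet payload forces two packets.
	msg := make([]byte, 3*sendCh.maxPacketMsgPayloadSize/2)
	if !sendCh.trySendBytes(msg) {
		return nil, errors.New("send queue full")
	}
	for sendCh.isSendPending() {
		packet := sendCh.nextPacketMsg() // EOF is 0x00 for all but the last packet
		complete, err := recvCh.recvPacketMsg(packet)
		if err != nil {
			return nil, err
		}
		if complete != nil {
			return complete, nil // the full message, reassembled
		}
	}
	return nil, errors.New("message incomplete")
}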
   873  // Call this periodically to update stats for throttling purposes.
   874  // Not goroutine-safe
   875  func (ch *Channel) updateStats() {
   876  	// Exponential decay of stats.
   877  	// TODO: optimize.
   878  	atomic.StoreInt64(&ch.recentlySent, int64(float64(atomic.LoadInt64(&ch.recentlySent))*0.8))
   879  }
   880  
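// exampleRecentlySentDecay is an illustrative sketch (not part of the original
// file) of how recentlySent decays when updateStats fires every updateStats
// interval (2s) with no new sends: each tick multiplies the value by 0.8.
// The starting value of 10000 bytes is hypothetical.
func exampleRecentlySentDecay() []int64 {
	recentlySent := int64(10000)
	history := []int64{recentlySent}
	for i := 0; i < 3; i++ {
		recentlySent = int64(float64(recentlySent) * 0.8)
		history = append(history, recentlySent) // 8000, 6400, 5120
	}
	return history
}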
   881  //----------------------------------------
   882  // Packet
   883  
   884  type Packet interface {
   885  	AssertIsPacket()
   886  }
   887  
   888  func RegisterPacket(cdc *amino.Codec) {
   889  	cdc.RegisterInterface((*Packet)(nil), nil)
   890  	cdc.RegisterConcrete(PacketPing{}, "tendermint/p2p/PacketPing", nil)
   891  	cdc.RegisterConcrete(PacketPong{}, "tendermint/p2p/PacketPong", nil)
   892  	cdc.RegisterConcrete(PacketMsg{}, "tendermint/p2p/PacketMsg", nil)
   893  }
   894  
   895  func (PacketPing) AssertIsPacket() {}
   896  func (PacketPong) AssertIsPacket() {}
   897  func (PacketMsg) AssertIsPacket()  {}
   898  
   899  type PacketPing struct {
   900  }
   901  
   902  type PacketPong struct {
   903  }
   904  
   905  type PacketMsg struct {
   906  	ChannelID byte
   907  	EOF       byte // 1 means message ends here.
   908  	Bytes     []byte
   909  }
   910  
   911  func (mp PacketMsg) String() string {
   912  	return fmt.Sprintf("PacketMsg{%X:%X T:%X}", mp.ChannelID, mp.Bytes, mp.EOF)
   913  }