// Source: github.com/franono/tendermint@v0.32.2-0.20200527150959-749313264ce9/p2p/conn/connection.go

package conn

import (
	"bufio"
	"runtime/debug"

	"errors"
	"fmt"
	"io"
	"math"
	"net"
	"reflect"
	"sync"
	"sync/atomic"
	"time"

	amino "github.com/tendermint/go-amino"

	flow "github.com/franono/tendermint/libs/flowrate"
	"github.com/franono/tendermint/libs/log"
	tmmath "github.com/franono/tendermint/libs/math"
	"github.com/franono/tendermint/libs/service"
	"github.com/franono/tendermint/libs/timer"
)

const (
	defaultMaxPacketMsgPayloadSize = 1024

	numBatchPacketMsgs = 10
	minReadBufferSize  = 1024
	minWriteBufferSize = 65536
	updateStats        = 2 * time.Second

	// some of these defaults are written in the user config
	// flushThrottle, sendRate, recvRate
	// TODO: remove values present in config
	defaultFlushThrottle = 100 * time.Millisecond

	defaultSendQueueCapacity   = 1
	defaultRecvBufferCapacity  = 4096
	defaultRecvMessageCapacity = 22020096      // 21MB
	defaultSendRate            = int64(512000) // 500KB/s
	defaultRecvRate            = int64(512000) // 500KB/s
	defaultSendTimeout         = 10 * time.Second
	defaultPingInterval        = 60 * time.Second
	defaultPongTimeout         = 45 * time.Second
)

type receiveCbFunc func(chID byte, msgBytes []byte)
type errorCbFunc func(interface{})

/*
Each peer has one `MConnection` (multiplex connection) instance.

__multiplex__ *noun* a system or signal involving simultaneous transmission of
several messages along a single channel of communication.

Each `MConnection` handles message transmission on multiple abstract communication
`Channel`s. Each channel has a globally unique byte id.
The byte id and the relative priorities of each `Channel` are configured upon
initialization of the connection.

There are two methods for sending messages:
	func (m MConnection) Send(chID byte, msgBytes []byte) bool {}
	func (m MConnection) TrySend(chID byte, msgBytes []byte) bool {}

`Send(chID, msgBytes)` is a blocking call that waits until `msgBytes` is
successfully queued for the channel with the given id byte `chID`, or until the
request times out. The message `msgBytes` is serialized using Go-Amino.

`TrySend(chID, msgBytes)` is a nonblocking call that returns false if the
channel's queue is full.

Inbound message bytes are handled with an onReceive callback function.

An illustrative usage sketch (exampleMConnectionUsage) appears at the end of
this file.
*/
type MConnection struct {
	service.BaseService

	conn          net.Conn
	bufConnReader *bufio.Reader
	bufConnWriter *bufio.Writer
	sendMonitor   *flow.Monitor
	recvMonitor   *flow.Monitor
	send          chan struct{}
	pong          chan struct{}
	channels      []*Channel
	channelsIdx   map[byte]*Channel
	onReceive     receiveCbFunc
	onError       errorCbFunc
	errored       uint32
	config        MConnConfig

	// Closing quitSendRoutine will cause the sendRoutine to eventually quit.
	// doneSendRoutine is closed when the sendRoutine actually quits.
	quitSendRoutine chan struct{}
	doneSendRoutine chan struct{}

	// Closing quitRecvRoutine will cause the recvRoutine to eventually quit.
	quitRecvRoutine chan struct{}

	// used to ensure FlushStop and OnStop
	// are safe to call concurrently.
	stopMtx sync.Mutex

	flushTimer *timer.ThrottleTimer // flush writes as necessary but throttled.
	pingTimer  *time.Ticker         // send pings periodically

	// close conn if pong is not received in pongTimeout
	pongTimer     *time.Timer
	pongTimeoutCh chan bool // true - timeout, false - peer sent pong

	chStatsTimer *time.Ticker // update channel stats periodically

	created time.Time // time of creation

	_maxPacketMsgSize int
}

// MConnConfig is an MConnection configuration.
type MConnConfig struct {
	SendRate int64 `mapstructure:"send_rate"`
	RecvRate int64 `mapstructure:"recv_rate"`

	// Maximum payload size
	MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`

	// Interval to flush writes (throttled)
	FlushThrottle time.Duration `mapstructure:"flush_throttle"`

	// Interval to send pings
	PingInterval time.Duration `mapstructure:"ping_interval"`

	// Maximum wait time for pongs
	PongTimeout time.Duration `mapstructure:"pong_timeout"`
}

// DefaultMConnConfig returns the default config.
func DefaultMConnConfig() MConnConfig {
	return MConnConfig{
		SendRate:                defaultSendRate,
		RecvRate:                defaultRecvRate,
		MaxPacketMsgPayloadSize: defaultMaxPacketMsgPayloadSize,
		FlushThrottle:           defaultFlushThrottle,
		PingInterval:            defaultPingInterval,
		PongTimeout:             defaultPongTimeout,
	}
}

// NewMConnection wraps net.Conn and creates a multiplex connection.
func NewMConnection(
	conn net.Conn,
	chDescs []*ChannelDescriptor,
	onReceive receiveCbFunc,
	onError errorCbFunc,
) *MConnection {
	return NewMConnectionWithConfig(
		conn,
		chDescs,
		onReceive,
		onError,
		DefaultMConnConfig())
}

// NewMConnectionWithConfig wraps net.Conn and creates a multiplex connection with a config.
func NewMConnectionWithConfig(
	conn net.Conn,
	chDescs []*ChannelDescriptor,
	onReceive receiveCbFunc,
	onError errorCbFunc,
	config MConnConfig,
) *MConnection {
	if config.PongTimeout >= config.PingInterval {
		panic("pongTimeout must be less than pingInterval (otherwise, next ping will reset pong timer)")
	}

	mconn := &MConnection{
		conn:          conn,
		bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize),
		bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize),
		sendMonitor:   flow.New(0, 0),
		recvMonitor:   flow.New(0, 0),
		send:          make(chan struct{}, 1),
		pong:          make(chan struct{}, 1),
		onReceive:     onReceive,
		onError:       onError,
		config:        config,
		created:       time.Now(),
	}

	// Create channels
	var channelsIdx = map[byte]*Channel{}
	var channels = []*Channel{}

	for _, desc := range chDescs {
		channel := newChannel(mconn, *desc)
		channelsIdx[channel.desc.ID] = channel
		channels = append(channels, channel)
	}
	mconn.channels = channels
	mconn.channelsIdx = channelsIdx

	mconn.BaseService = *service.NewBaseService(nil, "MConnection", mconn)

	// maxPacketMsgSize() is a bit heavy, so call just once
	mconn._maxPacketMsgSize = mconn.maxPacketMsgSize()

	return mconn
}

func (c *MConnection) SetLogger(l log.Logger) {
	c.BaseService.SetLogger(l)
	for _, ch := range c.channels {
		ch.SetLogger(l)
	}
}

// OnStart implements BaseService
func (c *MConnection) OnStart() error {
	if err := c.BaseService.OnStart(); err != nil {
		return err
	}
	c.flushTimer = timer.NewThrottleTimer("flush", c.config.FlushThrottle)
	c.pingTimer = time.NewTicker(c.config.PingInterval)
	c.pongTimeoutCh = make(chan bool, 1)
	c.chStatsTimer = time.NewTicker(updateStats)
	c.quitSendRoutine = make(chan struct{})
	c.doneSendRoutine = make(chan struct{})
	c.quitRecvRoutine = make(chan struct{})
	go c.sendRoutine()
	go c.recvRoutine()
	return nil
}

// stopServices stops the BaseService and timers and closes the quitSendRoutine.
// If the quitSendRoutine was already closed, it returns true, otherwise it returns false.
// It uses the stopMtx to ensure only one of FlushStop and OnStop can do this at a time.
func (c *MConnection) stopServices() (alreadyStopped bool) {
	c.stopMtx.Lock()
	defer c.stopMtx.Unlock()

	select {
	case <-c.quitSendRoutine:
		// already quit
		return true
	default:
	}

	select {
	case <-c.quitRecvRoutine:
		// already quit
		return true
	default:
	}

	c.BaseService.OnStop()
	c.flushTimer.Stop()
	c.pingTimer.Stop()
	c.chStatsTimer.Stop()

	// inform the recvRoutine that we are shutting down
	close(c.quitRecvRoutine)
	close(c.quitSendRoutine)
	return false
}

// FlushStop replicates the logic of OnStop.
// It additionally ensures that all successful
// .Send() calls will get flushed before closing
// the connection.
func (c *MConnection) FlushStop() {
	if c.stopServices() {
		return
	}

	// this block is unique to FlushStop
	{
		// wait until the sendRoutine exits
		// so we don't race on calling sendSomePacketMsgs
		<-c.doneSendRoutine

		// Send and flush all pending msgs.
		// Since sendRoutine has exited, we can call this
		// safely
		eof := c.sendSomePacketMsgs()
		for !eof {
			eof = c.sendSomePacketMsgs()
		}
		c.flush()

		// Now we can close the connection
	}

	c.conn.Close() // nolint: errcheck

	// We can't close pong safely here because
	// recvRoutine may write to it after we've stopped.
	// Though it doesn't need to get closed at all,
	// we close it @ recvRoutine.

	// c.Stop()
}

// OnStop implements BaseService
func (c *MConnection) OnStop() {
	if c.stopServices() {
		return
	}

	c.conn.Close() // nolint: errcheck

	// We can't close pong safely here because
	// recvRoutine may write to it after we've stopped.
	// Though it doesn't need to get closed at all,
	// we close it @ recvRoutine.
}

func (c *MConnection) String() string {
	return fmt.Sprintf("MConn{%v}", c.conn.RemoteAddr())
}

func (c *MConnection) flush() {
	c.Logger.Debug("Flush", "conn", c)
	err := c.bufConnWriter.Flush()
	if err != nil {
		c.Logger.Error("MConnection flush failed", "err", err)
	}
}

// Catch panics, usually caused by remote disconnects.
func (c *MConnection) _recover() {
	if r := recover(); r != nil {
		c.Logger.Error("MConnection panicked", "err", r, "stack", string(debug.Stack()))
		c.stopForError(fmt.Errorf("recovered from panic: %v", r))
	}
}

func (c *MConnection) stopForError(r interface{}) {
	c.Stop()
	if atomic.CompareAndSwapUint32(&c.errored, 0, 1) {
		if c.onError != nil {
			c.onError(r)
		}
	}
}

// Queues a message to be sent to the channel.
func (c *MConnection) Send(chID byte, msgBytes []byte) bool {
	if !c.IsRunning() {
		return false
	}

	c.Logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))

	// Send message to channel.
	channel, ok := c.channelsIdx[chID]
	if !ok {
		c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
		return false
	}

	success := channel.sendBytes(msgBytes)
	if success {
		// Wake up sendRoutine if necessary
		select {
		case c.send <- struct{}{}:
		default:
		}
	} else {
		c.Logger.Debug("Send failed", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
	}
	return success
}

// Queues a message to be sent to the channel.
// Nonblocking, returns true if successful.
func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool {
	if !c.IsRunning() {
		return false
	}

	c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))

	// Send message to channel.
	channel, ok := c.channelsIdx[chID]
	if !ok {
		c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
		return false
	}

	ok = channel.trySendBytes(msgBytes)
	if ok {
		// Wake up sendRoutine if necessary
		select {
		case c.send <- struct{}{}:
		default:
		}
	}

	return ok
}

// CanSend returns true if you can send more data onto the chID, false
// otherwise. Use only as a heuristic.
func (c *MConnection) CanSend(chID byte) bool {
	if !c.IsRunning() {
		return false
	}

	channel, ok := c.channelsIdx[chID]
	if !ok {
		c.Logger.Error(fmt.Sprintf("Unknown channel %X", chID))
		return false
	}
	return channel.canSend()
}

// sendRoutine polls for packets to send from channels.
func (c *MConnection) sendRoutine() {
	defer c._recover()

FOR_LOOP:
	for {
		var _n int64
		var err error
	SELECTION:
		select {
		case <-c.flushTimer.Ch:
			// NOTE: flushTimer.Set() must be called every time
			// something is written to .bufConnWriter.
			c.flush()
		case <-c.chStatsTimer.C:
			for _, channel := range c.channels {
				channel.updateStats()
			}
		case <-c.pingTimer.C:
			c.Logger.Debug("Send Ping")
			_n, err = cdc.MarshalBinaryLengthPrefixedWriter(c.bufConnWriter, PacketPing{})
			if err != nil {
				break SELECTION
			}
			c.sendMonitor.Update(int(_n))
			c.Logger.Debug("Starting pong timer", "dur", c.config.PongTimeout)
			c.pongTimer = time.AfterFunc(c.config.PongTimeout, func() {
				select {
				case c.pongTimeoutCh <- true:
				default:
				}
			})
			c.flush()
		case timeout := <-c.pongTimeoutCh:
			if timeout {
				c.Logger.Debug("Pong timeout")
				err = errors.New("pong timeout")
			} else {
				c.stopPongTimer()
			}
		case <-c.pong:
			c.Logger.Debug("Send Pong")
			_n, err = cdc.MarshalBinaryLengthPrefixedWriter(c.bufConnWriter, PacketPong{})
			if err != nil {
				break SELECTION
			}
			c.sendMonitor.Update(int(_n))
			c.flush()
		case <-c.quitSendRoutine:
			break FOR_LOOP
		case <-c.send:
			// Send some PacketMsgs
			eof := c.sendSomePacketMsgs()
			if !eof {
				// Keep sendRoutine awake.
				select {
				case c.send <- struct{}{}:
				default:
				}
			}
		}

		if !c.IsRunning() {
			break FOR_LOOP
		}
		if err != nil {
			c.Logger.Error("Connection failed @ sendRoutine", "conn", c, "err", err)
			c.stopForError(err)
			break FOR_LOOP
		}
	}

	// Cleanup
	c.stopPongTimer()
	close(c.doneSendRoutine)
}

// Returns true if messages from channels were exhausted.
// Blocks in accordance with .sendMonitor throttling.
func (c *MConnection) sendSomePacketMsgs() bool {
	// Block until .sendMonitor says we can write.
	// Once we're ready we send more than we asked for,
	// but amortized it should even out.
	c.sendMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true)

	// Now send some PacketMsgs.
	for i := 0; i < numBatchPacketMsgs; i++ {
		if c.sendPacketMsg() {
			return true
		}
	}
	return false
}

// Returns true if messages from channels were exhausted.
func (c *MConnection) sendPacketMsg() bool {
	// Choose a channel to create a PacketMsg from.
	// The chosen channel will be the one whose recentlySent/priority is the least.
	var leastRatio float32 = math.MaxFloat32
	var leastChannel *Channel
	for _, channel := range c.channels {
		// If nothing to send, skip this channel
		if !channel.isSendPending() {
			continue
		}
		// Get ratio, and keep track of lowest ratio.
		ratio := float32(channel.recentlySent) / float32(channel.desc.Priority)
		if ratio < leastRatio {
			leastRatio = ratio
			leastChannel = channel
		}
	}

	// Nothing to send?
	if leastChannel == nil {
		return true
	}
	// c.Logger.Info("Found a msgPacket to send")

	// Make & send a PacketMsg from this channel
	_n, err := leastChannel.writePacketMsgTo(c.bufConnWriter)
	if err != nil {
		c.Logger.Error("Failed to write PacketMsg", "err", err)
		c.stopForError(err)
		return true
	}
	c.sendMonitor.Update(int(_n))
	c.flushTimer.Set()
	return false
}

// recvRoutine reads PacketMsgs and reconstructs the message using the channels' "recving" buffer.
// After a whole message has been assembled, it's pushed to onReceive().
// Blocks depending on how the connection is throttled.
// Otherwise, it never blocks.
func (c *MConnection) recvRoutine() {
	defer c._recover()

FOR_LOOP:
	for {
		// Block until .recvMonitor says we can read.
		c.recvMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.RecvRate), true)

		// Peek into bufConnReader for debugging
		/*
			if numBytes := c.bufConnReader.Buffered(); numBytes > 0 {
				bz, err := c.bufConnReader.Peek(tmmath.MinInt(numBytes, 100))
				if err == nil {
					// return
				} else {
					c.Logger.Debug("Error peeking connection buffer", "err", err)
					// return nil
				}
				c.Logger.Info("Peek connection buffer", "numBytes", numBytes, "bz", bz)
			}
		*/

		// Read packet type
		var packet Packet
		var _n int64
		var err error
		_n, err = cdc.UnmarshalBinaryLengthPrefixedReader(c.bufConnReader, &packet, int64(c._maxPacketMsgSize))
		c.recvMonitor.Update(int(_n))

		if err != nil {
			// stopServices was invoked and we are shutting down;
			// receiving is expected to fail since we will close the connection
			select {
			case <-c.quitRecvRoutine:
				break FOR_LOOP
			default:
			}

			if c.IsRunning() {
				if err == io.EOF {
					c.Logger.Info("Connection is closed @ recvRoutine (likely by the other side)", "conn", c)
				} else {
					c.Logger.Error("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err)
				}
				c.stopForError(err)
			}
			break FOR_LOOP
		}

		// Read more depending on packet type.
		switch pkt := packet.(type) {
		case PacketPing:
			// TODO: prevent abuse, as they cause flush()'s.
			// https://github.com/franono/tendermint/issues/1190
			c.Logger.Debug("Receive Ping")
			select {
			case c.pong <- struct{}{}:
			default:
				// never block
			}
		case PacketPong:
			c.Logger.Debug("Receive Pong")
			select {
			case c.pongTimeoutCh <- false:
			default:
				// never block
			}
		case PacketMsg:
			channel, ok := c.channelsIdx[pkt.ChannelID]
			if !ok || channel == nil {
				err := fmt.Errorf("unknown channel %X", pkt.ChannelID)
				c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
				c.stopForError(err)
				break FOR_LOOP
			}

			msgBytes, err := channel.recvPacketMsg(pkt)
			if err != nil {
				if c.IsRunning() {
					c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
					c.stopForError(err)
				}
				break FOR_LOOP
			}
			if msgBytes != nil {
				c.Logger.Debug("Received bytes", "chID", pkt.ChannelID, "msgBytes", fmt.Sprintf("%X", msgBytes))
				// NOTE: This means the reactor.Receive runs in the same thread as the p2p recv routine
				c.onReceive(pkt.ChannelID, msgBytes)
			}
		default:
			err := fmt.Errorf("unknown message type %v", reflect.TypeOf(packet))
			c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
			c.stopForError(err)
			break FOR_LOOP
		}
	}

	// Cleanup
	close(c.pong)
	for range c.pong {
		// Drain
	}
}

// not goroutine-safe
func (c *MConnection) stopPongTimer() {
	if c.pongTimer != nil {
		_ = c.pongTimer.Stop()
		c.pongTimer = nil
	}
}

// maxPacketMsgSize returns a maximum size of PacketMsg, including the overhead
// of amino encoding.
func (c *MConnection) maxPacketMsgSize() int {
	return len(cdc.MustMarshalBinaryLengthPrefixed(PacketMsg{
		ChannelID: 0x01,
		EOF:       1,
		Bytes:     make([]byte, c.config.MaxPacketMsgPayloadSize),
	})) + 10 // leave room for changes in amino
}

type ConnectionStatus struct {
	Duration    time.Duration
	SendMonitor flow.Status
	RecvMonitor flow.Status
	Channels    []ChannelStatus
}

type ChannelStatus struct {
	ID                byte
	SendQueueCapacity int
	SendQueueSize     int
	Priority          int
	RecentlySent      int64
}

func (c *MConnection) Status() ConnectionStatus {
	var status ConnectionStatus
	status.Duration = time.Since(c.created)
	status.SendMonitor = c.sendMonitor.Status()
	status.RecvMonitor = c.recvMonitor.Status()
	status.Channels = make([]ChannelStatus, len(c.channels))
	for i, channel := range c.channels {
		status.Channels[i] = ChannelStatus{
			ID:                channel.desc.ID,
			SendQueueCapacity: cap(channel.sendQueue),
			SendQueueSize:     int(atomic.LoadInt32(&channel.sendQueueSize)),
			Priority:          channel.desc.Priority,
			RecentlySent:      atomic.LoadInt64(&channel.recentlySent),
		}
	}
	return status
}

//-----------------------------------------------------------------------------

type ChannelDescriptor struct {
	ID                  byte
	Priority            int
	SendQueueCapacity   int
	RecvBufferCapacity  int
	RecvMessageCapacity int
}

func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) {
	if chDesc.SendQueueCapacity == 0 {
		chDesc.SendQueueCapacity = defaultSendQueueCapacity
	}
	if chDesc.RecvBufferCapacity == 0 {
		chDesc.RecvBufferCapacity = defaultRecvBufferCapacity
	}
	if chDesc.RecvMessageCapacity == 0 {
		chDesc.RecvMessageCapacity = defaultRecvMessageCapacity
	}
	filled = chDesc
	return
}
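
// exampleChannelDescriptorDefaults is an illustrative sketch, not part of the
// original file: it shows how FillDefaults completes a partially specified
// descriptor, replacing zero-valued capacities with the package defaults while
// leaving explicitly set fields untouched. The channel ID and priority below
// are arbitrary values chosen for the example.
func exampleChannelDescriptorDefaults() ChannelDescriptor {
	desc := ChannelDescriptor{
		ID:       0x30, // arbitrary example ID
		Priority: 5,    // must be > 0, otherwise newChannel panics
		// SendQueueCapacity, RecvBufferCapacity and RecvMessageCapacity are
		// left at zero and will be filled in below.
	}
	filled := desc.FillDefaults()
	// filled.SendQueueCapacity == defaultSendQueueCapacity (1)
	// filled.RecvBufferCapacity == defaultRecvBufferCapacity (4096)
	// filled.RecvMessageCapacity == defaultRecvMessageCapacity (~21MB)
	return filled
}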
// TODO: lowercase.
// NOTE: not goroutine-safe.
type Channel struct {
	conn          *MConnection
	desc          ChannelDescriptor
	sendQueue     chan []byte
	sendQueueSize int32 // atomic.
	recving       []byte
	sending       []byte
	recentlySent  int64 // exponential moving average

	maxPacketMsgPayloadSize int

	Logger log.Logger
}

func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
	desc = desc.FillDefaults()
	if desc.Priority <= 0 {
		panic("Channel default priority must be a positive integer")
	}
	return &Channel{
		conn:                    conn,
		desc:                    desc,
		sendQueue:               make(chan []byte, desc.SendQueueCapacity),
		recving:                 make([]byte, 0, desc.RecvBufferCapacity),
		maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize,
	}
}

func (ch *Channel) SetLogger(l log.Logger) {
	ch.Logger = l
}

// Queues message to send to this channel.
// Goroutine-safe
// Times out (and returns false) after defaultSendTimeout
func (ch *Channel) sendBytes(bytes []byte) bool {
	select {
	case ch.sendQueue <- bytes:
		atomic.AddInt32(&ch.sendQueueSize, 1)
		return true
	case <-time.After(defaultSendTimeout):
		return false
	}
}

// Queues message to send to this channel.
// Nonblocking, returns true if successful.
// Goroutine-safe
func (ch *Channel) trySendBytes(bytes []byte) bool {
	select {
	case ch.sendQueue <- bytes:
		atomic.AddInt32(&ch.sendQueueSize, 1)
		return true
	default:
		return false
	}
}

// Goroutine-safe
func (ch *Channel) loadSendQueueSize() (size int) {
	return int(atomic.LoadInt32(&ch.sendQueueSize))
}

// Goroutine-safe
// Use only as a heuristic.
func (ch *Channel) canSend() bool {
	return ch.loadSendQueueSize() < defaultSendQueueCapacity
}

// Returns true if any PacketMsgs are pending to be sent.
// Call before calling nextPacketMsg()
// Goroutine-safe
func (ch *Channel) isSendPending() bool {
	if len(ch.sending) == 0 {
		if len(ch.sendQueue) == 0 {
			return false
		}
		ch.sending = <-ch.sendQueue
	}
	return true
}

// Creates a new PacketMsg to send.
// Not goroutine-safe
func (ch *Channel) nextPacketMsg() PacketMsg {
	packet := PacketMsg{}
	packet.ChannelID = ch.desc.ID
	maxSize := ch.maxPacketMsgPayloadSize
	packet.Bytes = ch.sending[:tmmath.MinInt(maxSize, len(ch.sending))]
	if len(ch.sending) <= maxSize {
		packet.EOF = byte(0x01)
		ch.sending = nil
		atomic.AddInt32(&ch.sendQueueSize, -1) // decrement sendQueueSize
	} else {
		packet.EOF = byte(0x00)
		ch.sending = ch.sending[tmmath.MinInt(maxSize, len(ch.sending)):]
	}
	return packet
}

// Writes the next PacketMsg to w and updates ch.recentlySent.
// Not goroutine-safe
func (ch *Channel) writePacketMsgTo(w io.Writer) (n int64, err error) {
	var packet = ch.nextPacketMsg()
	n, err = cdc.MarshalBinaryLengthPrefixedWriter(w, packet)
	atomic.AddInt64(&ch.recentlySent, n)
	return
}
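
// examplePacketFragmentation is an illustrative sketch, not part of the
// original file: it mirrors the splitting performed by nextPacketMsg, showing
// how a payload larger than maxPayloadSize becomes a sequence of PacketMsgs on
// the same channel, with EOF set to 0x01 only on the final fragment. The
// chID and maxPayloadSize parameters are invented for the example.
func examplePacketFragmentation(chID byte, payload []byte, maxPayloadSize int) []PacketMsg {
	if maxPayloadSize <= 0 {
		maxPayloadSize = defaultMaxPacketMsgPayloadSize
	}
	var packets []PacketMsg
	for len(payload) > maxPayloadSize {
		packets = append(packets, PacketMsg{
			ChannelID: chID,
			EOF:       0x00, // more fragments follow
			Bytes:     payload[:maxPayloadSize],
		})
		payload = payload[maxPayloadSize:]
	}
	packets = append(packets, PacketMsg{
		ChannelID: chID,
		EOF:       0x01, // message ends here; recvPacketMsg returns the assembled message
		Bytes:     payload,
	})
	return packets
}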
// Handles incoming PacketMsgs. It returns the message bytes if the message is
// complete. NOTE: message bytes may change on next call to recvPacketMsg.
// Not goroutine-safe
func (ch *Channel) recvPacketMsg(packet PacketMsg) ([]byte, error) {
	ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet)
	var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving)+len(packet.Bytes)
	if recvCap < recvReceived {
		return nil, fmt.Errorf("received message exceeds available capacity: %v < %v", recvCap, recvReceived)
	}
	ch.recving = append(ch.recving, packet.Bytes...)
	if packet.EOF == byte(0x01) {
		msgBytes := ch.recving

		// clear the slice without re-allocating.
		// http://stackoverflow.com/questions/16971741/how-do-you-clear-a-slice-in-go
		// suggests this could be a memory leak, but we might as well keep the memory for the channel until it closes,
		// at which point the recving slice stops being used and should be garbage collected
		ch.recving = ch.recving[:0] // make([]byte, 0, ch.desc.RecvBufferCapacity)
		return msgBytes, nil
	}
	return nil, nil
}

// Call this periodically to update stats for throttling purposes.
// Not goroutine-safe
func (ch *Channel) updateStats() {
	// Exponential decay of stats.
	// TODO: optimize.
	atomic.StoreInt64(&ch.recentlySent, int64(float64(atomic.LoadInt64(&ch.recentlySent))*0.8))
}

//----------------------------------------
// Packet

type Packet interface {
	AssertIsPacket()
}

func RegisterPacket(cdc *amino.Codec) {
	cdc.RegisterInterface((*Packet)(nil), nil)
	cdc.RegisterConcrete(PacketPing{}, "tendermint/p2p/PacketPing", nil)
	cdc.RegisterConcrete(PacketPong{}, "tendermint/p2p/PacketPong", nil)
	cdc.RegisterConcrete(PacketMsg{}, "tendermint/p2p/PacketMsg", nil)
}

func (PacketPing) AssertIsPacket() {}
func (PacketPong) AssertIsPacket() {}
func (PacketMsg) AssertIsPacket()  {}

type PacketPing struct {
}

type PacketPong struct {
}

type PacketMsg struct {
	ChannelID byte
	EOF       byte // 1 means message ends here.
	Bytes     []byte
}

func (mp PacketMsg) String() string {
	return fmt.Sprintf("PacketMsg{%X:%X T:%X}", mp.ChannelID, mp.Bytes, mp.EOF)
}
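
// exampleMConnectionUsage is an illustrative sketch, not part of the original
// file: it shows how a caller might wrap an established net.Conn in an
// MConnection, start it, and send a message. The channel ID 0x20, the
// callbacks, and this function itself are assumptions made for the example;
// in the real code path the p2p peer and switch wire this up via reactors.
func exampleMConnectionUsage(tcpConn net.Conn, logger log.Logger) error {
	chDescs := []*ChannelDescriptor{
		{ID: 0x20, Priority: 1, SendQueueCapacity: 100}, // other capacities take defaults
	}

	onReceive := func(chID byte, msgBytes []byte) {
		// A real reactor would decode msgBytes here; note this runs on the
		// connection's recvRoutine goroutine.
		logger.Info("received message", "chID", chID, "len", len(msgBytes))
	}
	onError := func(r interface{}) {
		logger.Error("connection error", "err", r)
	}

	mconn := NewMConnectionWithConfig(tcpConn, chDescs, onReceive, onError, DefaultMConnConfig())
	mconn.SetLogger(logger)
	if err := mconn.Start(); err != nil {
		return err
	}

	// Send blocks until the bytes are queued on channel 0x20 (or the
	// defaultSendTimeout elapses); TrySend would return immediately instead.
	if !mconn.Send(0x20, []byte("example payload")) {
		logger.Error("send failed or timed out")
	}

	// FlushStop flushes any queued messages before closing the connection.
	mconn.FlushStop()
	return nil
}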