github.com/gnolang/gno@v0.0.0-20240520182011-228e9d0192ce/tm2/pkg/p2p/conn/connection.go

package conn

import (
	"bufio"
	goerrors "errors"
	"fmt"
	"io"
	"log/slog"
	"math"
	"net"
	"reflect"
	"runtime/debug"
	"sync"
	"sync/atomic"
	"time"

	"github.com/gnolang/gno/tm2/pkg/amino"
	"github.com/gnolang/gno/tm2/pkg/errors"
	"github.com/gnolang/gno/tm2/pkg/flow"
	"github.com/gnolang/gno/tm2/pkg/service"
	"github.com/gnolang/gno/tm2/pkg/timer"
)

const (
	defaultMaxPacketMsgPayloadSize = 1024

	numBatchPacketMsgs = 10
	minReadBufferSize  = 1024
	minWriteBufferSize = 65536
	updateStats        = 2 * time.Second

	// some of these defaults are written in the user config
	// flushThrottle, sendRate, recvRate
	// TODO: remove values present in config
	defaultFlushThrottle = 100 * time.Millisecond

	defaultSendQueueCapacity   = 1
	defaultRecvBufferCapacity  = 4096
	defaultRecvMessageCapacity = 22020096      // 21MB
	defaultSendRate            = int64(512000) // 500KB/s
	defaultRecvRate            = int64(512000) // 500KB/s
	defaultSendTimeout         = 10 * time.Second
	defaultPingInterval        = 60 * time.Second
	defaultPongTimeout         = 45 * time.Second
)

type (
	receiveCbFunc func(chID byte, msgBytes []byte)
	errorCbFunc   func(interface{})
)

/*
Each peer has one `MConnection` (multiplex connection) instance.

__multiplex__ *noun* a system or signal involving simultaneous transmission of
several messages along a single channel of communication.

Each `MConnection` handles message transmission on multiple abstract communication
`Channel`s. Each channel has a globally unique byte id.
The byte id and the relative priorities of each `Channel` are configured upon
initialization of the connection.

There are two methods for sending messages:

	func (m MConnection) Send(chID byte, msgBytes []byte) bool {}
	func (m MConnection) TrySend(chID byte, msgBytes []byte) bool {}

`Send(chID, msgBytes)` is a blocking call that waits until `msgBytes` is
successfully queued for the channel with the given id byte `chID`, or until the
request times out. The message `msgBytes` is serialized using Go-Amino.

`TrySend(chID, msgBytes)` is a nonblocking call that returns false if the
channel's queue is full.

Inbound message bytes are handled with an onReceive callback function.
*/
type MConnection struct {
	service.BaseService

	conn          net.Conn
	bufConnReader *bufio.Reader
	bufConnWriter *bufio.Writer
	sendMonitor   *flow.Monitor
	recvMonitor   *flow.Monitor
	send          chan struct{}
	pong          chan struct{}
	channels      []*Channel
	channelsIdx   map[byte]*Channel
	onReceive     receiveCbFunc
	onError       errorCbFunc
	errored       uint32
	config        MConnConfig

	// Closing quitSendRoutine will cause the sendRoutine to eventually quit.
	// doneSendRoutine is closed when the sendRoutine actually quits.
	quitSendRoutine chan struct{}
	doneSendRoutine chan struct{}

	// Closing quitRecvRoutine will cause the recvRoutine to eventually quit.
	quitRecvRoutine chan struct{}

	// used to ensure FlushStop and OnStop
	// are safe to call concurrently.
	stopMtx sync.Mutex

	flushTimer *timer.ThrottleTimer // flush writes as necessary but throttled.
	pingTimer  *time.Ticker         // send pings periodically

	// close conn if pong is not received in pongTimeout
	pongTimer     *time.Timer
	pongTimeoutCh chan bool // true - timeout, false - peer sent pong

	chStatsTimer *time.Ticker // update channel stats periodically

	created time.Time // time of creation

	_maxPacketMsgSize int
}
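// sendTrySendSketch is an illustrative sketch added here for clarity; it is
// not part of the original file. It contrasts the blocking Send with the
// nonblocking TrySend described in the doc comment above. The channel ID
// 0x01 and the payload are arbitrary assumptions.
func sendTrySendSketch(c *MConnection) {
	msg := []byte("hello")

	// Send blocks until msg is queued on channel 0x01, or gives up after
	// defaultSendTimeout (10s) if the queue stays full.
	if !c.Send(0x01, msg) {
		fmt.Println("queue stayed full past the timeout, or connection not running")
	}

	// TrySend returns immediately; false means the queue was full right now.
	if !c.TrySend(0x01, msg) {
		fmt.Println("queue full, dropping message")
	}
}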
// MConnConfig is a MConnection configuration.
type MConnConfig struct {
	SendRate int64 `toml:"send_rate"`
	RecvRate int64 `toml:"recv_rate"`

	// Maximum payload size
	MaxPacketMsgPayloadSize int `toml:"max_packet_msg_payload_size"`

	// Interval to flush writes (throttled)
	FlushThrottle time.Duration `toml:"flush_throttle"`

	// Interval to send pings
	PingInterval time.Duration `toml:"ping_interval"`

	// Maximum wait time for pongs
	PongTimeout time.Duration `toml:"pong_timeout"`
}

// DefaultMConnConfig returns the default config.
func DefaultMConnConfig() MConnConfig {
	return MConnConfig{
		SendRate:                defaultSendRate,
		RecvRate:                defaultRecvRate,
		MaxPacketMsgPayloadSize: defaultMaxPacketMsgPayloadSize,
		FlushThrottle:           defaultFlushThrottle,
		PingInterval:            defaultPingInterval,
		PongTimeout:             defaultPongTimeout,
	}
}

// NewMConnection wraps net.Conn and creates a multiplex connection.
func NewMConnection(conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc) *MConnection {
	return NewMConnectionWithConfig(
		conn,
		chDescs,
		onReceive,
		onError,
		DefaultMConnConfig())
}

// NewMConnectionWithConfig wraps net.Conn and creates a multiplex connection with a config.
func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc, config MConnConfig) *MConnection {
	if config.PongTimeout >= config.PingInterval {
		panic("pongTimeout must be less than pingInterval (otherwise, next ping will reset pong timer)")
	}
	mconn := &MConnection{
		conn:          conn,
		bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize),
		bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize),
		sendMonitor:   flow.New(0, 0),
		recvMonitor:   flow.New(0, 0),
		send:          make(chan struct{}, 1),
		pong:          make(chan struct{}, 1),
		onReceive:     onReceive,
		onError:       onError,
		config:        config,
		created:       time.Now(),
	}

	// Create channels
	channelsIdx := map[byte]*Channel{}
	channels := []*Channel{}

	for _, desc := range chDescs {
		channel := newChannel(mconn, *desc)
		channelsIdx[channel.desc.ID] = channel
		channels = append(channels, channel)
	}
	mconn.channels = channels
	mconn.channelsIdx = channelsIdx

	mconn.BaseService = *service.NewBaseService(nil, "MConnection", mconn)

	// maxPacketMsgSize() is a bit heavy, so call just once
	mconn._maxPacketMsgSize = mconn.maxPacketMsgSize()

	return mconn
}

func (c *MConnection) SetLogger(l *slog.Logger) {
	c.BaseService.SetLogger(l)
	for _, ch := range c.channels {
		ch.SetLogger(l)
	}
}

// OnStart implements BaseService
func (c *MConnection) OnStart() error {
	if err := c.BaseService.OnStart(); err != nil {
		return err
	}
	c.flushTimer = timer.NewThrottleTimer("flush", c.config.FlushThrottle)
	c.pingTimer = time.NewTicker(c.config.PingInterval)
	c.pongTimeoutCh = make(chan bool, 1)
	c.chStatsTimer = time.NewTicker(updateStats)
	c.quitSendRoutine = make(chan struct{})
	c.doneSendRoutine = make(chan struct{})
	c.quitRecvRoutine = make(chan struct{})
	go c.sendRoutine()
	go c.recvRoutine()
	return nil
}
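// newMConnectionSketch is an illustrative sketch, not part of the original
// file: it wires an MConnection over an in-memory net.Pipe with one channel
// descriptor and a tweaked config. The channel ID 0x01, the handler bodies,
// and the timing values are arbitrary assumptions.
func newMConnectionSketch() *MConnection {
	cfg := DefaultMConnConfig()
	cfg.PingInterval = 30 * time.Second
	cfg.PongTimeout = 20 * time.Second // must stay below PingInterval, or the constructor panics

	conn, _ := net.Pipe() // the second end would belong to the peer
	chDescs := []*ChannelDescriptor{{ID: 0x01, Priority: 1}}
	onReceive := func(chID byte, msgBytes []byte) {
		fmt.Printf("received %X on channel %X\n", msgBytes, chID)
	}
	onError := func(r interface{}) {
		fmt.Printf("connection error: %v\n", r)
	}

	mconn := NewMConnectionWithConfig(conn, chDescs, onReceive, onError, cfg)
	mconn.SetLogger(slog.Default())
	if err := mconn.Start(); err != nil { // Start invokes OnStart, spawning sendRoutine and recvRoutine
		panic(err)
	}
	return mconn
}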
// stopServices stops the BaseService and timers and closes the quitSendRoutine.
// If the quit channels were already closed, it returns true, otherwise it returns false.
// It uses the stopMtx to ensure only one of FlushStop and OnStop can do this at a time.
func (c *MConnection) stopServices() (alreadyStopped bool) {
	c.stopMtx.Lock()
	defer c.stopMtx.Unlock()

	select {
	case <-c.quitSendRoutine:
		// already quit
		return true
	default:
	}

	select {
	case <-c.quitRecvRoutine:
		// already quit
		return true
	default:
	}

	c.BaseService.OnStop()
	c.flushTimer.Stop()
	c.pingTimer.Stop()
	c.chStatsTimer.Stop()

	// inform the recvRoutine that we are shutting down
	close(c.quitRecvRoutine)
	close(c.quitSendRoutine)
	return false
}

// FlushStop replicates the logic of OnStop.
// It additionally ensures that all successful
// .Send() calls will get flushed before closing
// the connection.
func (c *MConnection) FlushStop() {
	if c.stopServices() {
		return
	}

	// this block is unique to FlushStop
	{
		// wait until the sendRoutine exits
		// so we don't race on calling sendSomePacketMsgs
		<-c.doneSendRoutine

		// Send and flush all pending msgs.
		// Since sendRoutine has exited, we can call this
		// safely
		eof := c.sendSomePacketMsgs()
		for !eof {
			eof = c.sendSomePacketMsgs()
		}
		c.flush()

		// Now we can close the connection
	}

	c.conn.Close() //nolint: errcheck

	// We can't close pong safely here because
	// recvRoutine may write to it after we've stopped.
	// Though it doesn't need to get closed at all,
	// we close it @ recvRoutine.

	// c.Stop()
}

// OnStop implements BaseService
func (c *MConnection) OnStop() {
	if c.stopServices() {
		return
	}

	c.conn.Close() //nolint: errcheck

	// We can't close pong safely here because
	// recvRoutine may write to it after we've stopped.
	// Though it doesn't need to get closed at all,
	// we close it @ recvRoutine.
}

func (c *MConnection) String() string {
	return fmt.Sprintf("MConn{%v}", c.conn.RemoteAddr())
}

func (c *MConnection) flush() {
	c.Logger.Debug("Flush", "conn", c)
	err := c.bufConnWriter.Flush()
	if err != nil {
		c.Logger.Error("MConnection flush failed", "err", err)
	}
}

// Catch panics, usually caused by remote disconnects.
func (c *MConnection) _recover() {
	if r := recover(); r != nil {
		c.Logger.Error("MConnection panicked", "err", r, "stack", string(debug.Stack()))
		c.stopForError(errors.New("recovered from panic: %v", r))
	}
}

func (c *MConnection) stopForError(r interface{}) {
	c.Stop()
	if atomic.CompareAndSwapUint32(&c.errored, 0, 1) {
		if c.onError != nil {
			c.onError(r)
		}
	}
}

// Queues a message to be sent to channel.
func (c *MConnection) Send(chID byte, msgBytes []byte) bool {
	if !c.IsRunning() {
		return false
	}

	c.Logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))

	// Send message to channel.
	channel, ok := c.channelsIdx[chID]
	if !ok {
		c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
		return false
	}

	success := channel.sendBytes(msgBytes)
	if success {
		// Wake up sendRoutine if necessary
		select {
		case c.send <- struct{}{}:
		default:
		}
	} else {
		c.Logger.Debug("Send failed", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
	}
	return success
}
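// shutdownSketch is an illustrative sketch, not part of the original file,
// contrasting the two shutdown paths defined above: FlushStop drains and
// flushes every already-queued message before closing the socket, while
// Stop (via OnStop) closes immediately and may drop queued data. The
// channel ID and payload are arbitrary.
func shutdownSketch(c *MConnection) {
	c.Send(0x01, []byte("goodbye")) // queued, but not necessarily on the wire yet
	c.FlushStop()                   // "goodbye" is written and flushed before the close
}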
// Queues a message to be sent to channel.
// Nonblocking, returns true if successful.
func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool {
	if !c.IsRunning() {
		return false
	}

	c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))

	// Send message to channel.
	channel, ok := c.channelsIdx[chID]
	if !ok {
		c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
		return false
	}

	ok = channel.trySendBytes(msgBytes)
	if ok {
		// Wake up sendRoutine if necessary
		select {
		case c.send <- struct{}{}:
		default:
		}
	}

	return ok
}

// CanSend returns true if you can send more data onto the chID, false
// otherwise. Use only as a heuristic.
func (c *MConnection) CanSend(chID byte) bool {
	if !c.IsRunning() {
		return false
	}

	channel, ok := c.channelsIdx[chID]
	if !ok {
		c.Logger.Error(fmt.Sprintf("Unknown channel %X", chID))
		return false
	}
	return channel.canSend()
}

// sendRoutine polls for packets to send from channels.
func (c *MConnection) sendRoutine() {
	defer c._recover()

FOR_LOOP:
	for {
		var _n int64
		var err error
	SELECTION:
		select {
		case <-c.flushTimer.Ch:
			// NOTE: flushTimer.Set() must be called every time
			// something is written to .bufConnWriter.
			c.flush()
		case <-c.chStatsTimer.C:
			for _, channel := range c.channels {
				channel.updateStats()
			}
		case <-c.pingTimer.C:
			c.Logger.Debug("Send Ping")
			_n, err = amino.MarshalAnySizedWriter(c.bufConnWriter, PacketPing{})
			if err != nil {
				break SELECTION
			}
			c.sendMonitor.Update(int(_n))
			c.Logger.Debug("Starting pong timer", "dur", c.config.PongTimeout)
			c.pongTimer = time.AfterFunc(c.config.PongTimeout, func() {
				select {
				case c.pongTimeoutCh <- true:
				default:
				}
			})
			c.flush()
		case timeout := <-c.pongTimeoutCh:
			if timeout {
				c.Logger.Debug("Pong timeout")
				err = errors.New("pong timeout")
			} else {
				c.stopPongTimer()
			}
		case <-c.pong:
			c.Logger.Debug("Send Pong")
			_n, err = amino.MarshalAnySizedWriter(c.bufConnWriter, PacketPong{})
			if err != nil {
				break SELECTION
			}
			c.sendMonitor.Update(int(_n))
			c.flush()
		case <-c.quitSendRoutine:
			break FOR_LOOP
		case <-c.send:
			// Send some PacketMsgs
			eof := c.sendSomePacketMsgs()
			if !eof {
				// Keep sendRoutine awake.
				select {
				case c.send <- struct{}{}:
				default:
				}
			}
		}

		if !c.IsRunning() {
			break FOR_LOOP
		}
		if err != nil {
			c.Logger.Error("Connection failed @ sendRoutine", "conn", c, "err", err)
			c.stopForError(err)
			break FOR_LOOP
		}
	}

	// Cleanup
	c.stopPongTimer()
	close(c.doneSendRoutine)
}
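// notifySketch is an illustrative sketch, not part of the original file, of
// the nonblocking wake-up idiom that sendRoutine relies on: c.send has
// capacity 1, so a failed send means a wake-up is already pending and the
// signal can be dropped without losing work.
func notifySketch(wake chan struct{}) {
	select {
	case wake <- struct{}{}:
		// consumer woken, or one wake-up now buffered
	default:
		// a wake-up is already buffered; no need to block or queue another
	}
}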
// Returns true if messages from channels were exhausted.
// Blocks in accordance with .sendMonitor throttling.
func (c *MConnection) sendSomePacketMsgs() bool {
	// Block until .sendMonitor says we can write.
	// Once we're ready we send more than we asked for,
	// but amortized it should even out.
	c.sendMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true)

	// Now send some PacketMsgs.
	for i := 0; i < numBatchPacketMsgs; i++ {
		if c.sendPacketMsg() {
			return true
		}
	}
	return false
}

// Returns true if messages from channels were exhausted.
func (c *MConnection) sendPacketMsg() bool {
	// Choose a channel to create a PacketMsg from.
	// The chosen channel will be the one whose recentlySent/priority is the least.
	var leastRatio float32 = math.MaxFloat32
	var leastChannel *Channel
	for _, channel := range c.channels {
		// If nothing to send, skip this channel
		if !channel.isSendPending() {
			continue
		}
		// Get ratio, and keep track of lowest ratio.
		ratio := float32(channel.recentlySent) / float32(channel.desc.Priority)
		if ratio < leastRatio {
			leastRatio = ratio
			leastChannel = channel
		}
	}

	// Nothing to send?
	if leastChannel == nil {
		return true
	}
	// c.Logger.Info("Found a msgPacket to send")

	// Make & send a PacketMsg from this channel
	_n, err := leastChannel.writePacketMsgTo(c.bufConnWriter)
	if err != nil {
		c.Logger.Error("Failed to write PacketMsg", "err", err)
		c.stopForError(err)
		return true
	}
	c.sendMonitor.Update(int(_n))
	c.flushTimer.Set()
	return false
}
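// scheduleSketch is an illustrative sketch, not part of the original file,
// of the selection rule in sendPacketMsg: the channel with the lowest
// recentlySent/priority ratio sends next, so higher-priority channels get
// bandwidth first while starved channels catch up as recentlySent decays
// (see updateStats). The IDs, priorities, and counters are made up.
func scheduleSketch() {
	channels := []struct {
		id           byte
		priority     int
		recentlySent int64
	}{
		{0x01, 5, 1000}, // ratio 200
		{0x02, 1, 100},  // ratio 100 -> chosen despite lower priority
	}

	bestID, leastRatio := byte(0), float32(math.MaxFloat32)
	for _, ch := range channels {
		if ratio := float32(ch.recentlySent) / float32(ch.priority); ratio < leastRatio {
			bestID, leastRatio = ch.id, ratio
		}
	}
	fmt.Printf("channel %X sends next (ratio %.0f)\n", bestID, leastRatio)
}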
// recvRoutine reads PacketMsgs and reconstructs the message using the channels' "recving" buffer.
// After a whole message has been assembled, it's pushed to onReceive().
// Blocks depending on how the connection is throttled; otherwise, it never blocks.
func (c *MConnection) recvRoutine() {
	defer c._recover()

FOR_LOOP:
	for {
		// Block until .recvMonitor says we can read.
		c.recvMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.RecvRate), true)

		// Peek into bufConnReader for debugging
		/*
			if numBytes := c.bufConnReader.Buffered(); numBytes > 0 {
				bz, err := c.bufConnReader.Peek(min(numBytes, 100))
				if err == nil {
					// return
				} else {
					c.Logger.Debug("Error peeking connection buffer", "err", err)
					// return nil
				}
				c.Logger.Info("Peek connection buffer", "numBytes", numBytes, "bz", bz)
			}
		*/

		// Read packet type
		var packet Packet
		var _n int64
		var err error
		_n, err = amino.UnmarshalSizedReader(c.bufConnReader, &packet, int64(c._maxPacketMsgSize))
		c.recvMonitor.Update(int(_n))

		if err != nil {
			// stopServices was invoked and we are shutting down
			// receiving is expected to fail since we will close the connection
			select {
			case <-c.quitRecvRoutine:
				break FOR_LOOP
			default:
			}

			if c.IsRunning() {
				if goerrors.Is(err, io.EOF) {
					c.Logger.Info("Connection is closed @ recvRoutine (likely by the other side)", "conn", c)
				} else {
					c.Logger.Error("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err)
				}
				c.stopForError(err)
			}
			break FOR_LOOP
		}

		// Read more depending on packet type.
		switch pkt := packet.(type) {
		case PacketPing:
			// TODO: prevent abuse, as they cause flush()'s.
			// https://github.com/tendermint/classic/issues/1190
			c.Logger.Debug("Receive Ping")
			select {
			case c.pong <- struct{}{}:
			default:
				// never block
			}
		case PacketPong:
			c.Logger.Debug("Receive Pong")
			select {
			case c.pongTimeoutCh <- false:
			default:
				// never block
			}
		case PacketMsg:
			channel, ok := c.channelsIdx[pkt.ChannelID]
			if !ok || channel == nil {
				err := fmt.Errorf("unknown channel %X", pkt.ChannelID)
				c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
				c.stopForError(err)
				break FOR_LOOP
			}

			msgBytes, err := channel.recvPacketMsg(pkt)
			if err != nil {
				if c.IsRunning() {
					c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
					c.stopForError(err)
				}
				break FOR_LOOP
			}
			if msgBytes != nil {
				c.Logger.Debug("Received bytes", "chID", pkt.ChannelID, "msgBytes", fmt.Sprintf("%X", msgBytes))
				// NOTE: This means the reactor.Receive runs in the same thread as the p2p recv routine
				c.onReceive(pkt.ChannelID, msgBytes)
			}
		default:
			err := fmt.Errorf("unknown message type %v", reflect.TypeOf(packet))
			c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
			c.stopForError(err)
			break FOR_LOOP
		}
	}

	// Cleanup
	close(c.pong)
	for range c.pong {
		// Drain
	}
}

// not goroutine-safe
func (c *MConnection) stopPongTimer() {
	if c.pongTimer != nil {
		_ = c.pongTimer.Stop()
		c.pongTimer = nil
	}
}

// maxPacketMsgSize returns the maximum size of a PacketMsg, including the overhead
// of amino encoding.
func (c *MConnection) maxPacketMsgSize() int {
	return len(amino.MustMarshalAnySized(PacketMsg{
		ChannelID: 0x01,
		EOF:       1,
		Bytes:     make([]byte, c.config.MaxPacketMsgPayloadSize),
	})) + 10 // leave room for changes in amino
}

type ConnectionStatus struct {
	Duration    time.Duration
	SendMonitor flow.Status
	RecvMonitor flow.Status
	Channels    []ChannelStatus
}

type ChannelStatus struct {
	ID                byte
	SendQueueCapacity int
	SendQueueSize     int
	Priority          int
	RecentlySent      int64
}

func (c *MConnection) Status() ConnectionStatus {
	var status ConnectionStatus
	status.Duration = time.Since(c.created)
	status.SendMonitor = c.sendMonitor.Status()
	status.RecvMonitor = c.recvMonitor.Status()
	status.Channels = make([]ChannelStatus, len(c.channels))
	for i, ch := range c.channels {
		channel := ch
		status.Channels[i] = ChannelStatus{
			ID:                channel.desc.ID,
			SendQueueCapacity: cap(channel.sendQueue),
			SendQueueSize:     int(atomic.LoadInt32(&channel.sendQueueSize)),
			Priority:          channel.desc.Priority,
			RecentlySent:      atomic.LoadInt64(&channel.recentlySent),
		}
	}
	return status
}

// -----------------------------------------------------------------------------

type ChannelDescriptor struct {
	ID                  byte
	Priority            int
	SendQueueCapacity   int
	RecvBufferCapacity  int
	RecvMessageCapacity int
}

func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) {
	if chDesc.SendQueueCapacity == 0 {
		chDesc.SendQueueCapacity = defaultSendQueueCapacity
	}
	if chDesc.RecvBufferCapacity == 0 {
		chDesc.RecvBufferCapacity = defaultRecvBufferCapacity
	}
	if chDesc.RecvMessageCapacity == 0 {
		chDesc.RecvMessageCapacity = defaultRecvMessageCapacity
	}
	filled = chDesc
	return
}
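// fillDefaultsSketch is an illustrative sketch, not part of the original
// file: zero-valued capacities in a ChannelDescriptor are replaced by the
// package defaults, while explicitly set fields are kept as-is. The ID and
// priority are arbitrary.
func fillDefaultsSketch() {
	desc := ChannelDescriptor{ID: 0x20, Priority: 5}.FillDefaults()
	// SendQueueCapacity=1, RecvBufferCapacity=4096, RecvMessageCapacity=22020096
	fmt.Printf("%+v\n", desc)
}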
// TODO: lowercase.
// NOTE: not goroutine-safe.
type Channel struct {
	conn          *MConnection
	desc          ChannelDescriptor
	sendQueue     chan []byte
	sendQueueSize int32 // atomic.
	recving       []byte
	sending       []byte
	recentlySent  int64 // exponential moving average

	maxPacketMsgPayloadSize int

	Logger *slog.Logger
}

func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
	desc = desc.FillDefaults()
	if desc.Priority <= 0 {
		panic("Channel default priority must be a positive integer")
	}
	return &Channel{
		conn:                    conn,
		desc:                    desc,
		sendQueue:               make(chan []byte, desc.SendQueueCapacity),
		recving:                 make([]byte, 0, desc.RecvBufferCapacity),
		maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize,
	}
}

func (ch *Channel) SetLogger(l *slog.Logger) {
	ch.Logger = l
}

// Queues message to send to this channel.
// Goroutine-safe
// Times out (and returns false) after defaultSendTimeout
func (ch *Channel) sendBytes(bytes []byte) bool {
	select {
	case ch.sendQueue <- bytes:
		atomic.AddInt32(&ch.sendQueueSize, 1)
		return true
	case <-time.After(defaultSendTimeout):
		return false
	}
}

// Queues message to send to this channel.
// Nonblocking, returns true if successful.
// Goroutine-safe
func (ch *Channel) trySendBytes(bytes []byte) bool {
	select {
	case ch.sendQueue <- bytes:
		atomic.AddInt32(&ch.sendQueueSize, 1)
		return true
	default:
		return false
	}
}

// Goroutine-safe
func (ch *Channel) loadSendQueueSize() (size int) {
	return int(atomic.LoadInt32(&ch.sendQueueSize))
}

// Goroutine-safe
// Use only as a heuristic.
func (ch *Channel) canSend() bool {
	return ch.loadSendQueueSize() < defaultSendQueueCapacity
}

// Returns true if any PacketMsgs are pending to be sent.
// Call before calling nextPacketMsg()
// Goroutine-safe
func (ch *Channel) isSendPending() bool {
	if len(ch.sending) == 0 {
		if len(ch.sendQueue) == 0 {
			return false
		}
		ch.sending = <-ch.sendQueue
	}
	return true
}

// Creates a new PacketMsg to send.
// Not goroutine-safe
func (ch *Channel) nextPacketMsg() PacketMsg {
	packet := PacketMsg{}
	packet.ChannelID = ch.desc.ID
	maxSize := ch.maxPacketMsgPayloadSize
	packet.Bytes = ch.sending[:min(maxSize, len(ch.sending))]
	if len(ch.sending) <= maxSize {
		packet.EOF = byte(0x01)
		ch.sending = nil
		atomic.AddInt32(&ch.sendQueueSize, -1) // decrement sendQueueSize
	} else {
		packet.EOF = byte(0x00)
		ch.sending = ch.sending[min(maxSize, len(ch.sending)):]
	}
	return packet
}

// Writes next PacketMsg to w and updates c.recentlySent.
// Not goroutine-safe
func (ch *Channel) writePacketMsgTo(w io.Writer) (n int64, err error) {
	packet := ch.nextPacketMsg()
	n, err = amino.MarshalAnySizedWriter(w, packet)
	atomic.AddInt64(&ch.recentlySent, n)
	return
}
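// fragmentationSketch is an illustrative sketch, not part of the original
// file, of how nextPacketMsg splits one queued message: with the default
// 1024-byte payload limit, a 2500-byte message becomes three PacketMsgs
// (1024 + 1024 + 452 bytes), and only the last carries EOF=0x01 so the
// receiver knows the message is complete. The Channel is built by hand,
// bypassing newChannel, so sendQueueSize underflows harmlessly here.
func fragmentationSketch() {
	ch := &Channel{
		desc:                    ChannelDescriptor{ID: 0x01, Priority: 1}.FillDefaults(),
		sending:                 make([]byte, 2500),
		maxPacketMsgPayloadSize: defaultMaxPacketMsgPayloadSize,
	}
	for i := 1; ; i++ {
		packet := ch.nextPacketMsg()
		fmt.Printf("packet %d: %d bytes, EOF=%v\n", i, len(packet.Bytes), packet.EOF == 0x01)
		if packet.EOF == 0x01 {
			break
		}
	}
}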
// Handles incoming PacketMsgs. It returns the message bytes if the message is
// complete. NOTE: message bytes may change on the next call to recvPacketMsg.
// Not goroutine-safe
func (ch *Channel) recvPacketMsg(packet PacketMsg) ([]byte, error) {
	ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet)
	recvCap, recvReceived := ch.desc.RecvMessageCapacity, len(ch.recving)+len(packet.Bytes)
	if recvCap < recvReceived {
		return nil, fmt.Errorf("received message exceeds available capacity: %v < %v", recvCap, recvReceived)
	}
	ch.recving = append(ch.recving, packet.Bytes...)
	if packet.EOF == byte(0x01) {
		msgBytes := ch.recving

		// clear the slice without re-allocating.
		// http://stackoverflow.com/questions/16971741/how-do-you-clear-a-slice-in-go
		// suggests this could be a memory leak, but we might as well keep the memory for the channel until it closes,
		// at which point the recving slice stops being used and should be garbage collected
		ch.recving = ch.recving[:0] // make([]byte, 0, ch.desc.RecvBufferCapacity)
		return msgBytes, nil
	}
	return nil, nil
}

// Call this periodically to update stats for throttling purposes.
// Not goroutine-safe
func (ch *Channel) updateStats() {
	// Exponential decay of stats.
	// TODO: optimize.
	atomic.StoreInt64(&ch.recentlySent, int64(float64(atomic.LoadInt64(&ch.recentlySent))*0.8))
}

// ----------------------------------------
// Packet

type Packet interface {
	AssertPacket()
}

func (PacketPing) AssertPacket() {}
func (PacketPong) AssertPacket() {}
func (PacketMsg) AssertPacket()  {}

type PacketPing struct{}

type PacketPong struct{}

type PacketMsg struct {
	ChannelID byte
	EOF       byte // 1 means message ends here.
	Bytes     []byte
}

func (mp PacketMsg) String() string {
	return fmt.Sprintf("PacketMsg{%X:%X T:%X}", mp.ChannelID, mp.Bytes, mp.EOF)
}
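// decaySketch is an illustrative sketch, not part of the original file, of
// the exponential decay applied by updateStats every two seconds: an idle
// channel's recentlySent drops by 20% per tick, so 10000 bytes decays below
// 1% of its original value (to 91) after 21 ticks, i.e. roughly 42 seconds.
func decaySketch() {
	recentlySent := int64(10000)
	for tick := 0; tick < 21; tick++ {
		recentlySent = int64(float64(recentlySent) * 0.8)
	}
	fmt.Println(recentlySent) // 91
}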