google.golang.org/grpc@v1.62.1/internal/transport/controlbuf.go

/*
 *
 * Copyright 2014 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package transport

import (
	"bytes"
	"errors"
	"fmt"
	"net"
	"runtime"
	"strconv"
	"sync"
	"sync/atomic"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
	"google.golang.org/grpc/internal/grpclog"
	"google.golang.org/grpc/internal/grpcutil"
	"google.golang.org/grpc/status"
)

var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
	e.SetMaxDynamicTableSizeLimit(v)
}

type itemNode struct {
	it   any
	next *itemNode
}

type itemList struct {
	head *itemNode
	tail *itemNode
}

func (il *itemList) enqueue(i any) {
	n := &itemNode{it: i}
	if il.tail == nil {
		il.head, il.tail = n, n
		return
	}
	il.tail.next = n
	il.tail = n
}

// peek returns the first item in the list without removing it from the
// list. The list must not be empty.
func (il *itemList) peek() any {
	return il.head.it
}

func (il *itemList) dequeue() any {
	if il.head == nil {
		return nil
	}
	i := il.head.it
	il.head = il.head.next
	if il.head == nil {
		il.tail = nil
	}
	return i
}

func (il *itemList) dequeueAll() *itemNode {
	h := il.head
	il.head, il.tail = nil, nil
	return h
}

func (il *itemList) isEmpty() bool {
	return il.head == nil
}
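
// itemList is a minimal FIFO used by the control buffer and by per-stream
// item queues. An illustrative sketch of its semantics (hypothetical usage,
// not part of the package):
//
//	var il itemList
//	il.enqueue("a")
//	il.enqueue("b")
//	_ = il.peek()    // "a"; peek must only be called on a non-empty list
//	_ = il.dequeue() // "a"
//	_ = il.dequeue() // "b"
//	_ = il.dequeue() // nil: dequeue on an empty list returns nil
//	_ = il.isEmpty() // true
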
// The following defines various control items which could flow through
// the control buffer of the transport. They represent different aspects of
// control tasks, e.g., flow control, settings, stream resetting, etc.

// maxQueuedTransportResponseFrames is the most queued "transport response"
// frames we will buffer before preventing new reads from occurring on the
// transport. These are control frames sent in response to client requests,
// such as RST_STREAM due to bad headers or settings acks.
const maxQueuedTransportResponseFrames = 50

type cbItem interface {
	isTransportResponseFrame() bool
}

// registerStream is used to register an incoming stream with loopy writer.
type registerStream struct {
	streamID uint32
	wq       *writeQuota
}

func (*registerStream) isTransportResponseFrame() bool { return false }

// headerFrame is also used to register a stream on the client side.
type headerFrame struct {
	streamID   uint32
	hf         []hpack.HeaderField
	endStream  bool               // Valid on the server side.
	initStream func(uint32) error // Used only on the client side.
	onWrite    func()
	wq         *writeQuota    // write quota for the stream created.
	cleanup    *cleanupStream // Valid on the server side.
	onOrphaned func(error)    // Valid on the client side.
}

func (h *headerFrame) isTransportResponseFrame() bool {
	return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM
}

type cleanupStream struct {
	streamID uint32
	rst      bool
	rstCode  http2.ErrCode
	onWrite  func()
}

func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM

type earlyAbortStream struct {
	httpStatus     uint32
	streamID       uint32
	contentSubtype string
	status         *status.Status
	rst            bool
}

func (*earlyAbortStream) isTransportResponseFrame() bool { return false }

type dataFrame struct {
	streamID  uint32
	endStream bool
	h         []byte
	d         []byte
	// onEachWrite is called every time
	// a part of d is written out.
	onEachWrite func()
}

func (*dataFrame) isTransportResponseFrame() bool { return false }

type incomingWindowUpdate struct {
	streamID  uint32
	increment uint32
}

func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false }

type outgoingWindowUpdate struct {
	streamID  uint32
	increment uint32
}

func (*outgoingWindowUpdate) isTransportResponseFrame() bool {
	return false // window updates are throttled by thresholds
}

type incomingSettings struct {
	ss []http2.Setting
}

func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK

type outgoingSettings struct {
	ss []http2.Setting
}

func (*outgoingSettings) isTransportResponseFrame() bool { return false }

type incomingGoAway struct {
}

func (*incomingGoAway) isTransportResponseFrame() bool { return false }

type goAway struct {
	code      http2.ErrCode
	debugData []byte
	headsUp   bool
	closeConn error // if set, loopyWriter will exit, resulting in conn closure
}

func (*goAway) isTransportResponseFrame() bool { return false }

type ping struct {
	ack  bool
	data [8]byte
}

func (*ping) isTransportResponseFrame() bool { return true }

type outFlowControlSizeRequest struct {
	resp chan uint32
}

func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }

// closeConnection is an instruction to tell the loopy writer to flush the
// framer and exit, which will cause the transport's connection to be closed
// (by the client or server). The transport itself will close after the reader
// encounters the EOF caused by the connection closure.
type closeConnection struct{}

func (closeConnection) isTransportResponseFrame() bool { return false }

type outStreamState int

const (
	active outStreamState = iota
	empty
	waitingOnStreamQuota
)

type outStream struct {
	id               uint32
	state            outStreamState
	itl              *itemList
	bytesOutStanding int
	wq               *writeQuota

	next *outStream
	prev *outStream
}

func (s *outStream) deleteSelf() {
	if s.prev != nil {
		s.prev.next = s.next
	}
	if s.next != nil {
		s.next.prev = s.prev
	}
	s.next, s.prev = nil, nil
}

type outStreamList struct {
	// Following are sentinel objects that mark the
	// beginning and end of the list. They do not
	// contain any item lists. All valid objects are
	// inserted in between them.
	// This is needed so that an outStream object can
	// deleteSelf() in O(1) time without knowing which
	// list it belongs to.
	head *outStream
	tail *outStream
}

func newOutStreamList() *outStreamList {
	head, tail := new(outStream), new(outStream)
	head.next = tail
	tail.prev = head
	return &outStreamList{
		head: head,
		tail: tail,
	}
}

func (l *outStreamList) enqueue(s *outStream) {
	e := l.tail.prev
	e.next = s
	s.prev = e
	s.next = l.tail
	l.tail.prev = s
}

// dequeue removes an outStream from the beginning of the list.
func (l *outStreamList) dequeue() *outStream {
	b := l.head.next
	if b == l.tail {
		return nil
	}
	b.deleteSelf()
	return b
}
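
// An illustrative sketch of the sentinel-based list (hypothetical usage, not
// part of the package): because every linked-in stream always has non-nil
// prev/next neighbors (possibly the sentinels), deleteSelf needs no reference
// to the list it belongs to.
//
//	l := newOutStreamList()
//	s1, s2 := &outStream{id: 1}, &outStream{id: 3}
//	l.enqueue(s1)
//	l.enqueue(s2)
//	s1.deleteSelf() // O(1); no need to know which list s1 is on
//	_ = l.dequeue() // returns s2
//	_ = l.dequeue() // returns nil; the list is empty
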
// controlBuffer is a way to pass information to loopy.
// Information is passed as specific struct types called control frames.
// A control frame not only represents data, messages or headers to be sent out
// but can also be used to instruct loopy to update its internal state.
// It shouldn't be confused with an HTTP2 frame, although some of the control frames
// like dataFrame and headerFrame do go out on the wire as HTTP2 frames.
type controlBuffer struct {
	ch              chan struct{}
	done            <-chan struct{}
	mu              sync.Mutex
	consumerWaiting bool
	list            *itemList
	err             error

	// transportResponseFrames counts the number of queued items that represent
	// the response of an action initiated by the peer. trfChan is created
	// when transportResponseFrames >= maxQueuedTransportResponseFrames and is
	// closed and nilled when transportResponseFrames drops below the
	// threshold. Both fields are protected by mu.
	transportResponseFrames int
	trfChan                 atomic.Value // chan struct{}
}

func newControlBuffer(done <-chan struct{}) *controlBuffer {
	return &controlBuffer{
		ch:   make(chan struct{}, 1),
		list: &itemList{},
		done: done,
	}
}

// throttle blocks if there are too many transport response frames (e.g.,
// incomingSettings or cleanupStreams that carry an RST_STREAM) queued in the
// controlbuf.
func (c *controlBuffer) throttle() {
	ch, _ := c.trfChan.Load().(chan struct{})
	if ch != nil {
		select {
		case <-ch:
		case <-c.done:
		}
	}
}

func (c *controlBuffer) put(it cbItem) error {
	_, err := c.executeAndPut(nil, it)
	return err
}

func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) {
	var wakeUp bool
	c.mu.Lock()
	if c.err != nil {
		c.mu.Unlock()
		return false, c.err
	}
	if f != nil {
		if !f(it) { // f wasn't successful
			c.mu.Unlock()
			return false, nil
		}
	}
	if c.consumerWaiting {
		wakeUp = true
		c.consumerWaiting = false
	}
	c.list.enqueue(it)
	if it.isTransportResponseFrame() {
		c.transportResponseFrames++
		if c.transportResponseFrames == maxQueuedTransportResponseFrames {
			// We are adding the frame that puts us over the threshold; create
			// a throttling channel.
			c.trfChan.Store(make(chan struct{}))
		}
	}
	c.mu.Unlock()
	if wakeUp {
		select {
		case c.ch <- struct{}{}:
		default:
		}
	}
	return true, nil
}
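
// A minimal usage sketch (illustrative only; the reader-side wiring shown here
// is an assumption, not a verbatim copy of the transport code): writers queue
// control frames with put/executeAndPut, loopy drains them with get, and the
// transport's reader goroutine calls throttle before each read so that a slow
// loopy applies backpressure on the peer.
//
//	done := make(chan struct{})
//	cb := newControlBuffer(done)
//
//	// Producer side (e.g., while handling an incoming SETTINGS frame):
//	_ = cb.put(&incomingSettings{ss: nil})
//
//	// Reader side, before reading the next frame off the wire:
//	cb.throttle() // blocks once 50 transport response frames are queued
//
//	// Consumer side (loopy):
//	it, _ := cb.get(true) // blocks until an item arrives or done fires
//	_ = it
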
// Note argument f should never be nil.
func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) {
	c.mu.Lock()
	if c.err != nil {
		c.mu.Unlock()
		return false, c.err
	}
	if !f(it) { // f wasn't successful
		c.mu.Unlock()
		return false, nil
	}
	c.mu.Unlock()
	return true, nil
}

func (c *controlBuffer) get(block bool) (any, error) {
	for {
		c.mu.Lock()
		if c.err != nil {
			c.mu.Unlock()
			return nil, c.err
		}
		if !c.list.isEmpty() {
			h := c.list.dequeue().(cbItem)
			if h.isTransportResponseFrame() {
				if c.transportResponseFrames == maxQueuedTransportResponseFrames {
					// We are removing the frame that put us over the
					// threshold; close and clear the throttling channel.
					ch := c.trfChan.Load().(chan struct{})
					close(ch)
					c.trfChan.Store((chan struct{})(nil))
				}
				c.transportResponseFrames--
			}
			c.mu.Unlock()
			return h, nil
		}
		if !block {
			c.mu.Unlock()
			return nil, nil
		}
		c.consumerWaiting = true
		c.mu.Unlock()
		select {
		case <-c.ch:
		case <-c.done:
			return nil, errors.New("transport closed by client")
		}
	}
}

func (c *controlBuffer) finish() {
	c.mu.Lock()
	if c.err != nil {
		c.mu.Unlock()
		return
	}
	c.err = ErrConnClosing
	// There may be headers for streams in the control buffer.
	// These streams need to be cleaned out since the transport
	// is not aware of them yet.
	for head := c.list.dequeueAll(); head != nil; head = head.next {
		hdr, ok := head.it.(*headerFrame)
		if !ok {
			continue
		}
		if hdr.onOrphaned != nil { // It will be nil on the server side.
			hdr.onOrphaned(ErrConnClosing)
		}
	}
	// In case throttle() is currently in flight, it needs to be unblocked.
	// Otherwise, the transport may not close, since the transport is closed by
	// the reader encountering the connection error.
	ch, _ := c.trfChan.Load().(chan struct{})
	if ch != nil {
		close(ch)
	}
	c.trfChan.Store((chan struct{})(nil))
	c.mu.Unlock()
}

type side int

const (
	clientSide side = iota
	serverSide
)

// Loopy receives frames from the control buffer.
// Each frame is handled individually; most of the work done by loopy goes
// into handling data frames. Loopy maintains a queue of active streams, and each
// stream maintains a queue of data frames; as loopy receives data frames
// they get added to the queue of the relevant stream.
// Loopy goes over this list of active streams by processing one node every iteration,
// thereby closely resembling round-robin scheduling over all streams. While
// processing a stream, loopy writes out data bytes from this stream capped by the min
// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
type loopyWriter struct {
	side      side
	cbuf      *controlBuffer
	sendQuota uint32
	oiws      uint32 // outbound initial window size.
	// estdStreams is a map of all established streams that are not cleaned up yet.
	// On the client side, this is all streams whose headers were sent out.
	// On the server side, this is all streams whose headers were received.
	estdStreams map[uint32]*outStream // Established streams.
	// activeStreams is a linked-list of all streams that have data to send and some
	// stream-level flow control quota.
	// Each of these streams internally has a list of data items (and perhaps trailers
	// on the server side) to be sent out.
	activeStreams *outStreamList
	framer        *framer
	hBuf          *bytes.Buffer  // The buffer for HPACK encoding.
	hEnc          *hpack.Encoder // HPACK encoder.
	bdpEst        *bdpEstimator
	draining      bool
	conn          net.Conn
	logger        *grpclog.PrefixLogger

	// Side-specific handlers
	ssGoAwayHandler func(*goAway) (bool, error)
}

func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter {
	var buf bytes.Buffer
	l := &loopyWriter{
		side:          s,
		cbuf:          cbuf,
		sendQuota:     defaultWindowSize,
		oiws:          defaultWindowSize,
		estdStreams:   make(map[uint32]*outStream),
		activeStreams: newOutStreamList(),
		framer:        fr,
		hBuf:          &buf,
		hEnc:          hpack.NewEncoder(&buf),
		bdpEst:        bdpEst,
		conn:          conn,
		logger:        logger,
	}
	return l
}
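
// How the transports wire loopy up (a simplified, illustrative sketch; the
// actual transport code differs in details such as framer construction and
// error handling, which are elided here as placeholders):
//
//	done := make(chan struct{})
//	cbuf := newControlBuffer(done)
//	var fr *framer    // built by the transport around its net.Conn
//	var conn net.Conn // the underlying connection
//	var logger *grpclog.PrefixLogger
//	l := newLoopyWriter(clientSide, fr, cbuf, nil /* bdpEst */, conn, logger)
//	go func() {
//		// run blocks until the control buffer is finished or an error occurs.
//		_ = l.run()
//	}()
//	// Other goroutines then hand loopy work via cbuf.put(...) and
//	// cbuf.executeAndPut(...).
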
const minBatchSize = 1000

// run should be run in a separate goroutine.
// It reads control frames from controlBuf and processes them by:
//  1. Updating loopy's internal state, and/or
//  2. Writing out HTTP2 frames on the wire.
//
// Loopy keeps all active streams with data to send in a linked-list.
// All streams in the activeStreams linked-list must have both:
//  1. Data to send, and
//  2. Stream-level flow control quota available.
//
// In each iteration of the run loop, besides processing the incoming control
// frame, loopy calls processData, which processes one node from the
// activeStreams linked-list. This results in writing of HTTP2 frames into an
// underlying write buffer. When there are no more control frames to read from
// controlBuf, loopy flushes the write buffer. As an optimization, to increase
// the batch size for each flush, loopy yields the processor once, if the batch
// size is too low, to give stream goroutines a chance to fill it up.
//
// Upon exiting, if the error causing the exit is not an I/O error, run()
// flushes the underlying connection. The connection is always left open to
// allow different closing behavior on the client and server.
func (l *loopyWriter) run() (err error) {
	defer func() {
		if l.logger.V(logLevel) {
			l.logger.Infof("loopyWriter exiting with error: %v", err)
		}
		if !isIOError(err) {
			l.framer.writer.Flush()
		}
		l.cbuf.finish()
	}()
	for {
		it, err := l.cbuf.get(true)
		if err != nil {
			return err
		}
		if err = l.handle(it); err != nil {
			return err
		}
		if _, err = l.processData(); err != nil {
			return err
		}
		gosched := true
	hasdata:
		for {
			it, err := l.cbuf.get(false)
			if err != nil {
				return err
			}
			if it != nil {
				if err = l.handle(it); err != nil {
					return err
				}
				if _, err = l.processData(); err != nil {
					return err
				}
				continue hasdata
			}
			isEmpty, err := l.processData()
			if err != nil {
				return err
			}
			if !isEmpty {
				continue hasdata
			}
			if gosched {
				gosched = false
				if l.framer.writer.offset < minBatchSize {
					runtime.Gosched()
					continue hasdata
				}
			}
			l.framer.writer.Flush()
			break hasdata
		}
	}
}

func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
	return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
}

func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) {
	// A window update on stream 0 replenishes the connection-level send quota.
	if w.streamID == 0 {
		l.sendQuota += w.increment
		return
	}
	// Otherwise find the stream and update its quota.
	if str, ok := l.estdStreams[w.streamID]; ok {
		str.bytesOutStanding -= int(w.increment)
		if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
			str.state = active
			l.activeStreams.enqueue(str)
			return
		}
	}
}

func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
	return l.framer.fr.WriteSettings(s.ss...)
}

func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
	l.applySettings(s.ss)
	return l.framer.fr.WriteSettingsAck()
}

func (l *loopyWriter) registerStreamHandler(h *registerStream) {
	str := &outStream{
		id:    h.streamID,
		state: empty,
		itl:   &itemList{},
		wq:    h.wq,
	}
	l.estdStreams[h.streamID] = str
}

func (l *loopyWriter) headerHandler(h *headerFrame) error {
	if l.side == serverSide {
		str, ok := l.estdStreams[h.streamID]
		if !ok {
			if l.logger.V(logLevel) {
				l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID)
			}
			return nil
		}
		// Case 1.A: Server is responding back with headers.
		if !h.endStream {
			return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
		}
		// else: Case 1.B: Server wants to close stream.

		if str.state != empty { // either active or waiting on stream quota.
			// add it to str's list of items.
			str.itl.enqueue(h)
			return nil
		}
		if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
			return err
		}
		return l.cleanupStreamHandler(h.cleanup)
	}
	// Case 2: Client wants to originate stream.
	str := &outStream{
		id:    h.streamID,
		state: empty,
		itl:   &itemList{},
		wq:    h.wq,
	}
	return l.originateStream(str, h)
}

func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error {
	// l.draining is set when handling GoAway, in which case we want to avoid
	// creating new streams.
	if l.draining {
		// TODO: provide a better error with the reason we are in draining.
		hdr.onOrphaned(errStreamDrain)
		return nil
	}
	if err := hdr.initStream(str.id); err != nil {
		return err
	}
	if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
		return err
	}
	l.estdStreams[str.id] = str
	return nil
}

func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
	if onWrite != nil {
		onWrite()
	}
	l.hBuf.Reset()
	for _, f := range hf {
		if err := l.hEnc.WriteField(f); err != nil {
			if l.logger.V(logLevel) {
				l.logger.Warningf("Encountered error while encoding headers: %v", err)
			}
		}
	}
	var (
		err               error
		endHeaders, first bool
	)
	first = true
	for !endHeaders {
		size := l.hBuf.Len()
		if size > http2MaxFrameLen {
			size = http2MaxFrameLen
		} else {
			endHeaders = true
		}
		if first {
			first = false
			err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
				StreamID:      streamID,
				BlockFragment: l.hBuf.Next(size),
				EndStream:     endStream,
				EndHeaders:    endHeaders,
			})
		} else {
			err = l.framer.fr.WriteContinuation(
				streamID,
				endHeaders,
				l.hBuf.Next(size),
			)
		}
		if err != nil {
			return err
		}
	}
	return nil
}
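
// A worked example of the splitting above (illustrative numbers): with
// http2MaxFrameLen = 16384, a 40000-byte encoded header block goes out as one
// HEADERS frame carrying bytes [0, 16384), a CONTINUATION frame carrying
// [16384, 32768), and a final CONTINUATION frame carrying [32768, 40000) with
// END_HEADERS set. END_STREAM, if requested, is set only on the HEADERS frame.
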
func (l *loopyWriter) preprocessData(df *dataFrame) {
	str, ok := l.estdStreams[df.streamID]
	if !ok {
		return
	}
	// If we got data for a stream, it means the stream was originated and
	// its headers were sent out.
	str.itl.enqueue(df)
	if str.state == empty {
		str.state = active
		l.activeStreams.enqueue(str)
	}
}

func (l *loopyWriter) pingHandler(p *ping) error {
	if !p.ack {
		l.bdpEst.timesnap(p.data)
	}
	return l.framer.fr.WritePing(p.ack, p.data)
}

func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) {
	o.resp <- l.sendQuota
}

func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
	c.onWrite()
	if str, ok := l.estdStreams[c.streamID]; ok {
		// On the server side it could be a trailers-only response or a
		// RST_STREAM before stream initialization; thus the stream might
		// not be established yet.
		delete(l.estdStreams, c.streamID)
		str.deleteSelf()
	}
	if c.rst { // If RST_STREAM needs to be sent.
		if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
			return err
		}
	}
	if l.draining && len(l.estdStreams) == 0 {
		// Flush and close the connection; we are done with it.
		return errors.New("finished processing active streams while in draining mode")
	}
	return nil
}

func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error {
	if l.side == clientSide {
		return errors.New("earlyAbortStream not handled on client")
	}
	// In case the caller forgets to set the http status, default to 200.
	if eas.httpStatus == 0 {
		eas.httpStatus = 200
	}
	headerFields := []hpack.HeaderField{
		{Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))},
		{Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)},
		{Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))},
		{Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())},
	}

	if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil {
		return err
	}
	if eas.rst {
		if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil {
			return err
		}
	}
	return nil
}

func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
	if l.side == clientSide {
		l.draining = true
		if len(l.estdStreams) == 0 {
			// Flush and close the connection; we are done with it.
			return errors.New("received GOAWAY with no active streams")
		}
	}
	return nil
}

func (l *loopyWriter) goAwayHandler(g *goAway) error {
	// Handling of an outgoing GoAway is very specific to the side.
	if l.ssGoAwayHandler != nil {
		draining, err := l.ssGoAwayHandler(g)
		if err != nil {
			return err
		}
		l.draining = draining
	}
	return nil
}

func (l *loopyWriter) handle(i any) error {
	switch i := i.(type) {
	case *incomingWindowUpdate:
		l.incomingWindowUpdateHandler(i)
	case *outgoingWindowUpdate:
		return l.outgoingWindowUpdateHandler(i)
	case *incomingSettings:
		return l.incomingSettingsHandler(i)
	case *outgoingSettings:
		return l.outgoingSettingsHandler(i)
	case *headerFrame:
		return l.headerHandler(i)
	case *registerStream:
		l.registerStreamHandler(i)
	case *cleanupStream:
		return l.cleanupStreamHandler(i)
	case *earlyAbortStream:
		return l.earlyAbortStreamHandler(i)
	case *incomingGoAway:
		return l.incomingGoAwayHandler(i)
	case *dataFrame:
		l.preprocessData(i)
	case *ping:
		return l.pingHandler(i)
	case *goAway:
		return l.goAwayHandler(i)
	case *outFlowControlSizeRequest:
		l.outFlowControlSizeRequestHandler(i)
	case closeConnection:
		// Just return a non-I/O error and run() will flush and close the
		// connection.
		return ErrConnClosing
	default:
		return fmt.Errorf("transport: unknown control message type %T", i)
	}
	return nil
}

func (l *loopyWriter) applySettings(ss []http2.Setting) {
	for _, s := range ss {
		switch s.ID {
		case http2.SettingInitialWindowSize:
			o := l.oiws
			l.oiws = s.Val
			if o < l.oiws {
				// If the new limit is greater, make all depleted streams active.
				for _, stream := range l.estdStreams {
					if stream.state == waitingOnStreamQuota {
						stream.state = active
						l.activeStreams.enqueue(stream)
					}
				}
			}
		case http2.SettingHeaderTableSize:
			updateHeaderTblSize(l.hEnc, s.Val)
		}
	}
}

// processData removes the first stream from active streams, writes out at most 16KB
// of its data and then puts it at the end of activeStreams if there's still more data
// to be sent and the stream still has some stream-level flow control quota left.
func (l *loopyWriter) processData() (bool, error) {
	if l.sendQuota == 0 {
		return true, nil
	}
	str := l.activeStreams.dequeue() // Remove the first stream.
	if str == nil {
		return true, nil
	}
	dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item of this stream.
	// A data item is represented by a dataFrame, since it later translates into
	// multiple HTTP2 data frames.
	// Every dataFrame has two buffers: h, which holds the gRPC message header, and
	// d, which holds the actual data.
	// As an optimization to keep wire traffic low, data from d is copied into h so
	// that the frame that goes out is as close as possible to the maximum HTTP2
	// frame size.

	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
		// The client sends out an empty data frame with endStream = true.
		if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
			return false, err
		}
		str.itl.dequeue() // remove the empty data item from stream
		if str.itl.isEmpty() {
			str.state = empty
		} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
			if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
				return false, err
			}
			if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
				return false, err
			}
		} else {
			l.activeStreams.enqueue(str)
		}
		return false, nil
	}
	var buf []byte
	// Figure out the maximum size we can send.
	maxSize := http2MaxFrameLen
	if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
		str.state = waitingOnStreamQuota
		return false, nil
	} else if maxSize > strQuota {
		maxSize = strQuota
	}
	if maxSize > int(l.sendQuota) { // connection-level flow control.
		maxSize = int(l.sendQuota)
	}
	// Compute how much of the header and data we can send within quota and max frame length.
	hSize := min(maxSize, len(dataItem.h))
	dSize := min(maxSize-hSize, len(dataItem.d))
	if hSize != 0 {
		if dSize == 0 {
			buf = dataItem.h
		} else {
			// We can add some data to the gRPC message header to distribute bytes
			// more equally across frames.
			// Copy on the stack to avoid generating garbage.
			var localBuf [http2MaxFrameLen]byte
			copy(localBuf[:hSize], dataItem.h)
			copy(localBuf[hSize:], dataItem.d[:dSize])
			buf = localBuf[:hSize+dSize]
		}
	} else {
		buf = dataItem.d
	}

	size := hSize + dSize

	// Now that outgoing flow controls are checked, we can replenish str's write quota.
	str.wq.replenish(size)
	var endStream bool
	// If this is the last data message on this stream and all of it can be written in
	// this iteration.
	if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
		endStream = true
	}
	if dataItem.onEachWrite != nil {
		dataItem.onEachWrite()
	}
	if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
		return false, err
	}
	str.bytesOutStanding += size
	l.sendQuota -= uint32(size)
	dataItem.h = dataItem.h[hSize:]
	dataItem.d = dataItem.d[dSize:]

	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
		str.itl.dequeue()
	}
	if str.itl.isEmpty() {
		str.state = empty
	} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
		if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
			return false, err
		}
		if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
			return false, err
		}
	} else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
		str.state = waitingOnStreamQuota
	} else { // Otherwise add it back to the list of active streams.
		l.activeStreams.enqueue(str)
	}
	return false, nil
}
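
// A worked example of the size capping above (illustrative numbers): with
// http2MaxFrameLen = 16384, an outbound initial window size (oiws) of 65535,
// 60000 bytes already outstanding on the stream, and a connection-level
// sendQuota of 10000, the stream quota is 65535-60000 = 5535 and
// maxSize = min(16384, 5535, 10000) = 5535. The DATA frame written out carries
// at most 5535 bytes, taken first from dataItem.h and then from dataItem.d.
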
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}