google.golang.org/grpc@v1.74.2/internal/transport/controlbuf.go (about)

     1  /*
     2   *
     3   * Copyright 2014 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   */
    18  
    19  package transport
    20  
    21  import (
    22  	"bytes"
    23  	"errors"
    24  	"fmt"
    25  	"net"
    26  	"runtime"
    27  	"strconv"
    28  	"sync"
    29  	"sync/atomic"
    30  
    31  	"golang.org/x/net/http2"
    32  	"golang.org/x/net/http2/hpack"
    33  	"google.golang.org/grpc/internal/grpclog"
    34  	"google.golang.org/grpc/internal/grpcutil"
    35  	"google.golang.org/grpc/mem"
    36  	"google.golang.org/grpc/status"
    37  )
    38  
// updateHeaderTblSize forwards a new header-table-size setting to the
// HPACK encoder. It is declared as a variable, presumably so tests can
// stub it out — TODO confirm against the test suite.
var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
	e.SetMaxDynamicTableSizeLimit(v)
}
    42  
// itemNodePool is used to reduce heap allocations. Nodes are recycled by
// itemList.dequeue and reinitialized by itemList.enqueue before reuse.
var itemNodePool = sync.Pool{
	New: func() any {
		return &itemNode{}
	},
}
    49  
// itemNode is a singly-linked-list node holding one queued control item.
type itemNode struct {
	it   any
	next *itemNode
}

// itemList is a FIFO queue of control items, backed by a singly linked
// list with head/tail pointers for O(1) enqueue and dequeue.
type itemList struct {
	head *itemNode
	tail *itemNode
}
    59  
    60  func (il *itemList) enqueue(i any) {
    61  	n := itemNodePool.Get().(*itemNode)
    62  	n.next = nil
    63  	n.it = i
    64  	if il.tail == nil {
    65  		il.head, il.tail = n, n
    66  		return
    67  	}
    68  	il.tail.next = n
    69  	il.tail = n
    70  }
    71  
// peek returns the first item in the list without removing it from the
// list. Precondition: the list must be non-empty — calling peek on an
// empty list dereferences a nil head and panics.
func (il *itemList) peek() any {
	return il.head.it
}
    77  
    78  func (il *itemList) dequeue() any {
    79  	if il.head == nil {
    80  		return nil
    81  	}
    82  	i := il.head.it
    83  	temp := il.head
    84  	il.head = il.head.next
    85  	itemNodePool.Put(temp)
    86  	if il.head == nil {
    87  		il.tail = nil
    88  	}
    89  	return i
    90  }
    91  
    92  func (il *itemList) dequeueAll() *itemNode {
    93  	h := il.head
    94  	il.head, il.tail = nil, nil
    95  	return h
    96  }
    97  
// isEmpty reports whether the list contains no items.
func (il *itemList) isEmpty() bool {
	return il.head == nil
}
   101  
   102  // The following defines various control items which could flow through
   103  // the control buffer of transport. They represent different aspects of
   104  // control tasks, e.g., flow control, settings, streaming resetting, etc.
   105  
// maxQueuedTransportResponseFrames is the most queued "transport response"
// frames we will buffer before preventing new reads from occurring on the
// transport.  These are control frames sent in response to client requests,
// such as RST_STREAM due to bad headers or settings acks.
const maxQueuedTransportResponseFrames = 50

// cbItem is implemented by every control frame type queued on the
// controlBuffer. isTransportResponseFrame reports whether the item counts
// toward the maxQueuedTransportResponseFrames throttling threshold.
type cbItem interface {
	isTransportResponseFrame() bool
}
   115  
// registerStream is used to register an incoming stream with loopy writer.
type registerStream struct {
	streamID uint32
	wq       *writeQuota // write quota for the stream.
}

// Registering a stream does not count toward transport-response throttling.
func (*registerStream) isTransportResponseFrame() bool { return false }
   123  
// headerFrame carries a header block to be written out; it is also used to
// register a stream on the client-side.
type headerFrame struct {
	streamID   uint32
	hf         []hpack.HeaderField
	endStream  bool               // Valid on server side.
	initStream func(uint32) error // Used only on the client side.
	onWrite    func()
	wq         *writeQuota    // write quota for the stream created.
	cleanup    *cleanupStream // Valid on the server side.
	onOrphaned func(error)    // Valid on client-side
}

// isTransportResponseFrame reports whether writing this headerFrame will
// also emit a RST_STREAM via its attached cleanup, in which case it counts
// toward transport-response throttling.
func (h *headerFrame) isTransportResponseFrame() bool {
	return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM
}
   139  
// cleanupStream instructs loopy to tear down a stream, optionally writing
// a RST_STREAM frame with rstCode. onWrite is invoked before the teardown.
type cleanupStream struct {
	streamID uint32
	rst      bool
	rstCode  http2.ErrCode
	onWrite  func()
}

func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
   148  
// earlyAbortStream tells loopy (server-side only) to reject a stream by
// writing a terminal header block carrying httpStatus/status and, when rst
// is set, a RST_STREAM frame.
type earlyAbortStream struct {
	httpStatus     uint32
	streamID       uint32
	contentSubtype string
	status         *status.Status
	rst            bool
}

func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
   158  
// dataFrame carries message bytes for a stream: h holds the grpc-message
// header and data holds the payload. processing is set once loopy has
// moved data into the stream's reader; until then finish/cleanup must
// free data.
type dataFrame struct {
	streamID   uint32
	endStream  bool
	h          []byte
	data       mem.BufferSlice
	processing bool
	// onEachWrite is called every time
	// a part of data is written out.
	onEachWrite func()
}

func (*dataFrame) isTransportResponseFrame() bool { return false }
   171  
// incomingWindowUpdate notifies loopy of a WINDOW_UPDATE received from the
// peer; streamID 0 replenishes connection-level quota, otherwise the
// stream's quota.
type incomingWindowUpdate struct {
	streamID  uint32
	increment uint32
}

func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false }

// outgoingWindowUpdate asks loopy to write a WINDOW_UPDATE frame to the
// peer.
type outgoingWindowUpdate struct {
	streamID  uint32
	increment uint32
}

func (*outgoingWindowUpdate) isTransportResponseFrame() bool {
	return false // window updates are throttled by thresholds
}
   187  
// incomingSettings carries a SETTINGS frame received from the peer; loopy
// applies it and then writes a SETTINGS ACK.
type incomingSettings struct {
	ss []http2.Setting
}

func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK

// outgoingSettings asks loopy to write a SETTINGS frame to the peer.
type outgoingSettings struct {
	ss []http2.Setting
}

func (*outgoingSettings) isTransportResponseFrame() bool { return false }
   199  
// incomingGoAway notifies loopy that a GOAWAY frame was received from the
// peer.
type incomingGoAway struct {
}

func (*incomingGoAway) isTransportResponseFrame() bool { return false }

// goAway asks loopy to send a GOAWAY frame; handling is delegated to the
// side-specific handler.
type goAway struct {
	code      http2.ErrCode
	debugData []byte
	headsUp   bool
	closeConn error // if set, loopyWriter will exit with this error
}

func (*goAway) isTransportResponseFrame() bool { return false }

// ping asks loopy to write a PING frame (a PING ACK when ack is set) with
// the given 8-byte opaque payload.
type ping struct {
	ack  bool
	data [8]byte
}

func (*ping) isTransportResponseFrame() bool { return true }

// outFlowControlSizeRequest asks loopy to report its current
// connection-level send quota on resp.
type outFlowControlSizeRequest struct {
	resp chan uint32
}

func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
   226  
// closeConnection is an instruction to tell the loopy writer to flush the
// framer and exit, which will cause the transport's connection to be closed
// (by the client or server).  The transport itself will close after the reader
// encounters the EOF caused by the connection closure.
type closeConnection struct{}

// Closing the connection is initiated locally, not in response to the peer.
func (closeConnection) isTransportResponseFrame() bool { return false }
   234  
// outStreamState describes where an outStream currently is in loopy's
// scheduling.
type outStreamState int

const (
	// active: the stream has items queued and is on the activeStreams list.
	active outStreamState = iota
	// empty: the stream has nothing queued.
	empty
	// waitingOnStreamQuota: the stream has items queued but has exhausted
	// its stream-level flow control quota.
	waitingOnStreamQuota
)
   242  
// outStream tracks loopy's per-stream outbound state: queued items, flow
// control accounting, and the stream's links in an outStreamList.
type outStream struct {
	id               uint32
	state            outStreamState
	itl              *itemList // items queued on this stream.
	bytesOutStanding int       // bytes sent but not yet credited back by window updates.
	wq               *writeQuota
	reader           mem.Reader

	next *outStream
	prev *outStream
}
   254  
   255  func (s *outStream) deleteSelf() {
   256  	if s.prev != nil {
   257  		s.prev.next = s.next
   258  	}
   259  	if s.next != nil {
   260  		s.next.prev = s.prev
   261  	}
   262  	s.next, s.prev = nil, nil
   263  }
   264  
// outStreamList is a doubly-linked list of outStreams.
type outStreamList struct {
	// Following are sentinel objects that mark the
	// beginning and end of the list. They do not
	// contain any item lists. All valid objects are
	// inserted in between them.
	// This is needed so that an outStream object can
	// deleteSelf() in O(1) time without knowing which
	// list it belongs to.
	head *outStream
	tail *outStream
}
   276  
   277  func newOutStreamList() *outStreamList {
   278  	head, tail := new(outStream), new(outStream)
   279  	head.next = tail
   280  	tail.prev = head
   281  	return &outStreamList{
   282  		head: head,
   283  		tail: tail,
   284  	}
   285  }
   286  
   287  func (l *outStreamList) enqueue(s *outStream) {
   288  	e := l.tail.prev
   289  	e.next = s
   290  	s.prev = e
   291  	s.next = l.tail
   292  	l.tail.prev = s
   293  }
   294  
   295  // remove from the beginning of the list.
   296  func (l *outStreamList) dequeue() *outStream {
   297  	b := l.head.next
   298  	if b == l.tail {
   299  		return nil
   300  	}
   301  	b.deleteSelf()
   302  	return b
   303  }
   304  
// controlBuffer is a way to pass information to loopy.
//
// Information is passed as specific struct types called control frames. A
// control frame not only represents data, messages or headers to be sent out
// but can also be used to instruct loopy to update its internal state. It
// shouldn't be confused with an HTTP2 frame, although some of the control
// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames.
type controlBuffer struct {
	wakeupCh chan struct{}   // Unblocks readers waiting for something to read.
	done     <-chan struct{} // Closed when the transport is done.

	// Mutex guards all the fields below, except trfChan which can be read
	// atomically without holding mu.
	mu              sync.Mutex
	consumerWaiting bool      // True when readers are blocked waiting for new data.
	closed          bool      // True when the controlbuf is finished.
	list            *itemList // List of queued control frames.

	// transportResponseFrames counts the number of queued items that represent
	// the response of an action initiated by the peer.  trfChan is created
	// when transportResponseFrames >= maxQueuedTransportResponseFrames and is
	// closed and nilled when transportResponseFrames drops below the
	// threshold.  Both fields are protected by mu.
	transportResponseFrames int
	trfChan                 atomic.Pointer[chan struct{}]
}
   331  
   332  func newControlBuffer(done <-chan struct{}) *controlBuffer {
   333  	return &controlBuffer{
   334  		wakeupCh: make(chan struct{}, 1),
   335  		list:     &itemList{},
   336  		done:     done,
   337  	}
   338  }
   339  
// throttle blocks if there are too many frames in the control buf that
// represent the response of an action initiated by the peer, like
// incomingSettings, cleanupStreams, etc. It unblocks when the queue drains
// below the threshold (trfChan is closed) or the transport is done.
func (c *controlBuffer) throttle() {
	if ch := c.trfChan.Load(); ch != nil {
		select {
		case <-(*ch):
		case <-c.done:
		}
	}
}
   351  
// put adds an item to the controlbuf. It returns ErrConnClosing if the
// controlbuf has already been closed via finish.
func (c *controlBuffer) put(it cbItem) error {
	_, err := c.executeAndPut(nil, it)
	return err
}
   357  
// executeAndPut runs f, and if the return value is true, adds the given item to
// the controlbuf. The item could be nil, in which case, this method simply
// executes f and does not add the item to the controlbuf.
//
// The first return value indicates whether the item was successfully added to
// the control buffer. A non-nil error, specifically ErrConnClosing, is returned
// if the control buffer is already closed.
func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.closed {
		return false, ErrConnClosing
	}
	if f != nil {
		if !f() { // f wasn't successful
			return false, nil
		}
	}
	if it == nil {
		return true, nil
	}

	// Remember whether a consumer was blocked before we enqueue, and clear
	// the flag: we will wake it exactly once below.
	var wakeUp bool
	if c.consumerWaiting {
		wakeUp = true
		c.consumerWaiting = false
	}
	c.list.enqueue(it)
	if it.isTransportResponseFrame() {
		c.transportResponseFrames++
		if c.transportResponseFrames == maxQueuedTransportResponseFrames {
			// We are adding the frame that puts us over the threshold; create
			// a throttling channel.
			ch := make(chan struct{})
			c.trfChan.Store(&ch)
		}
	}
	if wakeUp {
		// Non-blocking send: wakeupCh has capacity 1, so a pending wakeup
		// is already enough.
		select {
		case c.wakeupCh <- struct{}{}:
		default:
		}
	}
	return true, nil
}
   404  
// get returns the next control frame from the control buffer. If block is true
// **and** there are no control frames in the control buffer, the call blocks
// until one of the conditions is met: there is a frame to return or the
// transport is closed.
func (c *controlBuffer) get(block bool) (any, error) {
	for {
		c.mu.Lock()
		frame, err := c.getOnceLocked()
		if frame != nil || err != nil || !block {
			// If we read a frame or an error, we can return to the caller. The
			// call to getOnceLocked() returns a nil frame and a nil error if
			// there is nothing to read, and in that case, if the caller asked
			// us not to block, we can return now as well.
			c.mu.Unlock()
			return frame, err
		}
		// Nothing to read and the caller wants to block: mark ourselves as
		// waiting so the next put wakes us via wakeupCh.
		c.consumerWaiting = true
		c.mu.Unlock()

		// Release the lock above and wait to be woken up.
		select {
		case <-c.wakeupCh:
		case <-c.done:
			return nil, errors.New("transport closed by client")
		}
	}
}
   432  
   433  // Callers must not use this method, but should instead use get().
   434  //
   435  // Caller must hold c.mu.
   436  func (c *controlBuffer) getOnceLocked() (any, error) {
   437  	if c.closed {
   438  		return false, ErrConnClosing
   439  	}
   440  	if c.list.isEmpty() {
   441  		return nil, nil
   442  	}
   443  	h := c.list.dequeue().(cbItem)
   444  	if h.isTransportResponseFrame() {
   445  		if c.transportResponseFrames == maxQueuedTransportResponseFrames {
   446  			// We are removing the frame that put us over the
   447  			// threshold; close and clear the throttling channel.
   448  			ch := c.trfChan.Swap(nil)
   449  			close(*ch)
   450  		}
   451  		c.transportResponseFrames--
   452  	}
   453  	return h, nil
   454  }
   455  
// finish closes the control buffer, cleaning up any streams that have queued
// header frames. Once this method returns, no more frames can be added to the
// control buffer, and attempts to do so will return ErrConnClosing.
func (c *controlBuffer) finish() {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.closed {
		return
	}
	c.closed = true
	// There may be headers for streams in the control buffer.
	// These streams need to be cleaned out since the transport
	// is still not aware of these yet.
	for head := c.list.dequeueAll(); head != nil; head = head.next {
		switch v := head.it.(type) {
		case *headerFrame:
			if v.onOrphaned != nil { // It will be nil on the server-side.
				v.onOrphaned(ErrConnClosing)
			}
		case *dataFrame:
			// Free payloads loopy never took ownership of (processing is
			// only set once the data has been moved into a stream reader).
			if !v.processing {
				v.data.Free()
			}
		}
	}

	// In case throttle() is currently in flight, it needs to be unblocked.
	// Otherwise, the transport may not close, since the transport is closed by
	// the reader encountering the connection error.
	ch := c.trfChan.Swap(nil)
	if ch != nil {
		close(*ch)
	}
}
   491  
// side indicates whether loopy is serving a client-side or server-side
// transport.
type side int

const (
	clientSide side = iota
	serverSide
)
   498  
// Loopy receives frames from the control buffer.
// Each frame is handled individually; most of the work done by loopy goes
// into handling data frames. Loopy maintains a queue of active streams, and each
// stream maintains a queue of data frames; as loopy receives data frames
// it gets added to the queue of the relevant stream.
// Loopy goes over this list of active streams by processing one node every iteration,
// thereby closely resembling a round-robin scheduling over all streams. While
// processing a stream, loopy writes out data bytes from this stream capped by the min
// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
type loopyWriter struct {
	side      side
	cbuf      *controlBuffer // Source of control frames.
	sendQuota uint32         // Connection-level send quota.
	oiws      uint32         // outbound initial window size.
	// estdStreams is map of all established streams that are not cleaned-up yet.
	// On client-side, this is all streams whose headers were sent out.
	// On server-side, this is all streams whose headers were received.
	estdStreams map[uint32]*outStream // Established streams.
	// activeStreams is a linked-list of all streams that have data to send and some
	// stream-level flow control quota.
	// Each of these streams internally have a list of data items(and perhaps trailers
	// on the server-side) to be sent out.
	activeStreams *outStreamList
	framer        *framer
	hBuf          *bytes.Buffer  // The buffer for HPACK encoding.
	hEnc          *hpack.Encoder // HPACK encoder.
	bdpEst        *bdpEstimator
	draining      bool // True once a GOAWAY puts loopy into drain mode.
	conn          net.Conn
	logger        *grpclog.PrefixLogger
	bufferPool    mem.BufferPool

	// Side-specific handlers
	ssGoAwayHandler func(*goAway) (bool, error)
}
   534  
   535  func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter {
   536  	var buf bytes.Buffer
   537  	l := &loopyWriter{
   538  		side:            s,
   539  		cbuf:            cbuf,
   540  		sendQuota:       defaultWindowSize,
   541  		oiws:            defaultWindowSize,
   542  		estdStreams:     make(map[uint32]*outStream),
   543  		activeStreams:   newOutStreamList(),
   544  		framer:          fr,
   545  		hBuf:            &buf,
   546  		hEnc:            hpack.NewEncoder(&buf),
   547  		bdpEst:          bdpEst,
   548  		conn:            conn,
   549  		logger:          logger,
   550  		ssGoAwayHandler: goAwayHandler,
   551  		bufferPool:      bufferPool,
   552  	}
   553  	return l
   554  }
   555  
// minBatchSize is the write-buffer occupancy (in bytes) below which loopy
// yields the processor once before flushing, to give stream goroutines a
// chance to enlarge the batch.
const minBatchSize = 1000
   557  
// run should be run in a separate goroutine.
// It reads control frames from controlBuf and processes them by:
// 1. Updating loopy's internal state, or/and
// 2. Writing out HTTP2 frames on the wire.
//
// Loopy keeps all active streams with data to send in a linked-list.
// All streams in the activeStreams linked-list must have both:
// 1. Data to send, and
// 2. Stream level flow control quota available.
//
// In each iteration of run loop, other than processing the incoming control
// frame, loopy calls processData, which processes one node from the
// activeStreams linked-list.  This results in writing of HTTP2 frames into an
// underlying write buffer.  When there's no more control frames to read from
// controlBuf, loopy flushes the write buffer.  As an optimization, to increase
// the batch size for each flush, loopy yields the processor, once if the batch
// size is too low to give stream goroutines a chance to fill it up.
//
// Upon exiting, if the error causing the exit is not an I/O error, run()
// flushes the underlying connection.  The connection is always left open to
// allow different closing behavior on the client and server.
func (l *loopyWriter) run() (err error) {
	defer func() {
		if l.logger.V(logLevel) {
			l.logger.Infof("loopyWriter exiting with error: %v", err)
		}
		if !isIOError(err) {
			l.framer.writer.Flush()
		}
		l.cbuf.finish()
	}()
	for {
		// Blocking read: wait until there is at least one control frame.
		it, err := l.cbuf.get(true)
		if err != nil {
			return err
		}
		if err = l.handle(it); err != nil {
			return err
		}
		if _, err = l.processData(); err != nil {
			return err
		}
		gosched := true
	hasdata:
		for {
			// Non-blocking reads: drain the control buffer while batching
			// writes into the framer's buffer.
			it, err := l.cbuf.get(false)
			if err != nil {
				return err
			}
			if it != nil {
				if err = l.handle(it); err != nil {
					return err
				}
				if _, err = l.processData(); err != nil {
					return err
				}
				continue hasdata
			}
			isEmpty, err := l.processData()
			if err != nil {
				return err
			}
			if !isEmpty {
				continue hasdata
			}
			if gosched {
				gosched = false
				// Yield once if the batch is small, hoping other goroutines
				// queue more data before we flush.
				if l.framer.writer.offset < minBatchSize {
					runtime.Gosched()
					continue hasdata
				}
			}
			l.framer.writer.Flush()
			break hasdata
		}
	}
}
   635  
// outgoingWindowUpdateHandler writes a WINDOW_UPDATE frame to the peer.
func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
	return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
}
   639  
// incomingWindowUpdateHandler replenishes send quota from a WINDOW_UPDATE
// received from the peer: streamID 0 grows the connection-level quota,
// otherwise the stream's outstanding-byte count shrinks and the stream is
// rescheduled if it was blocked on stream-level quota.
func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) {
	if w.streamID == 0 {
		// Connection-level window update.
		l.sendQuota += w.increment
		return
	}
	// Find the stream and update it.
	if str, ok := l.estdStreams[w.streamID]; ok {
		str.bytesOutStanding -= int(w.increment)
		if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
			str.state = active
			l.activeStreams.enqueue(str)
			return
		}
	}
}
   656  
// outgoingSettingsHandler writes a SETTINGS frame to the peer.
func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
	return l.framer.fr.WriteSettings(s.ss...)
}

// incomingSettingsHandler applies the peer's settings to loopy's state and
// acknowledges them with a SETTINGS ACK.
func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
	l.applySettings(s.ss)
	return l.framer.fr.WriteSettingsAck()
}
   665  
   666  func (l *loopyWriter) registerStreamHandler(h *registerStream) {
   667  	str := &outStream{
   668  		id:     h.streamID,
   669  		state:  empty,
   670  		itl:    &itemList{},
   671  		wq:     h.wq,
   672  		reader: mem.BufferSlice{}.Reader(),
   673  	}
   674  	l.estdStreams[h.streamID] = str
   675  }
   676  
// headerHandler writes a header block. On the server side it responds on
// an already-established stream (and, when endStream is set, defers or
// performs the stream cleanup); on the client side it originates a new
// stream.
func (l *loopyWriter) headerHandler(h *headerFrame) error {
	if l.side == serverSide {
		str, ok := l.estdStreams[h.streamID]
		if !ok {
			if l.logger.V(logLevel) {
				l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID)
			}
			return nil
		}
		// Case 1.A: Server is responding back with headers.
		if !h.endStream {
			return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
		}
		// else:  Case 1.B: Server wants to close stream.

		if str.state != empty { // either active or waiting on stream quota.
			// add it str's list of items so the trailers go out only after
			// the queued data.
			str.itl.enqueue(h)
			return nil
		}
		if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
			return err
		}
		return l.cleanupStreamHandler(h.cleanup)
	}
	// Case 2: Client wants to originate stream.
	str := &outStream{
		id:     h.streamID,
		state:  empty,
		itl:    &itemList{},
		wq:     h.wq,
		reader: mem.BufferSlice{}.Reader(),
	}
	return l.originateStream(str, h)
}
   712  
// originateStream registers and opens a client-originated stream: it runs
// the stream's init hook, writes its headers, and records it in
// estdStreams. While draining, the stream is orphaned instead of created.
func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error {
	// l.draining is set when handling GoAway. In which case, we want to avoid
	// creating new streams.
	if l.draining {
		// TODO: provide a better error with the reason we are in draining.
		hdr.onOrphaned(errStreamDrain)
		return nil
	}
	if err := hdr.initStream(str.id); err != nil {
		return err
	}
	if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
		return err
	}
	l.estdStreams[str.id] = str
	return nil
}
   730  
// writeHeader HPACK-encodes hf and writes it as a HEADERS frame, followed
// by CONTINUATION frames whenever the encoded block exceeds
// http2MaxFrameLen. onWrite, if non-nil, is invoked before any encoding.
func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
	if onWrite != nil {
		onWrite()
	}
	l.hBuf.Reset()
	for _, f := range hf {
		if err := l.hEnc.WriteField(f); err != nil {
			// Encoding errors are logged but not fatal; the field is skipped.
			if l.logger.V(logLevel) {
				l.logger.Warningf("Encountered error while encoding headers: %v", err)
			}
		}
	}
	var (
		err               error
		endHeaders, first bool
	)
	first = true
	for !endHeaders {
		// Emit at most http2MaxFrameLen of the encoded block per frame;
		// the final chunk sets END_HEADERS.
		size := l.hBuf.Len()
		if size > http2MaxFrameLen {
			size = http2MaxFrameLen
		} else {
			endHeaders = true
		}
		if first {
			first = false
			err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
				StreamID:      streamID,
				BlockFragment: l.hBuf.Next(size),
				EndStream:     endStream,
				EndHeaders:    endHeaders,
			})
		} else {
			err = l.framer.fr.WriteContinuation(
				streamID,
				endHeaders,
				l.hBuf.Next(size),
			)
		}
		if err != nil {
			return err
		}
	}
	return nil
}
   776  
   777  func (l *loopyWriter) preprocessData(df *dataFrame) {
   778  	str, ok := l.estdStreams[df.streamID]
   779  	if !ok {
   780  		return
   781  	}
   782  	// If we got data for a stream it means that
   783  	// stream was originated and the headers were sent out.
   784  	str.itl.enqueue(df)
   785  	if str.state == empty {
   786  		str.state = active
   787  		l.activeStreams.enqueue(str)
   788  	}
   789  }
   790  
   791  func (l *loopyWriter) pingHandler(p *ping) error {
   792  	if !p.ack {
   793  		l.bdpEst.timesnap(p.data)
   794  	}
   795  	return l.framer.fr.WritePing(p.ack, p.data)
   796  
   797  }
   798  
// outFlowControlSizeRequestHandler reports the current connection-level
// send quota on the requester's channel.
func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) {
	o.resp <- l.sendQuota
}
   802  
// cleanupStreamHandler tears down a stream: it is removed from
// estdStreams, its queued-but-unprocessed dataFrames are freed, and a
// RST_STREAM is written when requested. While draining, once the last
// stream is gone a non-I/O error is returned so run() flushes and exits.
func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
	c.onWrite()
	if str, ok := l.estdStreams[c.streamID]; ok {
		// On the server side it could be a trailers-only response or
		// a RST_STREAM before stream initialization thus the stream might
		// not be established yet.
		delete(l.estdStreams, c.streamID)
		str.reader.Close()
		str.deleteSelf()
		for head := str.itl.dequeueAll(); head != nil; head = head.next {
			if df, ok := head.it.(*dataFrame); ok {
				// Only free payloads loopy has not already moved into the
				// stream reader.
				if !df.processing {
					df.data.Free()
				}
			}
		}
	}
	if c.rst { // If RST_STREAM needs to be sent.
		if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
			return err
		}
	}
	if l.draining && len(l.estdStreams) == 0 {
		// Flush and close the connection; we are done with it.
		return errors.New("finished processing active streams while in draining mode")
	}
	return nil
}
   831  
// earlyAbortStreamHandler (server-side only) rejects a stream by writing a
// terminal header block carrying the HTTP and gRPC status, optionally
// followed by a RST_STREAM frame.
func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error {
	if l.side == clientSide {
		return errors.New("earlyAbortStream not handled on client")
	}
	// In case the caller forgets to set the http status, default to 200.
	if eas.httpStatus == 0 {
		eas.httpStatus = 200
	}
	headerFields := []hpack.HeaderField{
		{Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))},
		{Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)},
		{Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))},
		{Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())},
	}

	// endStream=true: this header block terminates the stream.
	if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil {
		return err
	}
	if eas.rst {
		if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil {
			return err
		}
	}
	return nil
}
   857  
   858  func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
   859  	if l.side == clientSide {
   860  		l.draining = true
   861  		if len(l.estdStreams) == 0 {
   862  			// Flush and close the connection; we are done with it.
   863  			return errors.New("received GOAWAY with no active streams")
   864  		}
   865  	}
   866  	return nil
   867  }
   868  
   869  func (l *loopyWriter) goAwayHandler(g *goAway) error {
   870  	// Handling of outgoing GoAway is very specific to side.
   871  	if l.ssGoAwayHandler != nil {
   872  		draining, err := l.ssGoAwayHandler(g)
   873  		if err != nil {
   874  			return err
   875  		}
   876  		l.draining = draining
   877  	}
   878  	return nil
   879  }
   880  
// handle dispatches one control frame to its type-specific handler. Any
// returned error causes loopy's run loop to exit.
func (l *loopyWriter) handle(i any) error {
	switch i := i.(type) {
	case *incomingWindowUpdate:
		l.incomingWindowUpdateHandler(i)
	case *outgoingWindowUpdate:
		return l.outgoingWindowUpdateHandler(i)
	case *incomingSettings:
		return l.incomingSettingsHandler(i)
	case *outgoingSettings:
		return l.outgoingSettingsHandler(i)
	case *headerFrame:
		return l.headerHandler(i)
	case *registerStream:
		l.registerStreamHandler(i)
	case *cleanupStream:
		return l.cleanupStreamHandler(i)
	case *earlyAbortStream:
		return l.earlyAbortStreamHandler(i)
	case *incomingGoAway:
		return l.incomingGoAwayHandler(i)
	case *dataFrame:
		l.preprocessData(i)
	case *ping:
		return l.pingHandler(i)
	case *goAway:
		return l.goAwayHandler(i)
	case *outFlowControlSizeRequest:
		l.outFlowControlSizeRequestHandler(i)
	case closeConnection:
		// Just return a non-I/O error and run() will flush and close the
		// connection.
		return ErrConnClosing
	default:
		return fmt.Errorf("transport: unknown control message type %T", i)
	}
	return nil
}
   918  
   919  func (l *loopyWriter) applySettings(ss []http2.Setting) {
   920  	for _, s := range ss {
   921  		switch s.ID {
   922  		case http2.SettingInitialWindowSize:
   923  			o := l.oiws
   924  			l.oiws = s.Val
   925  			if o < l.oiws {
   926  				// If the new limit is greater make all depleted streams active.
   927  				for _, stream := range l.estdStreams {
   928  					if stream.state == waitingOnStreamQuota {
   929  						stream.state = active
   930  						l.activeStreams.enqueue(stream)
   931  					}
   932  				}
   933  			}
   934  		case http2.SettingHeaderTableSize:
   935  			updateHeaderTblSize(l.hEnc, s.Val)
   936  		}
   937  	}
   938  }
   939  
// processData removes the first stream from active streams, writes out at most 16KB
// of its data and then puts it at the end of activeStreams if there's still more data
// to be sent and stream has some stream-level flow control.
//
// The returned bool reports whether loopy has nothing to send right now
// (connection-level quota exhausted or no active streams); a non-nil error
// means writing to the framer failed.
func (l *loopyWriter) processData() (bool, error) {
	if l.sendQuota == 0 { // No connection-level flow-control quota left.
		return true, nil
	}
	str := l.activeStreams.dequeue() // Remove the first stream.
	if str == nil {
		return true, nil
	}
	reader := str.reader
	dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
	if !dataItem.processing {
		// First visit to this item: hand its buffers to the stream's reader
		// and drop this reference so the BufferSlice can be reclaimed once
		// the reader is done with it.
		dataItem.processing = true
		str.reader.Reset(dataItem.data)
		dataItem.data.Free()
	}
	// A data item is represented by a dataFrame, since it later translates into
	// multiple HTTP2 data frames.
	// Every dataFrame has two buffers; h that keeps grpc-message header and data
	// that is the actual message. As an optimization to keep wire traffic low, data
	// from data is copied to h to make as big as the maximum possible HTTP2 frame
	// size.

	if len(dataItem.h) == 0 && reader.Remaining() == 0 { // Empty data frame
		// Client sends out empty data frame with endStream = true
		if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
			return false, err
		}
		str.itl.dequeue() // remove the empty data item from stream
		_ = reader.Close()
		if str.itl.isEmpty() {
			str.state = empty
		} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
			if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
				return false, err
			}
			if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
				return false, err
			}
		} else {
			l.activeStreams.enqueue(str)
		}
		return false, nil
	}

	// Figure out the maximum size we can send
	maxSize := http2MaxFrameLen
	if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
		str.state = waitingOnStreamQuota
		return false, nil
	} else if maxSize > strQuota {
		maxSize = strQuota
	}
	if maxSize > int(l.sendQuota) { // connection-level flow control.
		maxSize = int(l.sendQuota)
	}
	// Compute how much of the header and data we can send within quota and max frame length
	hSize := min(maxSize, len(dataItem.h))
	dSize := min(maxSize-hSize, reader.Remaining())
	remainingBytes := len(dataItem.h) + reader.Remaining() - hSize - dSize
	size := hSize + dSize

	var buf *[]byte

	if hSize != 0 && dSize == 0 {
		// Only header bytes go out this round; write them directly without
		// copying into a scratch buffer.
		buf = &dataItem.h
	} else {
		// Note: this is only necessary because the http2.Framer does not support
		// partially writing a frame, so the sequence must be materialized into a buffer.
		// TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed.
		pool := l.bufferPool
		if pool == nil {
			// Note that this is only supposed to be nil in tests. Otherwise, stream is
			// always initialized with a BufferPool.
			pool = mem.DefaultBufferPool()
		}
		buf = pool.Get(size)
		defer pool.Put(buf)

		// Materialize header bytes followed by message bytes into one frame.
		copy((*buf)[:hSize], dataItem.h)
		_, _ = reader.Read((*buf)[hSize:])
	}

	// Now that outgoing flow controls are checked we can replenish str's write quota
	str.wq.replenish(size)
	var endStream bool
	// If this is the last data message on this stream and all of it can be written in this iteration.
	if dataItem.endStream && remainingBytes == 0 {
		endStream = true
	}
	if dataItem.onEachWrite != nil {
		dataItem.onEachWrite()
	}
	if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil {
		return false, err
	}
	// Account for the bytes just written against both flow-control windows.
	str.bytesOutStanding += size
	l.sendQuota -= uint32(size)
	dataItem.h = dataItem.h[hSize:]

	if remainingBytes == 0 { // All the data from that message was written out.
		_ = reader.Close()
		str.itl.dequeue()
	}
	if str.itl.isEmpty() {
		str.state = empty
	} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
		if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
			return false, err
		}
		if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
			return false, err
		}
	} else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
		str.state = waitingOnStreamQuota
	} else { // Otherwise add it back to the list of active streams.
		l.activeStreams.enqueue(str)
	}
	return false, nil
}