github.com/cloudwego/kitex@v0.9.0/pkg/remote/trans/nphttp2/grpc/controlbuf.go

     1  /*
     2   *
     3   * Copyright 2014 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   * This file may have been modified by CloudWeGo authors. All CloudWeGo
    18   * Modifications are Copyright 2021 CloudWeGo Authors.
    19   */
    20  
    21  package grpc
    22  
    23  import (
    24  	"bytes"
    25  	"fmt"
    26  	"runtime"
    27  	"sync"
    28  	"sync/atomic"
    29  
    30  	"github.com/bytedance/gopkg/lang/mcache"
    31  	"golang.org/x/net/http2"
    32  	"golang.org/x/net/http2/hpack"
    33  
    34  	"github.com/cloudwego/kitex/pkg/klog"
    35  )
    36  
    37  var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
    38  	e.SetMaxDynamicTableSizeLimit(v)
    39  }
    40  
    41  type itemNode struct {
    42  	it   interface{}
    43  	next *itemNode
    44  }
    45  
    46  type itemList struct {
    47  	head *itemNode
    48  	tail *itemNode
    49  }
    50  
    51  func (il *itemList) enqueue(i interface{}) {
    52  	n := &itemNode{it: i}
    53  	if il.tail == nil {
    54  		il.head, il.tail = n, n
    55  		return
    56  	}
    57  	il.tail.next = n
    58  	il.tail = n
    59  }
    60  
    61  // peek returns the first item in the list without removing it from the
    62  // list.
    63  func (il *itemList) peek() interface{} {
    64  	return il.head.it
    65  }
    66  
    67  func (il *itemList) dequeue() interface{} {
    68  	if il.head == nil {
    69  		return nil
    70  	}
    71  	i := il.head.it
    72  	il.head = il.head.next
    73  	if il.head == nil {
    74  		il.tail = nil
    75  	}
    76  	return i
    77  }
    78  
    79  func (il *itemList) dequeueAll() *itemNode {
    80  	h := il.head
    81  	il.head, il.tail = nil, nil
    82  	return h
    83  }
    84  
    85  func (il *itemList) isEmpty() bool {
    86  	return il.head == nil
    87  }
    88  
    89  // The following defines various control items which could flow through
    90  // the control buffer of the transport. They represent different aspects of
    91  // control tasks, e.g., flow control, settings, stream resetting, etc.
    92  
    93  // maxQueuedTransportResponseFrames is the maximum number of queued "transport
    94  // response" frames we will buffer before preventing new reads from occurring on
    95  // the transport. These are control frames sent in response to client requests,
    96  // such as RST_STREAM due to bad headers or settings acks.
    97  const maxQueuedTransportResponseFrames = 50
    98  
    99  type cbItem interface {
   100  	isTransportResponseFrame() bool
   101  }
   102  
   103  // registerStream is used to register an incoming stream with loopy writer.
   104  type registerStream struct {
   105  	streamID uint32
   106  	wq       *writeQuota
   107  }
   108  
   109  func (*registerStream) isTransportResponseFrame() bool { return false }
   110  
   111  // headerFrame is also used to register a stream on the client side.
   112  type headerFrame struct {
   113  	streamID   uint32
   114  	hf         []hpack.HeaderField
   115  	endStream  bool               // Valid on server side.
   116  	initStream func(uint32) error // Used only on the client side.
   117  	onWrite    func()
   118  	wq         *writeQuota    // write quota for the stream created.
   119  	cleanup    *cleanupStream // Valid on the server side.
   120  	onOrphaned func(error)    // Valid on client-side
   121  }
   122  
   123  func (h *headerFrame) isTransportResponseFrame() bool {
   124  	return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM
   125  }
   126  
   127  type cleanupStream struct {
   128  	streamID uint32
   129  	rst      bool
   130  	rstCode  http2.ErrCode
   131  	onWrite  func()
   132  }
   133  
   134  func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
   135  
   136  type dataFrame struct {
   137  	streamID  uint32
   138  	endStream bool
   139  	// h is optional, d is required.
   140  	// You can assign the header to h and the payload to d,
   141  	// or just assign the header + payload together to d.
   142  	// In other words, h = nil means d = header + payload.
   143  	h      []byte
   144  	d      []byte
   145  	dcache []byte // dcache is the original d allocated by mcache; this pointer is only used by kitex
   146  	// onEachWrite is called every time
   147  	// a part of d is written out.
   148  	onEachWrite func()
   149  }
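        // For illustration only (hypothetical values): a producer may queue a frame
        // either with the gRPC message header and payload split across h and d, or
        // pre-joined in d:
        //
        //	&dataFrame{streamID: 1, h: hdr, d: payload} // h = message header, d = payload
        //	&dataFrame{streamID: 1, d: joined}          // h == nil, d = header + payload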
   150  
   151  func (*dataFrame) isTransportResponseFrame() bool { return false }
   152  
   153  type incomingWindowUpdate struct {
   154  	streamID  uint32
   155  	increment uint32
   156  }
   157  
   158  func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false }
   159  
   160  type outgoingWindowUpdate struct {
   161  	streamID  uint32
   162  	increment uint32
   163  }
   164  
   165  func (*outgoingWindowUpdate) isTransportResponseFrame() bool {
   166  	return false // window updates are throttled by thresholds
   167  }
   168  
   169  type incomingSettings struct {
   170  	ss []http2.Setting
   171  }
   172  
   173  func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK
   174  
   175  type outgoingSettings struct {
   176  	ss []http2.Setting
   177  }
   178  
   179  func (*outgoingSettings) isTransportResponseFrame() bool { return false }
   180  
   181  type incomingGoAway struct{}
   182  
   183  func (*incomingGoAway) isTransportResponseFrame() bool { return false }
   184  
   185  type goAway struct {
   186  	code      http2.ErrCode
   187  	debugData []byte
   188  	headsUp   bool
   189  	closeConn bool
   190  }
   191  
   192  func (*goAway) isTransportResponseFrame() bool { return false }
   193  
   194  type ping struct {
   195  	ack  bool
   196  	data [8]byte
   197  }
   198  
   199  func (*ping) isTransportResponseFrame() bool { return true }
   200  
   201  type outFlowControlSizeRequest struct {
   202  	resp chan uint32
   203  }
   204  
   205  func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
   206  
   207  type outStreamState int
   208  
   209  const (
   210  	active outStreamState = iota
   211  	empty
   212  	waitingOnStreamQuota
   213  )
   214  
   215  type outStream struct {
   216  	id               uint32
   217  	state            outStreamState
   218  	itl              *itemList
   219  	bytesOutStanding int
   220  	wq               *writeQuota
   221  
   222  	next *outStream
   223  	prev *outStream
   224  }
   225  
   226  func (s *outStream) deleteSelf() {
   227  	if s.prev != nil {
   228  		s.prev.next = s.next
   229  	}
   230  	if s.next != nil {
   231  		s.next.prev = s.prev
   232  	}
   233  	s.next, s.prev = nil, nil
   234  }
   235  
   236  type outStreamList struct {
   237  	// Following are sentinel objects that mark the
   238  	// beginning and end of the list. They do not
   239  	// contain any item lists. All valid objects are
   240  	// inserted in between them.
   241  	// This is needed so that an outStream object can
   242  	// deleteSelf() in O(1) time without knowing which
   243  	// list it belongs to.
   244  	head *outStream
   245  	tail *outStream
   246  }
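        // Conceptually the list always looks like
        //
        //	head <-> s1 <-> s2 <-> ... <-> tail
        //
        // so deleting s1 or s2 never needs to touch the outStreamList itself.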
   247  
   248  func newOutStreamList() *outStreamList {
   249  	head, tail := new(outStream), new(outStream)
   250  	head.next = tail
   251  	tail.prev = head
   252  	return &outStreamList{
   253  		head: head,
   254  		tail: tail,
   255  	}
   256  }
   257  
   258  func (l *outStreamList) enqueue(s *outStream) {
   259  	e := l.tail.prev
   260  	e.next = s
   261  	s.prev = e
   262  	s.next = l.tail
   263  	l.tail.prev = s
   264  }
   265  
   266  // remove from the beginning of the list.
   267  func (l *outStreamList) dequeue() *outStream {
   268  	b := l.head.next
   269  	if b == l.tail {
   270  		return nil
   271  	}
   272  	b.deleteSelf()
   273  	return b
   274  }
   275  
   276  // controlBuffer is a way to pass information to loopy.
   277  // Information is passed as specific struct types called control frames.
   278  // A control frame not only represents data, messages or headers to be sent out
   279  // but can also be used to instruct loopy to update its internal state.
   280  // It shouldn't be confused with an HTTP2 frame, although some of the control frames
   281  // like dataFrame and headerFrame do go out on wire as HTTP2 frames.
   282  type controlBuffer struct {
   283  	ch              chan struct{}
   284  	done            <-chan struct{}
   285  	mu              sync.Mutex
   286  	consumerWaiting bool
   287  	list            *itemList
   288  	err             error
   289  
   290  	// transportResponseFrames counts the number of queued items that represent
   291  	// the response of an action initiated by the peer.  trfChan is created
   292  	// when transportResponseFrames >= maxQueuedTransportResponseFrames and is
   293  	// closed and nilled when transportResponseFrames drops below the
   294  	// threshold.  Both fields are protected by mu.
   295  	transportResponseFrames int
   296  	trfChan                 atomic.Value // *chan struct{}
   297  }
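        // Typical flow (sketch): stream goroutines enqueue control frames via put()
        // or executeAndPut(), while the single loopy goroutine drains them with
        // get(true), parking on ch whenever the list is empty.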
   298  
   299  func newControlBuffer(done <-chan struct{}) *controlBuffer {
   300  	return &controlBuffer{
   301  		ch:   make(chan struct{}, 1),
   302  		list: &itemList{},
   303  		done: done,
   304  	}
   305  }
   306  
   307  // throttle blocks if there are too many incomingSettings/cleanupStreams in the
   308  // controlbuf.
   309  func (c *controlBuffer) throttle() {
   310  	ch, _ := c.trfChan.Load().(*chan struct{})
   311  	if ch != nil {
   312  		select {
   313  		case <-*ch:
   314  		case <-c.done:
   315  		}
   316  	}
   317  }
   318  
   319  func (c *controlBuffer) put(it cbItem) error {
   320  	_, err := c.executeAndPut(nil, it)
   321  	return err
   322  }
   323  
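        // executeAndPut runs f against it (if f is non-nil) while holding the buffer's
        // lock and enqueues it only if f returns true; the returned bool reports
        // whether the item was actually queued.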
   324  func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) {
   325  	var wakeUp bool
   326  	c.mu.Lock()
   327  	if c.err != nil {
   328  		c.mu.Unlock()
   329  		return false, c.err
   330  	}
   331  	if f != nil {
   332  		if !f(it) { // f wasn't successful
   333  			c.mu.Unlock()
   334  			return false, nil
   335  		}
   336  	}
   337  	if c.consumerWaiting {
   338  		wakeUp = true
   339  		c.consumerWaiting = false
   340  	}
   341  	c.list.enqueue(it)
   342  	if it.isTransportResponseFrame() {
   343  		c.transportResponseFrames++
   344  		if c.transportResponseFrames == maxQueuedTransportResponseFrames {
   345  			// We are adding the frame that puts us over the threshold; create
   346  			// a throttling channel.
   347  			ch := make(chan struct{})
   348  			c.trfChan.Store(&ch)
   349  		}
   350  	}
   351  	c.mu.Unlock()
   352  	if wakeUp {
   353  		select {
   354  		case c.ch <- struct{}{}:
   355  		default:
   356  		}
   357  	}
   358  	return true, nil
   359  }
   360  
   361  // Note argument f should never be nil.
   362  func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
   363  	c.mu.Lock()
   364  	if c.err != nil {
   365  		c.mu.Unlock()
   366  		return false, c.err
   367  	}
   368  	if !f(it) { // f wasn't successful
   369  		c.mu.Unlock()
   370  		return false, nil
   371  	}
   372  	c.mu.Unlock()
   373  	return true, nil
   374  }
   375  
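        // get returns the next queued item. With block=true it waits until an item is
        // available or the transport is done; with block=false it returns (nil, nil)
        // immediately when the list is empty.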
   376  func (c *controlBuffer) get(block bool) (interface{}, error) {
   377  	for {
   378  		c.mu.Lock()
   379  		if c.err != nil {
   380  			c.mu.Unlock()
   381  			return nil, c.err
   382  		}
   383  		if !c.list.isEmpty() {
   384  			h := c.list.dequeue().(cbItem)
   385  			if h.isTransportResponseFrame() {
   386  				if c.transportResponseFrames == maxQueuedTransportResponseFrames {
   387  					// We are removing the frame that put us over the
   388  					// threshold; close and clear the throttling channel.
   389  					ch := c.trfChan.Load().(*chan struct{})
   390  					close(*ch)
   391  					c.trfChan.Store((*chan struct{})(nil))
   392  				}
   393  				c.transportResponseFrames--
   394  			}
   395  			c.mu.Unlock()
   396  			return h, nil
   397  		}
   398  		if !block {
   399  			c.mu.Unlock()
   400  			return nil, nil
   401  		}
   402  		c.consumerWaiting = true
   403  		c.mu.Unlock()
   404  		select {
   405  		case <-c.ch:
   406  		case <-c.done:
   407  			c.finish()
   408  			return nil, ErrConnClosing
   409  		}
   410  	}
   411  }
   412  
   413  func (c *controlBuffer) finish() {
   414  	c.mu.Lock()
   415  	if c.err != nil {
   416  		c.mu.Unlock()
   417  		return
   418  	}
   419  	c.err = ErrConnClosing
   420  	// There may be headers for streams in the control buffer.
   421  	// These streams need to be cleaned out since the transport
   422  	// is not yet aware of them.
   423  	for head := c.list.dequeueAll(); head != nil; head = head.next {
   424  		hdr, ok := head.it.(*headerFrame)
   425  		if !ok {
   426  			continue
   427  		}
   428  		if hdr.onOrphaned != nil { // It will be nil on the server-side.
   429  			hdr.onOrphaned(ErrConnClosing)
   430  		}
   431  	}
   432  	c.mu.Unlock()
   433  }
   434  
   435  type side int
   436  
   437  const (
   438  	clientSide side = iota
   439  	serverSide
   440  )
   441  
   442  // Loopy receives frames from the control buffer.
   443  // Each frame is handled individually; most of the work done by loopy goes
   444  // into handling data frames. Loopy maintains a queue of active streams, and each
   445  // stream maintains a queue of data frames; as loopy receives data frames,
   446  // they get added to the queue of the relevant stream.
   447  // Loopy goes over this list of active streams by processing one node every iteration,
   448  // thereby closely resembling round-robin scheduling over all streams. While
   449  // processing a stream, loopy writes out data bytes from this stream capped by the min
   450  // of http2MaxFrameLen, connection-level flow control and stream-level flow control.
   451  type loopyWriter struct {
   452  	side      side
   453  	cbuf      *controlBuffer
   454  	sendQuota uint32
   455  	oiws      uint32 // outbound initial window size.
   456  	// estdStreams is a map of all established streams that are not cleaned up yet.
   457  	// On client-side, this is all streams whose headers were sent out.
   458  	// On server-side, this is all streams whose headers were received.
   459  	estdStreams map[uint32]*outStream // Established streams.
   460  	// activeStreams is a linked-list of all streams that have data to send and some
   461  	// stream-level flow control quota.
   462  	// Each of these streams internally has a list of data items (and perhaps trailers
   463  	// on the server side) to be sent out.
   464  	activeStreams *outStreamList
   465  	framer        *framer
   466  	hBuf          *bytes.Buffer  // The buffer for HPACK encoding.
   467  	hEnc          *hpack.Encoder // HPACK encoder.
   468  	bdpEst        *bdpEstimator
   469  	draining      bool
   470  
   471  	// Side-specific handlers
   472  	ssGoAwayHandler func(*goAway) (bool, error)
   473  }
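        // loopy's fields (estdStreams, activeStreams, hBuf, hEnc and the quotas) are
        // intended to be touched only from the single run goroutine; other goroutines
        // communicate with loopy exclusively through cbuf.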
   474  
   475  func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
   476  	var buf bytes.Buffer
   477  	l := &loopyWriter{
   478  		side:          s,
   479  		cbuf:          cbuf,
   480  		sendQuota:     defaultWindowSize,
   481  		oiws:          defaultWindowSize,
   482  		estdStreams:   make(map[uint32]*outStream),
   483  		activeStreams: newOutStreamList(),
   484  		framer:        fr,
   485  		hBuf:          &buf,
   486  		hEnc:          hpack.NewEncoder(&buf),
   487  		bdpEst:        bdpEst,
   488  	}
   489  	return l
   490  }
   491  
   492  const minBatchSize = 1000
   493  
   494  // run should be run in a separate goroutine.
   495  // It reads control frames from controlBuf and processes them by:
   496  // 1. Updating loopy's internal state, and/or
   497  // 2. Writing out HTTP2 frames on the wire.
   498  //
   499  // Loopy keeps all active streams with data to send in a linked-list.
   500  // All streams in the activeStreams linked-list must have both:
   501  // 1. Data to send, and
   502  // 2. Stream level flow control quota available.
   503  //
   504  // In each iteration of run loop, other than processing the incoming control
   505  // frame, loopy calls processData, which processes one node from the activeStreams linked-list.
   506  // This results in writing of HTTP2 frames into an underlying write buffer.
   507  // When there are no more control frames to read from controlBuf, loopy flushes the write buffer.
   508  // As an optimization, to increase the batch size for each flush, loopy yields the processor once,
   509  // if the batch size is too low, to give stream goroutines a chance to fill it up.
   510  func (l *loopyWriter) run(remoteAddr string) (err error) {
   511  	defer func() {
   512  		if err == ErrConnClosing {
   513  			// Don't log ErrConnClosing as an error, since it happens when:
   514  			// 1. The connection is closed due to some other known issue.
   515  			// 2. The user closed the connection.
   516  			// 3. The connection was closed gracefully.
   517  			klog.Debugf("KITEX: grpc transport loopyWriter.run returning, error=%v, remoteAddr=%s", err, remoteAddr)
   518  			err = nil
   519  		}
   520  	}()
   521  	for {
   522  		it, err := l.cbuf.get(true)
   523  		if err != nil {
   524  			return err
   525  		}
   526  		if err = l.handle(it); err != nil {
   527  			return err
   528  		}
   529  		if _, err = l.processData(); err != nil {
   530  			return err
   531  		}
   532  		gosched := true
   533  	hasdata:
   534  		for {
   535  			it, err := l.cbuf.get(false)
   536  			if err != nil {
   537  				return err
   538  			}
   539  			if it != nil {
   540  				if err = l.handle(it); err != nil {
   541  					return err
   542  				}
   543  				if _, err = l.processData(); err != nil {
   544  					return err
   545  				}
   546  				continue hasdata
   547  			}
   548  			isEmpty, err := l.processData()
   549  			if err != nil {
   550  				return err
   551  			}
   552  			if !isEmpty {
   553  				continue hasdata
   554  			}
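        			// Nothing more to handle or write right now. If the pending batch is
        			// small, yield the processor once so other goroutines get a chance to
        			// enqueue more frames before we flush.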
   555  			if gosched {
   556  				gosched = false
   557  				if l.framer.writer.offset < minBatchSize {
   558  					runtime.Gosched()
   559  					continue hasdata
   560  				}
   561  			}
   562  			l.framer.writer.Flush()
   563  			break hasdata
   564  		}
   565  	}
   566  }
   567  
   568  func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
   569  	return l.framer.WriteWindowUpdate(w.streamID, w.increment)
   570  }
   571  
   572  func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
   573  	// An update on stream 0 adjusts the connection-level send quota.
   574  	if w.streamID == 0 {
   575  		l.sendQuota += w.increment
   576  		return nil
   577  	}
   578  	// Find the stream and update it.
   579  	if str, ok := l.estdStreams[w.streamID]; ok {
   580  		str.bytesOutStanding -= int(w.increment)
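        		// The update frees stream-level quota; if the stream was parked waiting
        		// for quota and now has some, move it back onto the active list.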
   581  		if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
   582  			str.state = active
   583  			l.activeStreams.enqueue(str)
   584  			return nil
   585  		}
   586  	}
   587  	return nil
   588  }
   589  
   590  func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
   591  	return l.framer.WriteSettings(s.ss...)
   592  }
   593  
   594  func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
   595  	if err := l.applySettings(s.ss); err != nil {
   596  		return err
   597  	}
   598  	return l.framer.WriteSettingsAck()
   599  }
   600  
   601  func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
   602  	str := &outStream{
   603  		id:    h.streamID,
   604  		state: empty,
   605  		itl:   &itemList{},
   606  		wq:    h.wq,
   607  	}
   608  	l.estdStreams[h.streamID] = str
   609  	return nil
   610  }
   611  
   612  func (l *loopyWriter) headerHandler(h *headerFrame) error {
   613  	if l.side == serverSide {
   614  		str, ok := l.estdStreams[h.streamID]
   615  		if !ok {
   616  			klog.Warnf("transport: loopy doesn't recognize the stream: %d", h.streamID)
   617  			return nil
   618  		}
   619  		// Case 1.A: Server is responding back with headers.
   620  		if !h.endStream {
   621  			return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
   622  		}
   623  		// else:  Case 1.B: Server wants to close stream.
   624  
   625  		if str.state != empty { // either active or waiting on stream quota.
   626  			// add it to str's list of items.
   627  			str.itl.enqueue(h)
   628  			return nil
   629  		}
   630  		if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
   631  			return err
   632  		}
   633  		return l.cleanupStreamHandler(h.cleanup)
   634  	}
   635  	// Case 2: Client wants to originate stream.
   636  	str := &outStream{
   637  		id:    h.streamID,
   638  		state: empty,
   639  		itl:   &itemList{},
   640  		wq:    h.wq,
   641  	}
   642  	str.itl.enqueue(h)
   643  	return l.originateStream(str)
   644  }
   645  
   646  func (l *loopyWriter) originateStream(str *outStream) error {
   647  	hdr := str.itl.dequeue().(*headerFrame)
   648  	if err := hdr.initStream(str.id); err != nil {
   649  		if err == ErrConnClosing {
   650  			return err
   651  		}
   652  		// Other errors (e.g. errStreamDrain) need not close the transport.
   653  		return nil
   654  	}
   655  	if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
   656  		return err
   657  	}
   658  	l.estdStreams[str.id] = str
   659  	return nil
   660  }
   661  
   662  func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
   663  	if onWrite != nil {
   664  		onWrite()
   665  	}
   666  	l.hBuf.Reset()
   667  	for _, f := range hf {
   668  		if err := l.hEnc.WriteField(f); err != nil {
   669  			klog.Warnf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
   670  		}
   671  	}
   672  	var (
   673  		err               error
   674  		endHeaders, first bool
   675  	)
   676  	first = true
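        	// Split the encoded header block into one HEADERS frame followed by as many
        	// CONTINUATION frames as needed, each fragment at most http2MaxFrameLen.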
   677  	for !endHeaders {
   678  		size := l.hBuf.Len()
   679  		if size > http2MaxFrameLen {
   680  			size = http2MaxFrameLen
   681  		} else {
   682  			endHeaders = true
   683  		}
   684  		if first {
   685  			first = false
   686  			err = l.framer.WriteHeaders(http2.HeadersFrameParam{
   687  				StreamID:      streamID,
   688  				BlockFragment: l.hBuf.Next(size),
   689  				EndStream:     endStream,
   690  				EndHeaders:    endHeaders,
   691  			})
   692  		} else {
   693  			err = l.framer.WriteContinuation(
   694  				streamID,
   695  				endHeaders,
   696  				l.hBuf.Next(size),
   697  			)
   698  		}
   699  		if err != nil {
   700  			return err
   701  		}
   702  	}
   703  	return nil
   704  }
   705  
   706  func (l *loopyWriter) preprocessData(df *dataFrame) error {
   707  	str, ok := l.estdStreams[df.streamID]
   708  	if !ok {
   709  		return nil
   710  	}
   711  	// If we got data for a stream, it means that the
   712  	// stream was originated and the headers were sent out.
   713  	str.itl.enqueue(df)
   714  	if str.state == empty {
   715  		str.state = active
   716  		l.activeStreams.enqueue(str)
   717  	}
   718  	return nil
   719  }
   720  
   721  func (l *loopyWriter) pingHandler(p *ping) error {
   722  	if !p.ack {
   723  		l.bdpEst.timesnap(p.data)
   724  	}
   725  	return l.framer.WritePing(p.ack, p.data)
   726  }
   727  
   728  func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
   729  	o.resp <- l.sendQuota
   730  	return nil
   731  }
   732  
   733  func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
   734  	c.onWrite()
   735  	if str, ok := l.estdStreams[c.streamID]; ok {
   736  		// On the server side it could be a trailers-only response or
   737  		// a RST_STREAM before stream initialization, so the stream might
   738  		// not be established yet.
   739  		delete(l.estdStreams, c.streamID)
   740  		str.deleteSelf()
   741  	}
   742  	if c.rst { // If RST_STREAM needs to be sent.
   743  		if err := l.framer.WriteRSTStream(c.streamID, c.rstCode); err != nil {
   744  			return err
   745  		}
   746  	}
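        	// A draining client shuts the writer down (ErrConnClosing) once its last
        	// established stream has been cleaned up.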
   747  	if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
   748  		return ErrConnClosing
   749  	}
   750  	return nil
   751  }
   752  
   753  func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
   754  	if l.side == clientSide {
   755  		l.draining = true
   756  		if len(l.estdStreams) == 0 {
   757  			return ErrConnClosing
   758  		}
   759  	}
   760  	return nil
   761  }
   762  
   763  func (l *loopyWriter) goAwayHandler(g *goAway) error {
   764  	// Handling of an outgoing GoAway is very specific to the side.
   765  	if l.ssGoAwayHandler != nil {
   766  		draining, err := l.ssGoAwayHandler(g)
   767  		if err != nil {
   768  			return err
   769  		}
   770  		l.draining = draining
   771  	}
   772  	return nil
   773  }
   774  
   775  func (l *loopyWriter) handle(i interface{}) error {
   776  	switch i := i.(type) {
   777  	case *incomingWindowUpdate:
   778  		return l.incomingWindowUpdateHandler(i)
   779  	case *outgoingWindowUpdate:
   780  		return l.outgoingWindowUpdateHandler(i)
   781  	case *incomingSettings:
   782  		return l.incomingSettingsHandler(i)
   783  	case *outgoingSettings:
   784  		return l.outgoingSettingsHandler(i)
   785  	case *headerFrame:
   786  		return l.headerHandler(i)
   787  	case *registerStream:
   788  		return l.registerStreamHandler(i)
   789  	case *cleanupStream:
   790  		return l.cleanupStreamHandler(i)
   791  	case *incomingGoAway:
   792  		return l.incomingGoAwayHandler(i)
   793  	case *dataFrame:
   794  		return l.preprocessData(i)
   795  	case *ping:
   796  		return l.pingHandler(i)
   797  	case *goAway:
   798  		return l.goAwayHandler(i)
   799  	case *outFlowControlSizeRequest:
   800  		return l.outFlowControlSizeRequestHandler(i)
   801  	default:
   802  		return fmt.Errorf("transport: unknown control message type %T", i)
   803  	}
   804  }
   805  
   806  func (l *loopyWriter) applySettings(ss []http2.Setting) error {
   807  	for _, s := range ss {
   808  		switch s.ID {
   809  		case http2.SettingInitialWindowSize:
   810  			o := l.oiws
   811  			l.oiws = s.Val
   812  			if o < l.oiws {
   813  				// If the new limit is greater, make all depleted streams active.
   814  				for _, stream := range l.estdStreams {
   815  					if stream.state == waitingOnStreamQuota {
   816  						stream.state = active
   817  						l.activeStreams.enqueue(stream)
   818  					}
   819  				}
   820  			}
   821  		case http2.SettingHeaderTableSize:
   822  			updateHeaderTblSize(l.hEnc, s.Val)
   823  		}
   824  	}
   825  	return nil
   826  }
   827  
   828  // processData removes the first stream from active streams, writes out at most 16KB
   829  // of its data, and then puts it at the end of activeStreams if there's still more data
   830  // to be sent and the stream still has some stream-level flow control quota.
   831  func (l *loopyWriter) processData() (bool, error) {
   832  	if l.sendQuota == 0 {
   833  		return true, nil
   834  	}
   835  	str := l.activeStreams.dequeue() // Remove the first stream.
   836  	if str == nil {
   837  		return true, nil
   838  	}
   839  	dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item of this stream.
   840  	// A data item is represented by a dataFrame, since it later translates into
   841  	// multiple HTTP2 data frames.
   842  	// Every dataFrame has two buffers: h, which keeps the grpc-message header, and d, which is the actual data.
   843  	// As an optimization to keep wire traffic low, data from d is combined with h to make the frame as
   844  	// close as possible to the maximum HTTP2 frame size.
   845  
   846  	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
   847  		// Client sends out empty data frame with endStream = true
   848  		if err := l.framer.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
   849  			return false, err
   850  		}
   851  		str.itl.dequeue() // remove the empty data item from stream
   852  		if str.itl.isEmpty() {
   853  			str.state = empty
   854  		} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
   855  			if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
   856  				return false, err
   857  			}
   858  			if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
   859  				return false, err
   860  			}
   861  		} else {
   862  			l.activeStreams.enqueue(str)
   863  		}
   864  		return false, nil
   865  	}
   866  	var buf []byte
   867  	// Figure out the maximum size we can send
   868  	maxSize := http2MaxFrameLen
   869  	if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
   870  		str.state = waitingOnStreamQuota
   871  		return false, nil
   872  	} else if maxSize > strQuota {
   873  		maxSize = strQuota
   874  	}
   875  	if maxSize > int(l.sendQuota) { // connection-level flow control.
   876  		maxSize = int(l.sendQuota)
   877  	}
   878  	// Compute how much of the header and data we can send within quota and max frame length
   879  	hSize := min(maxSize, len(dataItem.h))
   880  	dSize := min(maxSize-hSize, len(dataItem.d))
   881  	if hSize != 0 {
   882  		if dSize == 0 {
   883  			buf = dataItem.h
   884  		} else {
   885  			// We can add some data to grpc message header to distribute bytes more equally across frames.
   886  			// Copy on the stack to avoid generating garbage
   887  			var localBuf [http2MaxFrameLen]byte
   888  			copy(localBuf[:hSize], dataItem.h)
   889  			copy(localBuf[hSize:], dataItem.d[:dSize])
   890  			buf = localBuf[:hSize+dSize]
   891  		}
   892  	} else {
   893  		buf = dataItem.d
   894  	}
   895  
   896  	size := hSize + dSize
   897  
   898  	// Now that outgoing flow controls are checked, we can replenish str's write quota.
   899  	str.wq.replenish(size)
   900  	var endStream bool
   901  	// If this is the last data message on this stream and all of it can be written in this iteration.
   902  	if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
   903  		endStream = true
   904  	}
   905  	if dataItem.onEachWrite != nil {
   906  		dataItem.onEachWrite()
   907  	}
   908  	if err := l.framer.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
   909  		return false, err
   910  	}
   911  	str.bytesOutStanding += size
   912  	l.sendQuota -= uint32(size)
   913  	dataItem.h = dataItem.h[hSize:]
   914  	dataItem.d = dataItem.d[dSize:]
   915  
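        	// Kitex-specific: once the whole message has been written out, the
        	// mcache-allocated buffer backing d (dcache) is returned to the cache below.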
   916  	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
   917  		if len(dataItem.dcache) > 0 {
   918  			mcache.Free(dataItem.dcache)
   919  		}
   920  		str.itl.dequeue()
   921  	}
   922  	if str.itl.isEmpty() {
   923  		str.state = empty
   924  	} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
   925  		if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
   926  			return false, err
   927  		}
   928  		if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
   929  			return false, err
   930  		}
   931  	} else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
   932  		str.state = waitingOnStreamQuota
   933  	} else { // Otherwise add it back to the list of active streams.
   934  		l.activeStreams.enqueue(str)
   935  	}
   936  	return false, nil
   937  }
   938  
   939  func min(a, b int) int {
   940  	if a < b {
   941  		return a
   942  	}
   943  	return b
   944  }