golang.org/x/net@v0.25.1-0.20240516223405-c87a5b62e243/http2/server.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // TODO: turn off the serve goroutine when idle, so
     6  // an idle conn only has the readFrames goroutine active. (which could
     7  // also be optimized probably to pin less memory in crypto/tls). This
     8  // would involve tracking when the serve goroutine is active (atomic
     9  // int32 read/CAS probably?) and starting it up when frames arrive,
    10  // and shutting it down when all handlers exit. the occasional PING
    11  // packets could use time.AfterFunc to call sc.wakeStartServeLoop()
    12  // (which is a no-op if already running) and then queue the PING write
    13  // as normal. The serve loop would then exit in most cases (if no
    14  // Handlers running) and not be woken up again until the PING packet
    15  // returns.
    16  
    17  // TODO (maybe): add a mechanism for Handlers to go into
    18  // half-closed-local mode (rw.(io.Closer) test?) but not exit their
    19  // handler, and continue to be able to read from the
    20  // Request.Body. This would be somewhat of a semantic change from HTTP/1
    21  // (or at least what we expose in net/http), so I'd probably want to
    22  // add it there too. For now, this package says that returning from
    23  // the Handler ServeHTTP function means you're both done reading and
    24  // done writing, without a way to stop just one or the other.
    25  
    26  package http2
    27  
    28  import (
    29  	"bufio"
    30  	"bytes"
    31  	"context"
    32  	"crypto/tls"
    33  	"errors"
    34  	"fmt"
    35  	"io"
    36  	"log"
    37  	"math"
    38  	"net"
    39  	"net/http"
    40  	"net/textproto"
    41  	"net/url"
    42  	"os"
    43  	"reflect"
    44  	"runtime"
    45  	"strconv"
    46  	"strings"
    47  	"sync"
    48  	"time"
    49  
    50  	"golang.org/x/net/http/httpguts"
    51  	"golang.org/x/net/http2/hpack"
    52  )
    53  
    54  const (
    55  	prefaceTimeout         = 10 * time.Second
    56  	firstSettingsTimeout   = 2 * time.Second // should be in-flight with preface anyway
    57  	handlerChunkWriteSize  = 4 << 10
    58  	defaultMaxStreams      = 250 // TODO: make this 100 as the GFE seems to?
    59  	maxQueuedControlFrames = 10000
    60  )
    61  
    62  var (
    63  	errClientDisconnected = errors.New("client disconnected")
    64  	errClosedBody         = errors.New("body closed by handler")
    65  	errHandlerComplete    = errors.New("http2: request body closed due to handler exiting")
    66  	errStreamClosed       = errors.New("http2: stream closed")
    67  )
    68  
    69  var responseWriterStatePool = sync.Pool{
    70  	New: func() interface{} {
    71  		rws := &responseWriterState{}
    72  		rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
    73  		return rws
    74  	},
    75  }
    76  
    77  // Test hooks.
    78  var (
    79  	testHookOnConn        func()
    80  	testHookGetServerConn func(*serverConn)
    81  	testHookOnPanicMu     *sync.Mutex // nil except in tests
    82  	testHookOnPanic       func(sc *serverConn, panicVal interface{}) (rePanic bool)
    83  )
    84  
    85  // Server is an HTTP/2 server.
    86  type Server struct {
    87  	// MaxHandlers limits the number of http.Handler ServeHTTP goroutines
    88  	// which may run at a time over all connections.
    89  	// Negative or zero means no limit.
    90  	// TODO: implement
    91  	MaxHandlers int
    92  
    93  	// MaxConcurrentStreams optionally specifies the number of
    94  	// concurrent streams that each client may have open at a
    95  	// time. This is unrelated to the number of http.Handler goroutines
    96  	// which may be active globally, which is MaxHandlers.
    97  	// If zero, MaxConcurrentStreams defaults to at least 100, per
    98  	// the HTTP/2 spec's recommendations.
    99  	MaxConcurrentStreams uint32
   100  
   101  	// MaxDecoderHeaderTableSize optionally specifies the http2
   102  	// SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It
   103  	// informs the remote endpoint of the maximum size of the header compression
   104  	// table used to decode header blocks, in octets. If zero, the default value
   105  	// of 4096 is used.
   106  	MaxDecoderHeaderTableSize uint32
   107  
   108  	// MaxEncoderHeaderTableSize optionally specifies an upper limit for the
   109  	// header compression table used for encoding request headers. Received
   110  	// SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero,
   111  	// the default value of 4096 is used.
   112  	MaxEncoderHeaderTableSize uint32
   113  
   114  	// MaxReadFrameSize optionally specifies the largest frame
   115  	// this server is willing to read. A valid value is between
   116  	// 16k and 16M, inclusive. If zero or otherwise invalid, a
   117  	// default value is used.
   118  	MaxReadFrameSize uint32
   119  
   120  	// PermitProhibitedCipherSuites, if true, permits the use of
   121  	// cipher suites prohibited by the HTTP/2 spec.
   122  	PermitProhibitedCipherSuites bool
   123  
   124  	// IdleTimeout specifies how long until idle clients should be
   125  	// closed with a GOAWAY frame. PING frames are not considered
   126  	// activity for the purposes of IdleTimeout.
   127  	// If zero or negative, there is no timeout.
   128  	IdleTimeout time.Duration
   129  
   130  	// MaxUploadBufferPerConnection is the size of the initial flow
   131  	// control window for each connection. The HTTP/2 spec does not
   132  	// allow this to be smaller than 65535 or larger than 2^32-1.
   133  	// If the value is outside this range, a default value will be
   134  	// used instead.
   135  	MaxUploadBufferPerConnection int32
   136  
   137  	// MaxUploadBufferPerStream is the size of the initial flow control
   138  	// window for each stream. The HTTP/2 spec does not allow this to
   139  	// be larger than 2^32-1. If the value is zero or larger than the
   140  	// maximum, a default value will be used instead.
   141  	MaxUploadBufferPerStream int32
   142  
   143  	// NewWriteScheduler constructs a write scheduler for a connection.
   144  	// If nil, a default scheduler is chosen.
   145  	NewWriteScheduler func() WriteScheduler
   146  
   147  	// CountError, if non-nil, is called on HTTP/2 server errors.
   148  	// It's intended to increment a metric for monitoring, such
   149  	// as an expvar or Prometheus metric.
   150  	// The errType consists of only ASCII word characters.
   151  	CountError func(errType string)
   152  
   153  	// Internal state. This is a pointer (rather than embedded directly)
   154  	// so that we don't embed a Mutex in this struct, which would make the
   155  	// struct non-copyable, which might break some callers.
   156  	state *serverInternalState
   157  }
   158  
   159  func (s *Server) initialConnRecvWindowSize() int32 {
   160  	if s.MaxUploadBufferPerConnection >= initialWindowSize {
   161  		return s.MaxUploadBufferPerConnection
   162  	}
   163  	return 1 << 20
   164  }
   165  
   166  func (s *Server) initialStreamRecvWindowSize() int32 {
   167  	if s.MaxUploadBufferPerStream > 0 {
   168  		return s.MaxUploadBufferPerStream
   169  	}
   170  	return 1 << 20
   171  }
   172  
   173  func (s *Server) maxReadFrameSize() uint32 {
   174  	if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
   175  		return v
   176  	}
   177  	return defaultMaxReadFrameSize
   178  }
   179  
   180  func (s *Server) maxConcurrentStreams() uint32 {
   181  	if v := s.MaxConcurrentStreams; v > 0 {
   182  		return v
   183  	}
   184  	return defaultMaxStreams
   185  }
   186  
   187  func (s *Server) maxDecoderHeaderTableSize() uint32 {
   188  	if v := s.MaxDecoderHeaderTableSize; v > 0 {
   189  		return v
   190  	}
   191  	return initialHeaderTableSize
   192  }
   193  
   194  func (s *Server) maxEncoderHeaderTableSize() uint32 {
   195  	if v := s.MaxEncoderHeaderTableSize; v > 0 {
   196  		return v
   197  	}
   198  	return initialHeaderTableSize
   199  }
   200  
   201  // maxQueuedControlFrames is the maximum number of control frames like
   202  // SETTINGS, PING and RST_STREAM that will be queued for writing before
   203  // the connection is closed to prevent memory exhaustion attacks.
   204  func (s *Server) maxQueuedControlFrames() int {
   205  	// TODO: if anybody asks, add a Server field, and remember to define the
   206  	// behavior of negative values.
   207  	return maxQueuedControlFrames
   208  }
   209  
   210  type serverInternalState struct {
   211  	mu          sync.Mutex
   212  	activeConns map[*serverConn]struct{}
   213  }
   214  
   215  func (s *serverInternalState) registerConn(sc *serverConn) {
   216  	if s == nil {
   217  		return // if the Server was used without calling ConfigureServer
   218  	}
   219  	s.mu.Lock()
   220  	s.activeConns[sc] = struct{}{}
   221  	s.mu.Unlock()
   222  }
   223  
   224  func (s *serverInternalState) unregisterConn(sc *serverConn) {
   225  	if s == nil {
   226  		return // if the Server was used without calling ConfigureServer
   227  	}
   228  	s.mu.Lock()
   229  	delete(s.activeConns, sc)
   230  	s.mu.Unlock()
   231  }
   232  
   233  func (s *serverInternalState) startGracefulShutdown() {
   234  	if s == nil {
   235  		return // if the Server was used without calling ConfigureServer
   236  	}
   237  	s.mu.Lock()
   238  	for sc := range s.activeConns {
   239  		sc.startGracefulShutdown()
   240  	}
   241  	s.mu.Unlock()
   242  }
   243  
   244  // ConfigureServer adds HTTP/2 support to a net/http Server.
   245  //
   246  // The configuration conf may be nil.
   247  //
   248  // ConfigureServer must be called before s begins serving.
   249  func ConfigureServer(s *http.Server, conf *Server) error {
   250  	if s == nil {
   251  		panic("nil *http.Server")
   252  	}
   253  	if conf == nil {
   254  		conf = new(Server)
   255  	}
   256  	conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
   257  	if h1, h2 := s, conf; h2.IdleTimeout == 0 {
   258  		if h1.IdleTimeout != 0 {
   259  			h2.IdleTimeout = h1.IdleTimeout
   260  		} else {
   261  			h2.IdleTimeout = h1.ReadTimeout
   262  		}
   263  	}
   264  	s.RegisterOnShutdown(conf.state.startGracefulShutdown)
   265  
   266  	if s.TLSConfig == nil {
   267  		s.TLSConfig = new(tls.Config)
   268  	} else if s.TLSConfig.CipherSuites != nil && s.TLSConfig.MinVersion < tls.VersionTLS13 {
   269  		// If they already provided a TLS 1.0–1.2 CipherSuite list, return an
   270  		// error if it is missing ECDHE_RSA_WITH_AES_128_GCM_SHA256 or
   271  		// ECDHE_ECDSA_WITH_AES_128_GCM_SHA256.
   272  		haveRequired := false
   273  		for _, cs := range s.TLSConfig.CipherSuites {
   274  			switch cs {
   275  			case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
   276  				// Alternative MTI cipher to not discourage ECDSA-only servers.
   277  				// See http://golang.org/cl/30721 for further information.
   278  				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
   279  				haveRequired = true
   280  			}
   281  		}
   282  		if !haveRequired {
   283  			return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256)")
   284  		}
   285  	}
   286  
   287  	// Note: not setting MinVersion to tls.VersionTLS12,
   288  	// as we don't want to interfere with HTTP/1.1 traffic
   289  	// on the user's server. We enforce TLS 1.2 later once
   290  	// we accept a connection. Ideally this should be done
   291  	// during next-proto selection, but using TLS <1.2 with
   292  	// HTTP/2 is still the client's bug.
   293  
   294  	s.TLSConfig.PreferServerCipherSuites = true
   295  
   296  	if !strSliceContains(s.TLSConfig.NextProtos, NextProtoTLS) {
   297  		s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
   298  	}
   299  	if !strSliceContains(s.TLSConfig.NextProtos, "http/1.1") {
   300  		s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "http/1.1")
   301  	}
   302  
   303  	if s.TLSNextProto == nil {
   304  		s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
   305  	}
   306  	protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
   307  		if testHookOnConn != nil {
   308  			testHookOnConn()
   309  		}
   310  		// The TLSNextProto interface predates contexts, so
   311  		// the net/http package passes down its per-connection
   312  		// base context via an exported but unadvertised
   313  		// method on the Handler. This is for internal
   314  		// net/http<=>http2 use only.
   315  		var ctx context.Context
   316  		type baseContexter interface {
   317  			BaseContext() context.Context
   318  		}
   319  		if bc, ok := h.(baseContexter); ok {
   320  			ctx = bc.BaseContext()
   321  		}
   322  		conf.ServeConn(c, &ServeConnOpts{
   323  			Context:    ctx,
   324  			Handler:    h,
   325  			BaseConfig: hs,
   326  		})
   327  	}
   328  	s.TLSNextProto[NextProtoTLS] = protoHandler
   329  	return nil
   330  }
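
        // A minimal usage sketch for ConfigureServer (mux and the certificate/key
        // paths are illustrative placeholders):
        //
        //	srv := &http.Server{
        //		Addr:    ":8443",
        //		Handler: mux, // any http.Handler
        //	}
        //	h2 := &http2.Server{
        //		MaxConcurrentStreams: 250,
        //		IdleTimeout:          5 * time.Minute,
        //	}
        //	if err := http2.ConfigureServer(srv, h2); err != nil {
        //		log.Fatal(err)
        //	}
        //	// "h2" is negotiated via ALPN during the TLS handshake; matching
        //	// connections are handed to this package through srv.TLSNextProto.
        //	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))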
   331  
   332  // ServeConnOpts are options for the Server.ServeConn method.
   333  type ServeConnOpts struct {
   334  	// Context is the base context to use.
   335  	// If nil, context.Background is used.
   336  	Context context.Context
   337  
   338  	// BaseConfig optionally sets the base configuration
   339  	// for values. If nil, defaults are used.
   340  	BaseConfig *http.Server
   341  
   342  	// Handler specifies which handler to use for processing
   343  	// requests. If nil, BaseConfig.Handler is used. If BaseConfig
   344  	// or BaseConfig.Handler is nil, http.DefaultServeMux is used.
   345  	Handler http.Handler
   346  
   347  	// UpgradeRequest is an initial request received on a connection
   348  	// undergoing an h2c upgrade. The request body must have been
   349  	// completely read from the connection before calling ServeConn,
   350  	// and the 101 Switching Protocols response written.
   351  	UpgradeRequest *http.Request
   352  
   353  	// Settings is the decoded contents of the HTTP2-Settings header
   354  	// in an h2c upgrade request.
   355  	Settings []byte
   356  
   357  	// SawClientPreface is set if the HTTP/2 connection preface
   358  	// has already been read from the connection.
   359  	SawClientPreface bool
   360  }
   361  
   362  func (o *ServeConnOpts) context() context.Context {
   363  	if o != nil && o.Context != nil {
   364  		return o.Context
   365  	}
   366  	return context.Background()
   367  }
   368  
   369  func (o *ServeConnOpts) baseConfig() *http.Server {
   370  	if o != nil && o.BaseConfig != nil {
   371  		return o.BaseConfig
   372  	}
   373  	return new(http.Server)
   374  }
   375  
   376  func (o *ServeConnOpts) handler() http.Handler {
   377  	if o != nil {
   378  		if o.Handler != nil {
   379  			return o.Handler
   380  		}
   381  		if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
   382  			return o.BaseConfig.Handler
   383  		}
   384  	}
   385  	return http.DefaultServeMux
   386  }
   387  
   388  // ServeConn serves HTTP/2 requests on the provided connection and
   389  // blocks until the connection is no longer readable.
   390  //
   391  // ServeConn starts speaking HTTP/2 assuming that c has not had any
   392  // reads or writes. It writes its initial settings frame and expects
   393  // to be able to read the preface and settings frame from the
   394  // client. If c has a ConnectionState method like a *tls.Conn, the
   395  // ConnectionState is used to verify the TLS ciphersuite and to set
   396  // the Request.TLS field in Handlers.
   397  //
   398  // ServeConn does not support h2c by itself. Any h2c support must be
   399  // implemented in terms of providing a suitably-behaving net.Conn.
   400  //
   401  // The opts parameter is optional. If nil, default values are used.
   402  func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
   403  	baseCtx, cancel := serverConnBaseContext(c, opts)
   404  	defer cancel()
   405  
   406  	sc := &serverConn{
   407  		srv:                         s,
   408  		hs:                          opts.baseConfig(),
   409  		conn:                        c,
   410  		baseCtx:                     baseCtx,
   411  		remoteAddrStr:               c.RemoteAddr().String(),
   412  		bw:                          newBufferedWriter(c),
   413  		handler:                     opts.handler(),
   414  		streams:                     make(map[uint32]*stream),
   415  		readFrameCh:                 make(chan readFrameResult),
   416  		wantWriteFrameCh:            make(chan FrameWriteRequest, 8),
   417  		serveMsgCh:                  make(chan interface{}, 8),
   418  		wroteFrameCh:                make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
   419  		bodyReadCh:                  make(chan bodyReadMsg),         // buffering doesn't matter either way
   420  		doneServing:                 make(chan struct{}),
   421  		clientMaxStreams:            math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
   422  		advMaxStreams:               s.maxConcurrentStreams(),
   423  		initialStreamSendWindowSize: initialWindowSize,
   424  		maxFrameSize:                initialMaxFrameSize,
   425  		serveG:                      newGoroutineLock(),
   426  		pushEnabled:                 true,
   427  		sawClientPreface:            opts.SawClientPreface,
   428  	}
   429  
   430  	s.state.registerConn(sc)
   431  	defer s.state.unregisterConn(sc)
   432  
   433  	// The net/http package sets the write deadline from the
   434  	// http.Server.WriteTimeout during the TLS handshake, but then
   435  	// passes the connection off to us with the deadline already set.
   436  	// Write deadlines are set per stream in serverConn.newStream.
   437  	// Disarm the net.Conn write deadline here.
   438  	if sc.hs.WriteTimeout > 0 {
   439  		sc.conn.SetWriteDeadline(time.Time{})
   440  	}
   441  
   442  	if s.NewWriteScheduler != nil {
   443  		sc.writeSched = s.NewWriteScheduler()
   444  	} else {
   445  		sc.writeSched = newRoundRobinWriteScheduler()
   446  	}
   447  
   448  	// These start at the RFC-specified defaults. If there is a higher
   449  	// configured value for inflow, that will be updated when we send a
   450  	// WINDOW_UPDATE shortly after sending SETTINGS.
   451  	sc.flow.add(initialWindowSize)
   452  	sc.inflow.init(initialWindowSize)
   453  	sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
   454  	sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
   455  
   456  	fr := NewFramer(sc.bw, c)
   457  	if s.CountError != nil {
   458  		fr.countError = s.CountError
   459  	}
   460  	fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil)
   461  	fr.MaxHeaderListSize = sc.maxHeaderListSize()
   462  	fr.SetMaxReadFrameSize(s.maxReadFrameSize())
   463  	sc.framer = fr
   464  
   465  	if tc, ok := c.(connectionStater); ok {
   466  		sc.tlsState = new(tls.ConnectionState)
   467  		*sc.tlsState = tc.ConnectionState()
   468  		// 9.2 Use of TLS Features
   469  		// An implementation of HTTP/2 over TLS MUST use TLS
   470  		// 1.2 or higher with the restrictions on feature set
   471  		// and cipher suite described in this section. Due to
   472  		// implementation limitations, it might not be
   473  		// possible to fail TLS negotiation. An endpoint MUST
   474  		// immediately terminate an HTTP/2 connection that
   475  		// does not meet the TLS requirements described in
   476  		// this section with a connection error (Section
   477  		// 5.4.1) of type INADEQUATE_SECURITY.
   478  		if sc.tlsState.Version < tls.VersionTLS12 {
   479  			sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
   480  			return
   481  		}
   482  
   483  		if sc.tlsState.ServerName == "" {
   484  			// Client must use SNI, but we don't enforce that anymore,
   485  			// since it was causing problems when connecting to bare IP
   486  			// addresses during development.
   487  			//
   488  			// TODO: optionally enforce? Or enforce at the time we receive
   489  			// a new request, and verify the ServerName matches the :authority?
   490  			// But that precludes proxy situations, perhaps.
   491  			//
   492  			// So for now, do nothing here again.
   493  		}
   494  
   495  		if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
   496  			// "Endpoints MAY choose to generate a connection error
   497  			// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
   498  			// the prohibited cipher suites are negotiated."
   499  			//
   500  			// We choose that. In my opinion, the spec is weak
   501  			// here. It also says both parties must support at least
   502  			// TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
   503  			// excuses here. If we really must, we could allow an
   504  			// "AllowInsecureWeakCiphers" option on the server later.
   505  			// Let's see how it plays out first.
   506  			sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
   507  			return
   508  		}
   509  	}
   510  
   511  	if opts.Settings != nil {
   512  		fr := &SettingsFrame{
   513  			FrameHeader: FrameHeader{valid: true},
   514  			p:           opts.Settings,
   515  		}
   516  		if err := fr.ForeachSetting(sc.processSetting); err != nil {
   517  			sc.rejectConn(ErrCodeProtocol, "invalid settings")
   518  			return
   519  		}
   520  		opts.Settings = nil
   521  	}
   522  
   523  	if hook := testHookGetServerConn; hook != nil {
   524  		hook(sc)
   525  	}
   526  
   527  	if opts.UpgradeRequest != nil {
   528  		sc.upgradeRequest(opts.UpgradeRequest)
   529  		opts.UpgradeRequest = nil
   530  	}
   531  
   532  	sc.serve()
   533  }
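
        // A sketch of driving ServeConn directly, for servers that manage their own
        // listener and TLS setup (lis, tlsConf, and handler are illustrative; tlsConf
        // must list "h2" in NextProtos):
        //
        //	h2 := &http2.Server{}
        //	for {
        //		c, err := lis.Accept()
        //		if err != nil {
        //			log.Fatal(err)
        //		}
        //		go func() {
        //			tc := tls.Server(c, tlsConf)
        //			if err := tc.Handshake(); err != nil {
        //				c.Close()
        //				return
        //			}
        //			h2.ServeConn(tc, &http2.ServeConnOpts{Handler: handler})
        //		}()
        //	}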
   534  
   535  func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
   536  	ctx, cancel = context.WithCancel(opts.context())
   537  	ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
   538  	if hs := opts.baseConfig(); hs != nil {
   539  		ctx = context.WithValue(ctx, http.ServerContextKey, hs)
   540  	}
   541  	return
   542  }
   543  
   544  func (sc *serverConn) rejectConn(err ErrCode, debug string) {
   545  	sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
   546  	// ignoring errors. hanging up anyway.
   547  	sc.framer.WriteGoAway(0, err, []byte(debug))
   548  	sc.bw.Flush()
   549  	sc.conn.Close()
   550  }
   551  
   552  type serverConn struct {
   553  	// Immutable:
   554  	srv              *Server
   555  	hs               *http.Server
   556  	conn             net.Conn
   557  	bw               *bufferedWriter // writing to conn
   558  	handler          http.Handler
   559  	baseCtx          context.Context
   560  	framer           *Framer
   561  	doneServing      chan struct{}          // closed when serverConn.serve ends
   562  	readFrameCh      chan readFrameResult   // written by serverConn.readFrames
   563  	wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
   564  	wroteFrameCh     chan frameWriteResult  // from writeFrameAsync -> serve, tickles more frame writes
   565  	bodyReadCh       chan bodyReadMsg       // from handlers -> serve
   566  	serveMsgCh       chan interface{}       // misc messages & code to send to / run on the serve loop
   567  	flow             outflow                // conn-wide (not stream-specific) outbound flow control
   568  	inflow           inflow                 // conn-wide inbound flow control
   569  	tlsState         *tls.ConnectionState   // shared by all handlers, like net/http
   570  	remoteAddrStr    string
   571  	writeSched       WriteScheduler
   572  
   573  	// Everything following is owned by the serve loop; use serveG.check():
   574  	serveG                      goroutineLock // used to verify funcs are on serve()
   575  	pushEnabled                 bool
   576  	sawClientPreface            bool // preface has already been read, used in h2c upgrade
   577  	sawFirstSettings            bool // got the initial SETTINGS frame after the preface
   578  	needToSendSettingsAck       bool
   579  	unackedSettings             int    // how many SETTINGS have we sent without ACKs?
   580  	queuedControlFrames         int    // control frames in the writeSched queue
   581  	clientMaxStreams            uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
   582  	advMaxStreams               uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
   583  	curClientStreams            uint32 // number of open streams initiated by the client
   584  	curPushedStreams            uint32 // number of open streams initiated by server push
   585  	curHandlers                 uint32 // number of running handler goroutines
   586  	maxClientStreamID           uint32 // max ever seen from client (odd), or 0 if there have been no client requests
   587  	maxPushPromiseID            uint32 // ID of the last push promise (even), or 0 if there have been no pushes
   588  	streams                     map[uint32]*stream
   589  	unstartedHandlers           []unstartedHandler
   590  	initialStreamSendWindowSize int32
   591  	maxFrameSize                int32
   592  	peerMaxHeaderListSize       uint32            // zero means unknown (default)
   593  	canonHeader                 map[string]string // http2-lower-case -> Go-Canonical-Case
   594  	canonHeaderKeysSize         int               // canonHeader keys size in bytes
   595  	writingFrame                bool              // started writing a frame (on serve goroutine or separate)
   596  	writingFrameAsync           bool              // started a frame on its own goroutine but haven't heard back on wroteFrameCh
   597  	needsFrameFlush             bool              // last frame write wasn't a flush
   598  	inGoAway                    bool              // we've started to send, or have sent, GOAWAY
   599  	inFrameScheduleLoop         bool              // whether we're in the scheduleFrameWrite loop
   600  	needToSendGoAway            bool              // we need to schedule a GOAWAY frame write
   601  	goAwayCode                  ErrCode
   602  	shutdownTimer               *time.Timer // nil until used
   603  	idleTimer                   *time.Timer // nil if unused
   604  
   605  	// Owned by the writeFrameAsync goroutine:
   606  	headerWriteBuf bytes.Buffer
   607  	hpackEncoder   *hpack.Encoder
   608  
   609  	// Used by startGracefulShutdown.
   610  	shutdownOnce sync.Once
   611  }
   612  
   613  func (sc *serverConn) maxHeaderListSize() uint32 {
   614  	n := sc.hs.MaxHeaderBytes
   615  	if n <= 0 {
   616  		n = http.DefaultMaxHeaderBytes
   617  	}
   618  	// http2's count is in a slightly different unit and includes 32 bytes per pair.
   619  	// So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
   620  	const perFieldOverhead = 32 // per http2 spec
   621  	const typicalHeaders = 10   // conservative
   622  	return uint32(n + typicalHeaders*perFieldOverhead)
   623  }
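
        // For example, with the net/http default of http.DefaultMaxHeaderBytes
        // (1 << 20 = 1048576 bytes), the advertised SETTINGS_MAX_HEADER_LIST_SIZE
        // comes out to 1048576 + 10*32 = 1048896.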
   624  
   625  func (sc *serverConn) curOpenStreams() uint32 {
   626  	sc.serveG.check()
   627  	return sc.curClientStreams + sc.curPushedStreams
   628  }
   629  
   630  // stream represents a stream. This is the minimal metadata needed by
   631  // the serve goroutine. Most of the actual stream state is owned by
   632  // the http.Handler's goroutine in the responseWriter. Because the
   633  // responseWriter's responseWriterState is recycled at the end of a
   634  // handler, this struct intentionally has no pointer to the
   635  // *responseWriter{,State} itself, as the Handler ending nils out the
   636  // responseWriter's state field.
   637  type stream struct {
   638  	// immutable:
   639  	sc        *serverConn
   640  	id        uint32
   641  	body      *pipe       // non-nil if expecting DATA frames
   642  	cw        closeWaiter // closed when the stream transitions to the closed state
   643  	ctx       context.Context
   644  	cancelCtx func()
   645  
   646  	// owned by serverConn's serve loop:
   647  	bodyBytes        int64   // body bytes seen so far
   648  	declBodyBytes    int64   // or -1 if undeclared
   649  	flow             outflow // limits writing from Handler to client
   650  	inflow           inflow  // what the client is allowed to POST/etc to us
   651  	state            streamState
   652  	resetQueued      bool        // RST_STREAM queued for write; set by sc.resetStream
   653  	gotTrailerHeader bool        // HEADER frame for trailers was seen
   654  	wroteHeaders     bool        // whether we wrote headers (not status 100)
   655  	readDeadline     *time.Timer // nil if unused
   656  	writeDeadline    *time.Timer // nil if unused
   657  	closeErr         error       // set before cw is closed
   658  
   659  	trailer    http.Header // accumulated trailers
   660  	reqTrailer http.Header // handler's Request.Trailer
   661  }
   662  
   663  func (sc *serverConn) Framer() *Framer  { return sc.framer }
   664  func (sc *serverConn) CloseConn() error { return sc.conn.Close() }
   665  func (sc *serverConn) Flush() error     { return sc.bw.Flush() }
   666  func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
   667  	return sc.hpackEncoder, &sc.headerWriteBuf
   668  }
   669  
   670  func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
   671  	sc.serveG.check()
   672  	// http://tools.ietf.org/html/rfc7540#section-5.1
   673  	if st, ok := sc.streams[streamID]; ok {
   674  		return st.state, st
   675  	}
   676  	// "The first use of a new stream identifier implicitly closes all
   677  	// streams in the "idle" state that might have been initiated by
   678  	// that peer with a lower-valued stream identifier. For example, if
   679  	// a client sends a HEADERS frame on stream 7 without ever sending a
   680  	// frame on stream 5, then stream 5 transitions to the "closed"
   681  	// state when the first frame for stream 7 is sent or received."
   682  	if streamID%2 == 1 {
   683  		if streamID <= sc.maxClientStreamID {
   684  			return stateClosed, nil
   685  		}
   686  	} else {
   687  		if streamID <= sc.maxPushPromiseID {
   688  			return stateClosed, nil
   689  		}
   690  	}
   691  	return stateIdle, nil
   692  }
   693  
   694  // setConnState calls the net/http ConnState hook for this connection, if configured.
   695  // Note that the net/http package does StateNew and StateClosed for us.
   696  // There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
   697  func (sc *serverConn) setConnState(state http.ConnState) {
   698  	if sc.hs.ConnState != nil {
   699  		sc.hs.ConnState(sc.conn, state)
   700  	}
   701  }
   702  
   703  func (sc *serverConn) vlogf(format string, args ...interface{}) {
   704  	if VerboseLogs {
   705  		sc.logf(format, args...)
   706  	}
   707  }
   708  
   709  func (sc *serverConn) logf(format string, args ...interface{}) {
   710  	if lg := sc.hs.ErrorLog; lg != nil {
   711  		lg.Printf(format, args...)
   712  	} else {
   713  		log.Printf(format, args...)
   714  	}
   715  }
   716  
   717  // errno returns v's underlying uintptr, else 0.
   718  //
   719  // TODO: remove this helper function once http2 can use build
   720  // tags. See comment in isClosedConnError.
   721  func errno(v error) uintptr {
   722  	if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
   723  		return uintptr(rv.Uint())
   724  	}
   725  	return 0
   726  }
   727  
   728  // isClosedConnError reports whether err is an error from use of a closed
   729  // network connection.
   730  func isClosedConnError(err error) bool {
   731  	if err == nil {
   732  		return false
   733  	}
   734  
   735  	if errors.Is(err, net.ErrClosed) {
   736  		return true
   737  	}
   738  
   739  	// TODO(bradfitz): x/tools/cmd/bundle doesn't really support
   740  	// build tags, so I can't make an http2_windows.go file with
   741  	// Windows-specific stuff. Fix that and move this, once we
   742  	// have a way to bundle this into std's net/http somehow.
   743  	if runtime.GOOS == "windows" {
   744  		if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
   745  			if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
   746  				const WSAECONNABORTED = 10053
   747  				const WSAECONNRESET = 10054
   748  				if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
   749  					return true
   750  				}
   751  			}
   752  		}
   753  	}
   754  	return false
   755  }
   756  
   757  func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
   758  	if err == nil {
   759  		return
   760  	}
   761  	if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout {
   762  		// Boring, expected errors.
   763  		sc.vlogf(format, args...)
   764  	} else {
   765  		sc.logf(format, args...)
   766  	}
   767  }
   768  
   769  // maxCachedCanonicalHeadersKeysSize is an arbitrarily-chosen limit on the size
   770  // of the entries in the canonHeader cache.
   771  // This should be larger than the size of unique, uncommon header keys likely to
   772  // be sent by the peer, while not so high as to permit unreasonable memory usage
   773  // if the peer sends an unbounded number of unique header keys.
   774  const maxCachedCanonicalHeadersKeysSize = 2048
   775  
   776  func (sc *serverConn) canonicalHeader(v string) string {
   777  	sc.serveG.check()
   778  	buildCommonHeaderMapsOnce()
   779  	cv, ok := commonCanonHeader[v]
   780  	if ok {
   781  		return cv
   782  	}
   783  	cv, ok = sc.canonHeader[v]
   784  	if ok {
   785  		return cv
   786  	}
   787  	if sc.canonHeader == nil {
   788  		sc.canonHeader = make(map[string]string)
   789  	}
   790  	cv = http.CanonicalHeaderKey(v)
   791  	size := 100 + len(v)*2 // 100 bytes of map overhead + key + value
   792  	if sc.canonHeaderKeysSize+size <= maxCachedCanonicalHeadersKeysSize {
   793  		sc.canonHeader[v] = cv
   794  		sc.canonHeaderKeysSize += size
   795  	}
   796  	return cv
   797  }
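
        // Sizing example for the cache above: a 20-byte header key costs
        // 100 + 20*2 = 140 bytes of budget, so the 2048-byte cap admits roughly 14
        // such uncommon keys; later keys are still canonicalized, just not cached.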
   798  
   799  type readFrameResult struct {
   800  	f   Frame // valid until readMore is called
   801  	err error
   802  
   803  	// readMore should be called once the consumer no longer needs or
   804  	// retains f. After readMore, f is invalid and more frames can be
   805  	// read.
   806  	readMore func()
   807  }
   808  
   809  // readFrames is the loop that reads incoming frames.
   810  // It takes care to only read one frame at a time, blocking until the
   811  // consumer is done with the frame.
   812  // It's run on its own goroutine.
   813  func (sc *serverConn) readFrames() {
   814  	gate := make(chan struct{})
   815  	gateDone := func() { gate <- struct{}{} }
   816  	for {
   817  		f, err := sc.framer.ReadFrame()
   818  		select {
   819  		case sc.readFrameCh <- readFrameResult{f, err, gateDone}:
   820  		case <-sc.doneServing:
   821  			return
   822  		}
   823  		select {
   824  		case <-gate:
   825  		case <-sc.doneServing:
   826  			return
   827  		}
   828  		if terminalReadFrameError(err) {
   829  			return
   830  		}
   831  	}
   832  }
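
        // The gate channel gives readFrames a simple ownership handoff: the reader
        // parks until the consumer signals, via readMore, that the frame (and the
        // Framer's reused buffers) is no longer needed. A standalone sketch of the
        // same pattern (src and process are illustrative):
        //
        //	gate := make(chan struct{})
        //	items := make(chan []byte)
        //	go func() { // producer: reuses buf across sends
        //		buf := make([]byte, 1024)
        //		for {
        //			n, _ := src.Read(buf)
        //			items <- buf[:n]
        //			<-gate // wait until the consumer is done with buf
        //		}
        //	}()
        //	for b := range items { // consumer
        //		process(b)
        //		gate <- struct{}{} // hand buf back to the producer
        //	}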
   833  
   834  // frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
   835  type frameWriteResult struct {
   836  	_   incomparable
   837  	wr  FrameWriteRequest // what was written (or attempted)
   838  	err error             // result of the writeFrame call
   839  }
   840  
   841  // writeFrameAsync runs in its own goroutine and writes a single frame
   842  // and then reports when it's done.
   843  // At most one goroutine can be running writeFrameAsync at a time per
   844  // serverConn.
   845  func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
   846  	var err error
   847  	if wd == nil {
   848  		err = wr.write.writeFrame(sc)
   849  	} else {
   850  		err = sc.framer.endWrite()
   851  	}
   852  	sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err}
   853  }
   854  
   855  func (sc *serverConn) closeAllStreamsOnConnClose() {
   856  	sc.serveG.check()
   857  	for _, st := range sc.streams {
   858  		sc.closeStream(st, errClientDisconnected)
   859  	}
   860  }
   861  
   862  func (sc *serverConn) stopShutdownTimer() {
   863  	sc.serveG.check()
   864  	if t := sc.shutdownTimer; t != nil {
   865  		t.Stop()
   866  	}
   867  }
   868  
   869  func (sc *serverConn) notePanic() {
   870  	// Note: this is for serverConn.serve panicking, not http.Handler code.
   871  	if testHookOnPanicMu != nil {
   872  		testHookOnPanicMu.Lock()
   873  		defer testHookOnPanicMu.Unlock()
   874  	}
   875  	if testHookOnPanic != nil {
   876  		if e := recover(); e != nil {
   877  			if testHookOnPanic(sc, e) {
   878  				panic(e)
   879  			}
   880  		}
   881  	}
   882  }
   883  
   884  func (sc *serverConn) serve() {
   885  	sc.serveG.check()
   886  	defer sc.notePanic()
   887  	defer sc.conn.Close()
   888  	defer sc.closeAllStreamsOnConnClose()
   889  	defer sc.stopShutdownTimer()
   890  	defer close(sc.doneServing) // unblocks handlers trying to send
   891  
   892  	if VerboseLogs {
   893  		sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
   894  	}
   895  
   896  	sc.writeFrame(FrameWriteRequest{
   897  		write: writeSettings{
   898  			{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
   899  			{SettingMaxConcurrentStreams, sc.advMaxStreams},
   900  			{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
   901  			{SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()},
   902  			{SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
   903  		},
   904  	})
   905  	sc.unackedSettings++
   906  
   907  	// Each connection starts with initialWindowSize inflow tokens.
   908  	// If a higher value is configured, we add more tokens.
   909  	if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
   910  		sc.sendWindowUpdate(nil, int(diff))
   911  	}
   912  
   913  	if err := sc.readPreface(); err != nil {
   914  		sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
   915  		return
   916  	}
   917  	// Now that we've got the preface, get us out of the
   918  	// "StateNew" state. We can't go directly to idle, though.
   919  	// Active means we read some data and anticipate a request. We'll
   920  	// do another Active when we get a HEADERS frame.
   921  	sc.setConnState(http.StateActive)
   922  	sc.setConnState(http.StateIdle)
   923  
   924  	if sc.srv.IdleTimeout > 0 {
   925  		sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
   926  		defer sc.idleTimer.Stop()
   927  	}
   928  
   929  	go sc.readFrames() // closed by defer sc.conn.Close above
   930  
   931  	settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
   932  	defer settingsTimer.Stop()
   933  
   934  	loopNum := 0
   935  	for {
   936  		loopNum++
   937  		select {
   938  		case wr := <-sc.wantWriteFrameCh:
   939  			if se, ok := wr.write.(StreamError); ok {
   940  				sc.resetStream(se)
   941  				break
   942  			}
   943  			sc.writeFrame(wr)
   944  		case res := <-sc.wroteFrameCh:
   945  			sc.wroteFrame(res)
   946  		case res := <-sc.readFrameCh:
   947  			// Process any written frames before reading new frames from the client since a
   948  			// written frame could have triggered a new stream to be started.
   949  			if sc.writingFrameAsync {
   950  				select {
   951  				case wroteRes := <-sc.wroteFrameCh:
   952  					sc.wroteFrame(wroteRes)
   953  				default:
   954  				}
   955  			}
   956  			if !sc.processFrameFromReader(res) {
   957  				return
   958  			}
   959  			res.readMore()
   960  			if settingsTimer != nil {
   961  				settingsTimer.Stop()
   962  				settingsTimer = nil
   963  			}
   964  		case m := <-sc.bodyReadCh:
   965  			sc.noteBodyRead(m.st, m.n)
   966  		case msg := <-sc.serveMsgCh:
   967  			switch v := msg.(type) {
   968  			case func(int):
   969  				v(loopNum) // for testing
   970  			case *serverMessage:
   971  				switch v {
   972  				case settingsTimerMsg:
   973  					sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
   974  					return
   975  				case idleTimerMsg:
   976  					sc.vlogf("connection is idle")
   977  					sc.goAway(ErrCodeNo)
   978  				case shutdownTimerMsg:
   979  					sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
   980  					return
   981  				case gracefulShutdownMsg:
   982  					sc.startGracefulShutdownInternal()
   983  				case handlerDoneMsg:
   984  					sc.handlerDone()
   985  				default:
   986  					panic("unknown timer")
   987  				}
   988  			case *startPushRequest:
   989  				sc.startPush(v)
   990  			case func(*serverConn):
   991  				v(sc)
   992  			default:
   993  				panic(fmt.Sprintf("unexpected type %T", v))
   994  			}
   995  		}
   996  
   997  		// If the peer is causing us to generate a lot of control frames,
   998  		// but not reading them from us, assume they are trying to make us
   999  		// run out of memory.
  1000  		if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
  1001  			sc.vlogf("http2: too many control frames in send queue, closing connection")
  1002  			return
  1003  		}
  1004  
  1005  		// Start the shutdown timer after sending a GOAWAY. When sending GOAWAY
  1006  		// with no error code (graceful shutdown), don't start the timer until
  1007  		// all open streams have been completed.
  1008  		sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame
  1009  		gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0
  1010  		if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) {
  1011  			sc.shutDownIn(goAwayTimeout)
  1012  		}
  1013  	}
  1014  }
  1015  
  1016  type serverMessage int
  1017  
  1018  // Message values sent to serveMsgCh.
  1019  var (
  1020  	settingsTimerMsg    = new(serverMessage)
  1021  	idleTimerMsg        = new(serverMessage)
  1022  	shutdownTimerMsg    = new(serverMessage)
  1023  	gracefulShutdownMsg = new(serverMessage)
  1024  	handlerDoneMsg      = new(serverMessage)
  1025  )
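
        // Each message is a distinct pointer, so the serve loop can compare identities
        // in its switch without allocating per send. The same sentinel idiom in
        // isolation (illustrative):
        //
        //	type signal int
        //	var startMsg, stopMsg = new(signal), new(signal)
        //	// startMsg != stopMsg, even though both point at zero values.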
  1026  
  1027  func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
  1028  func (sc *serverConn) onIdleTimer()     { sc.sendServeMsg(idleTimerMsg) }
  1029  func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
  1030  
  1031  func (sc *serverConn) sendServeMsg(msg interface{}) {
  1032  	sc.serveG.checkNotOn() // NOT
  1033  	select {
  1034  	case sc.serveMsgCh <- msg:
  1035  	case <-sc.doneServing:
  1036  	}
  1037  }
  1038  
  1039  var errPrefaceTimeout = errors.New("timeout waiting for client preface")
  1040  
  1041  // readPreface reads the ClientPreface greeting from the peer or
  1042  // returns errPrefaceTimeout on timeout, or an error if the greeting
  1043  // is invalid.
  1044  func (sc *serverConn) readPreface() error {
  1045  	if sc.sawClientPreface {
  1046  		return nil
  1047  	}
  1048  	errc := make(chan error, 1)
  1049  	go func() {
  1050  		// Read the client preface
  1051  		buf := make([]byte, len(ClientPreface))
  1052  		if _, err := io.ReadFull(sc.conn, buf); err != nil {
  1053  			errc <- err
  1054  		} else if !bytes.Equal(buf, clientPreface) {
  1055  			errc <- fmt.Errorf("bogus greeting %q", buf)
  1056  		} else {
  1057  			errc <- nil
  1058  		}
  1059  	}()
  1060  	timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
  1061  	defer timer.Stop()
  1062  	select {
  1063  	case <-timer.C:
  1064  		return errPrefaceTimeout
  1065  	case err := <-errc:
  1066  		if err == nil {
  1067  			if VerboseLogs {
  1068  				sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
  1069  			}
  1070  		}
  1071  		return err
  1072  	}
  1073  }
  1074  
  1075  var errChanPool = sync.Pool{
  1076  	New: func() interface{} { return make(chan error, 1) },
  1077  }
  1078  
  1079  var writeDataPool = sync.Pool{
  1080  	New: func() interface{} { return new(writeData) },
  1081  }
  1082  
  1083  // writeDataFromHandler writes DATA response frames from a handler on
  1084  // the given stream.
  1085  func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
  1086  	ch := errChanPool.Get().(chan error)
  1087  	writeArg := writeDataPool.Get().(*writeData)
  1088  	*writeArg = writeData{stream.id, data, endStream}
  1089  	err := sc.writeFrameFromHandler(FrameWriteRequest{
  1090  		write:  writeArg,
  1091  		stream: stream,
  1092  		done:   ch,
  1093  	})
  1094  	if err != nil {
  1095  		return err
  1096  	}
  1097  	var frameWriteDone bool // the frame write is done (successfully or not)
  1098  	select {
  1099  	case err = <-ch:
  1100  		frameWriteDone = true
  1101  	case <-sc.doneServing:
  1102  		return errClientDisconnected
  1103  	case <-stream.cw:
  1104  		// If both ch and stream.cw were ready (as might
  1105  		// happen on the final Write after an http.Handler
  1106  		// ends), prefer the write result. Otherwise this
  1107  		// might just be us successfully closing the stream.
  1108  		// The writeFrameAsync and serve goroutines guarantee
  1109  		// that the ch send will happen before the stream.cw
  1110  		// close.
  1111  		select {
  1112  		case err = <-ch:
  1113  			frameWriteDone = true
  1114  		default:
  1115  			return errStreamClosed
  1116  		}
  1117  	}
  1118  	errChanPool.Put(ch)
  1119  	if frameWriteDone {
  1120  		writeDataPool.Put(writeArg)
  1121  	}
  1122  	return err
  1123  }
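
        // The pools above reuse the per-call error channel and writeData value so a
        // handler's Write does not allocate them each time; writeArg is only returned
        // to its pool once the frame write is known to be done, since writeFrameAsync
        // may otherwise still reference it. A standalone sketch of the channel-reuse
        // idea (run and work are illustrative):
        //
        //	var errChPool = sync.Pool{
        //		New: func() interface{} { return make(chan error, 1) },
        //	}
        //
        //	func run(work func() error) error {
        //		ch := errChPool.Get().(chan error)
        //		go func() { ch <- work() }()
        //		err := <-ch
        //		errChPool.Put(ch) // safe to reuse: the single buffered send was received
        //		return err
        //	}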
  1124  
  1125  // writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts
  1126  // if the connection has gone away.
  1127  //
  1128  // This must not be run from the serve goroutine itself, else it might
  1129  // deadlock writing to sc.wantWriteFrameCh (which is only mildly
  1130  // buffered and is read by serve itself). If you're on the serve
  1131  // goroutine, call writeFrame instead.
  1132  func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error {
  1133  	sc.serveG.checkNotOn() // NOT
  1134  	select {
  1135  	case sc.wantWriteFrameCh <- wr:
  1136  		return nil
  1137  	case <-sc.doneServing:
  1138  		// Serve loop is gone.
  1139  		// Client has closed their connection to the server.
  1140  		return errClientDisconnected
  1141  	}
  1142  }
  1143  
  1144  // writeFrame schedules a frame to write and sends it if there's nothing
  1145  // already being written.
  1146  //
  1147  // There is no pushback here (the serve goroutine never blocks). It's
  1148  // the http.Handlers that block, waiting for their previous frames to
  1149  // make it onto the wire.
  1150  //
  1151  // If you're not on the serve goroutine, use writeFrameFromHandler instead.
  1152  func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
  1153  	sc.serveG.check()
  1154  
  1155  	// If true, wr will not be written and wr.done will not be signaled.
  1156  	var ignoreWrite bool
  1157  
  1158  	// We are not allowed to write frames on closed streams. RFC 7540 Section
  1159  	// 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on
  1160  	// a closed stream." Our server never sends PRIORITY, so that exception
  1161  	// does not apply.
  1162  	//
  1163  	// The serverConn might close an open stream while the stream's handler
  1164  	// is still running. For example, the server might close a stream when it
  1165  	// receives bad data from the client. If this happens, the handler might
  1166  	// attempt to write a frame after the stream has been closed (since the
  1167  	// handler hasn't yet been notified of the close). In this case, we simply
  1168  	// ignore the frame. The handler will notice that the stream is closed when
  1169  	// it waits for the frame to be written.
  1170  	//
  1171  	// As an exception to this rule, we allow sending RST_STREAM after close.
  1172  	// This allows us to immediately reject new streams without tracking any
  1173  	// state for those streams (except for the queued RST_STREAM frame). This
  1174  	// may result in duplicate RST_STREAMs in some cases, but the client should
  1175  	// ignore those.
  1176  	if wr.StreamID() != 0 {
  1177  		_, isReset := wr.write.(StreamError)
  1178  		if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset {
  1179  			ignoreWrite = true
  1180  		}
  1181  	}
  1182  
  1183  	// Don't send a 100-continue response if we've already sent headers.
  1184  	// See golang.org/issue/14030.
  1185  	switch wr.write.(type) {
  1186  	case *writeResHeaders:
  1187  		wr.stream.wroteHeaders = true
  1188  	case write100ContinueHeadersFrame:
  1189  		if wr.stream.wroteHeaders {
  1190  			// We do not need to notify wr.done because this frame is
  1191  			// never written with wr.done != nil.
  1192  			if wr.done != nil {
  1193  				panic("wr.done != nil for write100ContinueHeadersFrame")
  1194  			}
  1195  			ignoreWrite = true
  1196  		}
  1197  	}
  1198  
  1199  	if !ignoreWrite {
  1200  		if wr.isControl() {
  1201  			sc.queuedControlFrames++
  1202  			// For extra safety, detect wraparounds, which should not happen,
  1203  			// and pull the plug.
  1204  			if sc.queuedControlFrames < 0 {
  1205  				sc.conn.Close()
  1206  			}
  1207  		}
  1208  		sc.writeSched.Push(wr)
  1209  	}
  1210  	sc.scheduleFrameWrite()
  1211  }
  1212  
  1213  // startFrameWrite starts a goroutine to write wr (in a separate
  1214  // goroutine since that might block on the network), and updates the
  1215  // serve goroutine's state of the world based on the info in wr.
  1216  func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
  1217  	sc.serveG.check()
  1218  	if sc.writingFrame {
  1219  		panic("internal error: can only be writing one frame at a time")
  1220  	}
  1221  
  1222  	st := wr.stream
  1223  	if st != nil {
  1224  		switch st.state {
  1225  		case stateHalfClosedLocal:
  1226  			switch wr.write.(type) {
  1227  			case StreamError, handlerPanicRST, writeWindowUpdate:
  1228  				// RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE
  1229  				// in this state. (We never send PRIORITY from the server, so that is not checked.)
  1230  			default:
  1231  				panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr))
  1232  			}
  1233  		case stateClosed:
  1234  			panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr))
  1235  		}
  1236  	}
  1237  	if wpp, ok := wr.write.(*writePushPromise); ok {
  1238  		var err error
  1239  		wpp.promisedID, err = wpp.allocatePromisedID()
  1240  		if err != nil {
  1241  			sc.writingFrameAsync = false
  1242  			wr.replyToWriter(err)
  1243  			return
  1244  		}
  1245  	}
  1246  
  1247  	sc.writingFrame = true
  1248  	sc.needsFrameFlush = true
  1249  	if wr.write.staysWithinBuffer(sc.bw.Available()) {
  1250  		sc.writingFrameAsync = false
  1251  		err := wr.write.writeFrame(sc)
  1252  		sc.wroteFrame(frameWriteResult{wr: wr, err: err})
  1253  	} else if wd, ok := wr.write.(*writeData); ok {
  1254  		// Encode the frame in the serve goroutine, to ensure we don't have
  1255  		// any lingering asynchronous references to data passed to Write.
  1256  		// See https://go.dev/issue/58446.
  1257  		sc.framer.startWriteDataPadded(wd.streamID, wd.endStream, wd.p, nil)
  1258  		sc.writingFrameAsync = true
  1259  		go sc.writeFrameAsync(wr, wd)
  1260  	} else {
  1261  		sc.writingFrameAsync = true
  1262  		go sc.writeFrameAsync(wr, nil)
  1263  	}
  1264  }
  1265  
  1266  // errHandlerPanicked is the error given to any callers blocked in a read from
  1267  // Request.Body when the main goroutine panics. Since most handlers read in the
  1268  // main ServeHTTP goroutine, this will show up rarely.
  1269  var errHandlerPanicked = errors.New("http2: handler panicked")
  1270  
  1271  // wroteFrame is called on the serve goroutine with the result of
  1272  // whatever happened on writeFrameAsync.
  1273  func (sc *serverConn) wroteFrame(res frameWriteResult) {
  1274  	sc.serveG.check()
  1275  	if !sc.writingFrame {
  1276  		panic("internal error: expected to be already writing a frame")
  1277  	}
  1278  	sc.writingFrame = false
  1279  	sc.writingFrameAsync = false
  1280  
  1281  	wr := res.wr
  1282  
  1283  	if writeEndsStream(wr.write) {
  1284  		st := wr.stream
  1285  		if st == nil {
  1286  			panic("internal error: expecting non-nil stream")
  1287  		}
  1288  		switch st.state {
  1289  		case stateOpen:
  1290  			// Here we would go to stateHalfClosedLocal in
  1291  			// theory, but since our handler is done and
  1292  			// the net/http package provides no mechanism
  1293  			// for closing a ResponseWriter while still
  1294  			// reading data (see possible TODO at top of
  1295  			// this file), we go into closed state here
  1296  			// anyway, after telling the peer we're
  1297  			// hanging up on them. We'll transition to
  1298  			// stateClosed after the RST_STREAM frame is
  1299  			// written.
  1300  			st.state = stateHalfClosedLocal
  1301  			// Section 8.1: a server MAY request that the client abort
  1302  			// transmission of a request without error by sending a
  1303  			// RST_STREAM with an error code of NO_ERROR after sending
  1304  			// a complete response.
  1305  			sc.resetStream(streamError(st.id, ErrCodeNo))
  1306  		case stateHalfClosedRemote:
  1307  			sc.closeStream(st, errHandlerComplete)
  1308  		}
  1309  	} else {
  1310  		switch v := wr.write.(type) {
  1311  		case StreamError:
  1312  			// st may be unknown if the RST_STREAM was generated to reject bad input.
  1313  			if st, ok := sc.streams[v.StreamID]; ok {
  1314  				sc.closeStream(st, v)
  1315  			}
  1316  		case handlerPanicRST:
  1317  			sc.closeStream(wr.stream, errHandlerPanicked)
  1318  		}
  1319  	}
  1320  
  1321  	// Reply (if requested) to unblock the ServeHTTP goroutine.
  1322  	wr.replyToWriter(res.err)
  1323  
  1324  	sc.scheduleFrameWrite()
  1325  }
  1326  
  1327  // scheduleFrameWrite tickles the frame writing scheduler.
  1328  //
  1329  // If a frame is already being written, nothing happens. This will be called again
  1330  // when the frame is done being written.
  1331  //
  1332  // If a frame isn't being written and we need to send one, the best frame
  1333  // to send is selected by writeSched.
  1334  //
  1335  // If a frame isn't being written and there's nothing else to send, we
  1336  // flush the write buffer.
  1337  func (sc *serverConn) scheduleFrameWrite() {
  1338  	sc.serveG.check()
  1339  	if sc.writingFrame || sc.inFrameScheduleLoop {
  1340  		return
  1341  	}
  1342  	sc.inFrameScheduleLoop = true
  1343  	for !sc.writingFrameAsync {
  1344  		if sc.needToSendGoAway {
  1345  			sc.needToSendGoAway = false
  1346  			sc.startFrameWrite(FrameWriteRequest{
  1347  				write: &writeGoAway{
  1348  					maxStreamID: sc.maxClientStreamID,
  1349  					code:        sc.goAwayCode,
  1350  				},
  1351  			})
  1352  			continue
  1353  		}
  1354  		if sc.needToSendSettingsAck {
  1355  			sc.needToSendSettingsAck = false
  1356  			sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}})
  1357  			continue
  1358  		}
  1359  		if !sc.inGoAway || sc.goAwayCode == ErrCodeNo {
  1360  			if wr, ok := sc.writeSched.Pop(); ok {
  1361  				if wr.isControl() {
  1362  					sc.queuedControlFrames--
  1363  				}
  1364  				sc.startFrameWrite(wr)
  1365  				continue
  1366  			}
  1367  		}
  1368  		if sc.needsFrameFlush {
  1369  			sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}})
  1370  			sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
  1371  			continue
  1372  		}
  1373  		break
  1374  	}
  1375  	sc.inFrameScheduleLoop = false
  1376  }
  1377  
  1378  // startGracefulShutdown gracefully shuts down a connection. This
  1379  // sends GOAWAY with ErrCodeNo to tell the client we're gracefully
  1380  // shutting down. The connection isn't closed until all current
  1381  // streams are done.
  1382  //
  1383  // startGracefulShutdown returns immediately; it does not wait until
  1384  // the connection has shut down.
  1385  func (sc *serverConn) startGracefulShutdown() {
  1386  	sc.serveG.checkNotOn() // NOT
  1387  	sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) })
  1388  }
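
        // A minimal, illustrative sketch (not part of this file) of how user code
        // reaches this path: with the usual ConfigureServer wiring, net/http's
        // Server.Shutdown ends up triggering startGracefulShutdown for each HTTP/2
        // connection. mux and the certificate paths below are placeholders.
        //
        //	srv := &http.Server{Addr: ":8443", Handler: mux}
        //	http2.ConfigureServer(srv, &http2.Server{})
        //	go srv.ListenAndServeTLS("cert.pem", "key.pem")
        //	// ... later, on shutdown:
        //	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        //	defer cancel()
        //	srv.Shutdown(ctx) // GOAWAY(NO_ERROR); existing streams are allowed to finish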
  1389  
  1390  // After sending GOAWAY with an error code (non-graceful shutdown), the
  1391  // connection will close after goAwayTimeout.
  1392  //
  1393  // If we close the connection immediately after sending GOAWAY, there may
  1394  // be unsent data in our kernel receive buffer, which will cause the kernel
  1395  // to send a TCP RST on close() instead of a FIN. This RST will abort the
  1396  // connection immediately, whether or not the client had received the GOAWAY.
  1397  //
  1398  // Ideally we should delay for at least 1 RTT + epsilon so the client has
  1399  // a chance to read the GOAWAY and stop sending messages. Measuring RTT
  1400  // is hard, so we approximate with 1 second. See golang.org/issue/18701.
  1401  //
  1402  // This is a var so it can be shorter in tests, where all requests use the
  1403  // loopback interface, making the expected RTT very small.
  1404  //
  1405  // TODO: configurable?
  1406  var goAwayTimeout = 1 * time.Second
  1407  
  1408  func (sc *serverConn) startGracefulShutdownInternal() {
  1409  	sc.goAway(ErrCodeNo)
  1410  }
  1411  
  1412  func (sc *serverConn) goAway(code ErrCode) {
  1413  	sc.serveG.check()
  1414  	if sc.inGoAway {
  1415  		if sc.goAwayCode == ErrCodeNo {
  1416  			sc.goAwayCode = code
  1417  		}
  1418  		return
  1419  	}
  1420  	sc.inGoAway = true
  1421  	sc.needToSendGoAway = true
  1422  	sc.goAwayCode = code
  1423  	sc.scheduleFrameWrite()
  1424  }
  1425  
  1426  func (sc *serverConn) shutDownIn(d time.Duration) {
  1427  	sc.serveG.check()
  1428  	sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
  1429  }
  1430  
  1431  func (sc *serverConn) resetStream(se StreamError) {
  1432  	sc.serveG.check()
  1433  	sc.writeFrame(FrameWriteRequest{write: se})
  1434  	if st, ok := sc.streams[se.StreamID]; ok {
  1435  		st.resetQueued = true
  1436  	}
  1437  }
  1438  
  1439  // processFrameFromReader processes the serve loop's read from readFrameCh from the
  1440  // frame-reading goroutine.
  1441  // processFrameFromReader returns whether the connection should be kept open.
  1442  func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
  1443  	sc.serveG.check()
  1444  	err := res.err
  1445  	if err != nil {
  1446  		if err == ErrFrameTooLarge {
  1447  			sc.goAway(ErrCodeFrameSize)
  1448  			return true // goAway will close the loop
  1449  		}
  1450  		clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)
  1451  		if clientGone {
  1452  			// TODO: could we also get into this state if
  1453  			// the peer does a half close
  1454  			// (e.g. CloseWrite) because they're done
  1455  			// sending frames but they're still wanting
  1456  			// our open replies?  Investigate.
  1457  			// TODO: add CloseWrite to crypto/tls.Conn first
  1458  			// so we have a way to test this? I suppose
  1459  			// just for testing we could have a non-TLS mode.
  1460  			return false
  1461  		}
  1462  	} else {
  1463  		f := res.f
  1464  		if VerboseLogs {
  1465  			sc.vlogf("http2: server read frame %v", summarizeFrame(f))
  1466  		}
  1467  		err = sc.processFrame(f)
  1468  		if err == nil {
  1469  			return true
  1470  		}
  1471  	}
  1472  
  1473  	switch ev := err.(type) {
  1474  	case StreamError:
  1475  		sc.resetStream(ev)
  1476  		return true
  1477  	case goAwayFlowError:
  1478  		sc.goAway(ErrCodeFlowControl)
  1479  		return true
  1480  	case ConnectionError:
  1481  		if res.f != nil {
  1482  			if id := res.f.Header().StreamID; id > sc.maxClientStreamID {
  1483  				sc.maxClientStreamID = id
  1484  			}
  1485  		}
  1486  		sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
  1487  		sc.goAway(ErrCode(ev))
  1488  		return true // goAway will handle shutdown
  1489  	default:
  1490  		if res.err != nil {
  1491  			sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
  1492  		} else {
  1493  			sc.logf("http2: server closing client connection: %v", err)
  1494  		}
  1495  		return false
  1496  	}
  1497  }
  1498  
  1499  func (sc *serverConn) processFrame(f Frame) error {
  1500  	sc.serveG.check()
  1501  
  1502  	// First frame received must be SETTINGS.
  1503  	if !sc.sawFirstSettings {
  1504  		if _, ok := f.(*SettingsFrame); !ok {
  1505  			return sc.countError("first_settings", ConnectionError(ErrCodeProtocol))
  1506  		}
  1507  		sc.sawFirstSettings = true
  1508  	}
  1509  
  1510  	// Discard frames for streams initiated after the identified last
  1511  	// stream sent in a GOAWAY, or all frames after sending an error.
  1512  	// We still need to return connection-level flow control for DATA frames.
  1513  	// RFC 9113 Section 6.8.
  1514  	if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) {
  1515  
  1516  		if f, ok := f.(*DataFrame); ok {
  1517  			if !sc.inflow.take(f.Length) {
  1518  				return sc.countError("data_flow", streamError(f.Header().StreamID, ErrCodeFlowControl))
  1519  			}
  1520  			sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
  1521  		}
  1522  		return nil
  1523  	}
  1524  
  1525  	switch f := f.(type) {
  1526  	case *SettingsFrame:
  1527  		return sc.processSettings(f)
  1528  	case *MetaHeadersFrame:
  1529  		return sc.processHeaders(f)
  1530  	case *WindowUpdateFrame:
  1531  		return sc.processWindowUpdate(f)
  1532  	case *PingFrame:
  1533  		return sc.processPing(f)
  1534  	case *DataFrame:
  1535  		return sc.processData(f)
  1536  	case *RSTStreamFrame:
  1537  		return sc.processResetStream(f)
  1538  	case *PriorityFrame:
  1539  		return sc.processPriority(f)
  1540  	case *GoAwayFrame:
  1541  		return sc.processGoAway(f)
  1542  	case *PushPromiseFrame:
  1543  		// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
  1544  		// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
  1545  		return sc.countError("push_promise", ConnectionError(ErrCodeProtocol))
  1546  	default:
  1547  		sc.vlogf("http2: server ignoring frame: %v", f.Header())
  1548  		return nil
  1549  	}
  1550  }
  1551  
  1552  func (sc *serverConn) processPing(f *PingFrame) error {
  1553  	sc.serveG.check()
  1554  	if f.IsAck() {
  1555  		// 6.7 PING: " An endpoint MUST NOT respond to PING frames
  1556  		// containing this flag."
  1557  		return nil
  1558  	}
  1559  	if f.StreamID != 0 {
  1560  		// "PING frames are not associated with any individual
  1561  		// stream. If a PING frame is received with a stream
  1562  		// identifier field value other than 0x0, the recipient MUST
  1563  		// respond with a connection error (Section 5.4.1) of type
  1564  		// PROTOCOL_ERROR."
  1565  		return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol))
  1566  	}
  1567  	sc.writeFrame(FrameWriteRequest{write: writePingAck{f}})
  1568  	return nil
  1569  }
  1570  
  1571  func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
  1572  	sc.serveG.check()
  1573  	switch {
  1574  	case f.StreamID != 0: // stream-level flow control
  1575  		state, st := sc.state(f.StreamID)
  1576  		if state == stateIdle {
  1577  			// Section 5.1: "Receiving any frame other than HEADERS
  1578  			// or PRIORITY on a stream in this state MUST be
  1579  			// treated as a connection error (Section 5.4.1) of
  1580  			// type PROTOCOL_ERROR."
  1581  			return sc.countError("stream_idle", ConnectionError(ErrCodeProtocol))
  1582  		}
  1583  		if st == nil {
  1584  			// "WINDOW_UPDATE can be sent by a peer that has sent a
  1585  			// frame bearing the END_STREAM flag. This means that a
  1586  			// receiver could receive a WINDOW_UPDATE frame on a "half
  1587  			// closed (remote)" or "closed" stream. A receiver MUST
  1588  			// NOT treat this as an error, see Section 5.1."
  1589  			return nil
  1590  		}
  1591  		if !st.flow.add(int32(f.Increment)) {
  1592  			return sc.countError("bad_flow", streamError(f.StreamID, ErrCodeFlowControl))
  1593  		}
  1594  	default: // connection-level flow control
  1595  		if !sc.flow.add(int32(f.Increment)) {
  1596  			return goAwayFlowError{}
  1597  		}
  1598  	}
  1599  	sc.scheduleFrameWrite()
  1600  	return nil
  1601  }
  1602  
  1603  func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
  1604  	sc.serveG.check()
  1605  
  1606  	state, st := sc.state(f.StreamID)
  1607  	if state == stateIdle {
  1608  		// 6.4 "RST_STREAM frames MUST NOT be sent for a
  1609  		// stream in the "idle" state. If a RST_STREAM frame
  1610  		// identifying an idle stream is received, the
  1611  		// recipient MUST treat this as a connection error
  1612  		// (Section 5.4.1) of type PROTOCOL_ERROR."
  1613  		return sc.countError("reset_idle_stream", ConnectionError(ErrCodeProtocol))
  1614  	}
  1615  	if st != nil {
  1616  		st.cancelCtx()
  1617  		sc.closeStream(st, streamError(f.StreamID, f.ErrCode))
  1618  	}
  1619  	return nil
  1620  }
  1621  
  1622  func (sc *serverConn) closeStream(st *stream, err error) {
  1623  	sc.serveG.check()
  1624  	if st.state == stateIdle || st.state == stateClosed {
  1625  		panic(fmt.Sprintf("invariant violated: can't close stream in state %v", st.state))
  1626  	}
  1627  	st.state = stateClosed
  1628  	if st.readDeadline != nil {
  1629  		st.readDeadline.Stop()
  1630  	}
  1631  	if st.writeDeadline != nil {
  1632  		st.writeDeadline.Stop()
  1633  	}
  1634  	if st.isPushed() {
  1635  		sc.curPushedStreams--
  1636  	} else {
  1637  		sc.curClientStreams--
  1638  	}
  1639  	delete(sc.streams, st.id)
  1640  	if len(sc.streams) == 0 {
  1641  		sc.setConnState(http.StateIdle)
  1642  		if sc.srv.IdleTimeout > 0 && sc.idleTimer != nil {
  1643  			sc.idleTimer.Reset(sc.srv.IdleTimeout)
  1644  		}
  1645  		if h1ServerKeepAlivesDisabled(sc.hs) {
  1646  			sc.startGracefulShutdownInternal()
  1647  		}
  1648  	}
  1649  	if p := st.body; p != nil {
  1650  		// Return any buffered unread bytes worth of conn-level flow control.
  1651  		// See golang.org/issue/16481
  1652  		sc.sendWindowUpdate(nil, p.Len())
  1653  
  1654  		p.CloseWithError(err)
  1655  	}
  1656  	if e, ok := err.(StreamError); ok {
  1657  		if e.Cause != nil {
  1658  			err = e.Cause
  1659  		} else {
  1660  			err = errStreamClosed
  1661  		}
  1662  	}
  1663  	st.closeErr = err
  1664  	st.cancelCtx()
  1665  	st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
  1666  	sc.writeSched.CloseStream(st.id)
  1667  }
  1668  
  1669  func (sc *serverConn) processSettings(f *SettingsFrame) error {
  1670  	sc.serveG.check()
  1671  	if f.IsAck() {
  1672  		sc.unackedSettings--
  1673  		if sc.unackedSettings < 0 {
  1674  			// Why is the peer ACKing settings we never sent?
  1675  			// The spec doesn't mention this case, but
  1676  			// hang up on them anyway.
  1677  			return sc.countError("ack_mystery", ConnectionError(ErrCodeProtocol))
  1678  		}
  1679  		return nil
  1680  	}
  1681  	if f.NumSettings() > 100 || f.HasDuplicates() {
  1682  		// This isn't actually in the spec, but hang up on
  1683  		// suspiciously large settings frames or those with
  1684  		// duplicate entries.
  1685  		return sc.countError("settings_big_or_dups", ConnectionError(ErrCodeProtocol))
  1686  	}
  1687  	if err := f.ForeachSetting(sc.processSetting); err != nil {
  1688  		return err
  1689  	}
  1690  	// TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be
  1691  	// acknowledged individually, even if multiple are received before the ACK.
  1692  	sc.needToSendSettingsAck = true
  1693  	sc.scheduleFrameWrite()
  1694  	return nil
  1695  }
  1696  
  1697  func (sc *serverConn) processSetting(s Setting) error {
  1698  	sc.serveG.check()
  1699  	if err := s.Valid(); err != nil {
  1700  		return err
  1701  	}
  1702  	if VerboseLogs {
  1703  		sc.vlogf("http2: server processing setting %v", s)
  1704  	}
  1705  	switch s.ID {
  1706  	case SettingHeaderTableSize:
  1707  		sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
  1708  	case SettingEnablePush:
  1709  		sc.pushEnabled = s.Val != 0
  1710  	case SettingMaxConcurrentStreams:
  1711  		sc.clientMaxStreams = s.Val
  1712  	case SettingInitialWindowSize:
  1713  		return sc.processSettingInitialWindowSize(s.Val)
  1714  	case SettingMaxFrameSize:
  1715  		sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
  1716  	case SettingMaxHeaderListSize:
  1717  		sc.peerMaxHeaderListSize = s.Val
  1718  	default:
  1719  		// Unknown setting: "An endpoint that receives a SETTINGS
  1720  		// frame with any unknown or unsupported identifier MUST
  1721  		// ignore that setting."
  1722  		if VerboseLogs {
  1723  			sc.vlogf("http2: server ignoring unknown setting %v", s)
  1724  		}
  1725  	}
  1726  	return nil
  1727  }
  1728  
  1729  func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
  1730  	sc.serveG.check()
  1731  	// Note: val already validated to be within range by
  1732  	// processSetting's Valid call.
  1733  
  1734  	// "A SETTINGS frame can alter the initial flow control window
  1735  	// size for all current streams. When the value of
  1736  	// SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
  1737  	// adjust the size of all stream flow control windows that it
  1738  	// maintains by the difference between the new value and the
  1739  	// old value."
  1740  	old := sc.initialStreamSendWindowSize
  1741  	sc.initialStreamSendWindowSize = int32(val)
  1742  	growth := int32(val) - old // may be negative
  1743  	for _, st := range sc.streams {
  1744  		if !st.flow.add(growth) {
  1745  			// 6.9.2 Initial Flow Control Window Size
  1746  			// "An endpoint MUST treat a change to
  1747  			// SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
  1748  			// control window to exceed the maximum size as a
  1749  			// connection error (Section 5.4.1) of type
  1750  			// FLOW_CONTROL_ERROR."
  1751  			return sc.countError("setting_win_size", ConnectionError(ErrCodeFlowControl))
  1752  		}
  1753  	}
  1754  	return nil
  1755  }
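
        // Worked example for the adjustment above (numbers are illustrative only):
        // with the default initial window of 65535, a peer that sends
        // SETTINGS_INITIAL_WINDOW_SIZE = 131070 makes growth = +65535, so every open
        // stream's send window grows by 65535. If the peer later sends 16384, growth
        // is -49151 and some windows may go negative; that is permitted, and we
        // simply stop sending DATA on those streams until WINDOW_UPDATE frames bring
        // the windows back above zero.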
  1756  
  1757  func (sc *serverConn) processData(f *DataFrame) error {
  1758  	sc.serveG.check()
  1759  	id := f.Header().StreamID
  1760  
  1761  	data := f.Data()
  1762  	state, st := sc.state(id)
  1763  	if id == 0 || state == stateIdle {
  1764  		// Section 6.1: "DATA frames MUST be associated with a
  1765  		// stream. If a DATA frame is received whose stream
  1766  		// identifier field is 0x0, the recipient MUST respond
  1767  		// with a connection error (Section 5.4.1) of type
  1768  		// PROTOCOL_ERROR."
  1769  		//
  1770  		// Section 5.1: "Receiving any frame other than HEADERS
  1771  		// or PRIORITY on a stream in this state MUST be
  1772  		// treated as a connection error (Section 5.4.1) of
  1773  		// type PROTOCOL_ERROR."
  1774  		return sc.countError("data_on_idle", ConnectionError(ErrCodeProtocol))
  1775  	}
  1776  
  1777  	// "If a DATA frame is received whose stream is not in "open"
  1778  	// or "half closed (local)" state, the recipient MUST respond
  1779  	// with a stream error (Section 5.4.2) of type STREAM_CLOSED."
  1780  	if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued {
  1781  		// This includes sending a RST_STREAM if the stream is
  1782  		// in stateHalfClosedLocal (which currently means that
  1783  		// the http.Handler returned, so it's done reading &
  1784  		// done writing). Try to stop the client from sending
  1785  		// more DATA.
  1786  
  1787  		// But still enforce their connection-level flow control,
  1788  		// and return any flow control bytes since we're not going
  1789  		// to consume them.
  1790  		if !sc.inflow.take(f.Length) {
  1791  			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
  1792  		}
  1793  		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
  1794  
  1795  		if st != nil && st.resetQueued {
  1796  			// Already have a stream error in flight. Don't send another.
  1797  			return nil
  1798  		}
  1799  		return sc.countError("closed", streamError(id, ErrCodeStreamClosed))
  1800  	}
  1801  	if st.body == nil {
  1802  		panic("internal error: should have a body in this state")
  1803  	}
  1804  
  1805  	// Sender sending more than they'd declared?
  1806  	if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
  1807  		if !sc.inflow.take(f.Length) {
  1808  			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
  1809  		}
  1810  		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
  1811  
  1812  		st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
  1813  		// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
  1814  		// value of a content-length header field does not equal the sum of the
  1815  		// DATA frame payload lengths that form the body.
  1816  		return sc.countError("send_too_much", streamError(id, ErrCodeProtocol))
  1817  	}
  1818  	if f.Length > 0 {
  1819  		// Check whether the client has flow control quota.
  1820  		if !takeInflows(&sc.inflow, &st.inflow, f.Length) {
  1821  			return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl))
  1822  		}
  1823  
  1824  		if len(data) > 0 {
  1825  			st.bodyBytes += int64(len(data))
  1826  			wrote, err := st.body.Write(data)
  1827  			if err != nil {
  1828  				// The handler has closed the request body.
  1829  				// Return the connection-level flow control for the discarded data,
  1830  				// but not the stream-level flow control.
  1831  				sc.sendWindowUpdate(nil, int(f.Length)-wrote)
  1832  				return nil
  1833  			}
  1834  			if wrote != len(data) {
  1835  				panic("internal error: bad Writer")
  1836  			}
  1837  		}
  1838  
  1839  		// Return any padded flow control now, since we won't
  1840  		// refund it later on body reads.
  1841  		// Call sendWindowUpdate even if there is no padding,
  1842  		// to return buffered flow control credit if the sent
  1843  		// window has shrunk.
  1844  		pad := int32(f.Length) - int32(len(data))
  1845  		sc.sendWindowUpdate32(nil, pad)
  1846  		sc.sendWindowUpdate32(st, pad)
  1847  	}
  1848  	if f.StreamEnded() {
  1849  		st.endStream()
  1850  	}
  1851  	return nil
  1852  }
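
        // Worked example of the padding refund above (illustrative): a DATA frame
        // with Length = 1000 carrying 899 bytes of data, one Pad Length octet, and
        // 100 bytes of padding gives pad = 1000 - 899 = 101, so 101 bytes of
        // connection- and stream-level flow control are returned immediately; the
        // remaining 899 bytes are returned only as the handler reads the request
        // body.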
  1853  
  1854  func (sc *serverConn) processGoAway(f *GoAwayFrame) error {
  1855  	sc.serveG.check()
  1856  	if f.ErrCode != ErrCodeNo {
  1857  		sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f)
  1858  	} else {
  1859  		sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
  1860  	}
  1861  	sc.startGracefulShutdownInternal()
  1862  	// http://tools.ietf.org/html/rfc7540#section-6.8
  1863  	// We should not create any new streams, which means we should disable push.
  1864  	sc.pushEnabled = false
  1865  	return nil
  1866  }
  1867  
  1868  // isPushed reports whether the stream is server-initiated.
  1869  func (st *stream) isPushed() bool {
  1870  	return st.id%2 == 0
  1871  }
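
        // (Per RFC 7540 Section 5.1.1, client-initiated streams use odd identifiers
        // and server-initiated streams use even ones, so id%2 == 0 identifies a
        // pushed stream.)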
  1872  
  1873  // endStream closes a Request.Body's pipe. It is called when a DATA
  1874  // frame says a request body is over (or after trailers).
  1875  func (st *stream) endStream() {
  1876  	sc := st.sc
  1877  	sc.serveG.check()
  1878  
  1879  	if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
  1880  		st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
  1881  			st.declBodyBytes, st.bodyBytes))
  1882  	} else {
  1883  		st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
  1884  		st.body.CloseWithError(io.EOF)
  1885  	}
  1886  	st.state = stateHalfClosedRemote
  1887  }
  1888  
  1889  // copyTrailersToHandlerRequest is run in the Handler's goroutine in
  1890  // its Request.Body.Read just before it gets io.EOF.
  1891  func (st *stream) copyTrailersToHandlerRequest() {
  1892  	for k, vv := range st.trailer {
  1893  		if _, ok := st.reqTrailer[k]; ok {
  1894  			// Only copy it over if it was pre-declared.
  1895  			st.reqTrailer[k] = vv
  1896  		}
  1897  	}
  1898  }
  1899  
  1900  // onReadTimeout is run on its own goroutine (from time.AfterFunc)
  1901  // when the stream's ReadTimeout has fired.
  1902  func (st *stream) onReadTimeout() {
  1903  	if st.body != nil {
  1904  		// Wrap the ErrDeadlineExceeded to avoid callers depending on us
  1905  		// returning the bare error.
  1906  		st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
  1907  	}
  1908  }
  1909  
  1910  // onWriteTimeout is run on its own goroutine (from time.AfterFunc)
  1911  // when the stream's WriteTimeout has fired.
  1912  func (st *stream) onWriteTimeout() {
  1913  	st.sc.writeFrameFromHandler(FrameWriteRequest{write: StreamError{
  1914  		StreamID: st.id,
  1915  		Code:     ErrCodeInternal,
  1916  		Cause:    os.ErrDeadlineExceeded,
  1917  	}})
  1918  }
  1919  
  1920  func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
  1921  	sc.serveG.check()
  1922  	id := f.StreamID
  1923  	// http://tools.ietf.org/html/rfc7540#section-5.1.1
  1924  	// Streams initiated by a client MUST use odd-numbered stream
  1925  	// identifiers. [...] An endpoint that receives an unexpected
  1926  	// stream identifier MUST respond with a connection error
  1927  	// (Section 5.4.1) of type PROTOCOL_ERROR.
  1928  	if id%2 != 1 {
  1929  		return sc.countError("headers_even", ConnectionError(ErrCodeProtocol))
  1930  	}
  1931  	// A HEADERS frame can be used to create a new stream or
  1932  	// send a trailer for an open one. If we already have a stream
  1933  	// open, let it process its own HEADERS frame (trailers at this
  1934  	// point, if it's valid).
  1935  	if st := sc.streams[f.StreamID]; st != nil {
  1936  		if st.resetQueued {
  1937  			// We're sending RST_STREAM to close the stream, so don't bother
  1938  			// processing this frame.
  1939  			return nil
  1940  		}
  1941  		// RFC 7540, sec 5.1: If an endpoint receives additional frames, other than
  1942  		// WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in
  1943  		// this state, it MUST respond with a stream error (Section 5.4.2) of
  1944  		// type STREAM_CLOSED.
  1945  		if st.state == stateHalfClosedRemote {
  1946  			return sc.countError("headers_half_closed", streamError(id, ErrCodeStreamClosed))
  1947  		}
  1948  		return st.processTrailerHeaders(f)
  1949  	}
  1950  
  1951  	// [...] The identifier of a newly established stream MUST be
  1952  	// numerically greater than all streams that the initiating
  1953  	// endpoint has opened or reserved. [...]  An endpoint that
  1954  	// receives an unexpected stream identifier MUST respond with
  1955  	// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
  1956  	if id <= sc.maxClientStreamID {
  1957  		return sc.countError("stream_went_down", ConnectionError(ErrCodeProtocol))
  1958  	}
  1959  	sc.maxClientStreamID = id
  1960  
  1961  	if sc.idleTimer != nil {
  1962  		sc.idleTimer.Stop()
  1963  	}
  1964  
  1965  	// http://tools.ietf.org/html/rfc7540#section-5.1.2
  1966  	// [...] Endpoints MUST NOT exceed the limit set by their peer. An
  1967  	// endpoint that receives a HEADERS frame that causes their
  1968  	// advertised concurrent stream limit to be exceeded MUST treat
  1969  	// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR
  1970  	// or REFUSED_STREAM.
  1971  	if sc.curClientStreams+1 > sc.advMaxStreams {
  1972  		if sc.unackedSettings == 0 {
  1973  			// They should know better.
  1974  			return sc.countError("over_max_streams", streamError(id, ErrCodeProtocol))
  1975  		}
  1976  		// Assume it's a network race, where they just haven't
  1977  		// received our last SETTINGS update. But actually
  1978  		// this can't happen yet, because we don't yet provide
  1979  		// a way for users to adjust server parameters at
  1980  		// runtime.
  1981  		return sc.countError("over_max_streams_race", streamError(id, ErrCodeRefusedStream))
  1982  	}
  1983  
  1984  	initialState := stateOpen
  1985  	if f.StreamEnded() {
  1986  		initialState = stateHalfClosedRemote
  1987  	}
  1988  	st := sc.newStream(id, 0, initialState)
  1989  
  1990  	if f.HasPriority() {
  1991  		if err := sc.checkPriority(f.StreamID, f.Priority); err != nil {
  1992  			return err
  1993  		}
  1994  		sc.writeSched.AdjustStream(st.id, f.Priority)
  1995  	}
  1996  
  1997  	rw, req, err := sc.newWriterAndRequest(st, f)
  1998  	if err != nil {
  1999  		return err
  2000  	}
  2001  	st.reqTrailer = req.Trailer
  2002  	if st.reqTrailer != nil {
  2003  		st.trailer = make(http.Header)
  2004  	}
  2005  	st.body = req.Body.(*requestBody).pipe // may be nil
  2006  	st.declBodyBytes = req.ContentLength
  2007  
  2008  	handler := sc.handler.ServeHTTP
  2009  	if f.Truncated {
  2010  		// Their header list was too long. Send a 431 error.
  2011  		handler = handleHeaderListTooLong
  2012  	} else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil {
  2013  		handler = new400Handler(err)
  2014  	}
  2015  
  2016  	// The net/http package sets the read deadline from the
  2017  	// http.Server.ReadTimeout during the TLS handshake, but then
  2018  	// passes the connection off to us with the deadline already
  2019  	// set. Disarm it here after the request headers are read,
  2020  	// similar to how the http1 server works. Here it's
  2021  	// technically more like the http1 Server's ReadHeaderTimeout
  2022  	// (in Go 1.8), though. That's a more sane option anyway.
  2023  	if sc.hs.ReadTimeout > 0 {
  2024  		sc.conn.SetReadDeadline(time.Time{})
  2025  		st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
  2026  	}
  2027  
  2028  	return sc.scheduleHandler(id, rw, req, handler)
  2029  }
  2030  
  2031  func (sc *serverConn) upgradeRequest(req *http.Request) {
  2032  	sc.serveG.check()
  2033  	id := uint32(1)
  2034  	sc.maxClientStreamID = id
  2035  	st := sc.newStream(id, 0, stateHalfClosedRemote)
  2036  	st.reqTrailer = req.Trailer
  2037  	if st.reqTrailer != nil {
  2038  		st.trailer = make(http.Header)
  2039  	}
  2040  	rw := sc.newResponseWriter(st, req)
  2041  
  2042  	// Disable any read deadline set by the net/http package
  2043  	// prior to the upgrade.
  2044  	if sc.hs.ReadTimeout > 0 {
  2045  		sc.conn.SetReadDeadline(time.Time{})
  2046  	}
  2047  
  2048  	// This is the first request on the connection,
  2049  	// so start the handler directly rather than going
  2050  	// through scheduleHandler.
  2051  	sc.curHandlers++
  2052  	go sc.runHandler(rw, req, sc.handler.ServeHTTP)
  2053  }
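
        // Protocol illustration (RFC 7540 Section 3.2): upgradeRequest serves the
        // request that arrived over HTTP/1.1 before the switch to cleartext HTTP/2,
        // for example:
        //
        //	GET / HTTP/1.1
        //	Host: example.com
        //	Connection: Upgrade, HTTP2-Settings
        //	Upgrade: h2c
        //	HTTP2-Settings: <base64url-encoded SETTINGS payload>
        //
        // Such a request is assigned stream identifier 1 and is half-closed (remote)
        // toward the server, which is why the stream is created in that state above.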
  2054  
  2055  func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
  2056  	sc := st.sc
  2057  	sc.serveG.check()
  2058  	if st.gotTrailerHeader {
  2059  		return sc.countError("dup_trailers", ConnectionError(ErrCodeProtocol))
  2060  	}
  2061  	st.gotTrailerHeader = true
  2062  	if !f.StreamEnded() {
  2063  		return sc.countError("trailers_not_ended", streamError(st.id, ErrCodeProtocol))
  2064  	}
  2065  
  2066  	if len(f.PseudoFields()) > 0 {
  2067  		return sc.countError("trailers_pseudo", streamError(st.id, ErrCodeProtocol))
  2068  	}
  2069  	if st.trailer != nil {
  2070  		for _, hf := range f.RegularFields() {
  2071  			key := sc.canonicalHeader(hf.Name)
  2072  			if !httpguts.ValidTrailerHeader(key) {
  2073  				// TODO: send more details to the peer somehow. But http2 has
  2074  				// no way to send debug data at a stream level. Discuss with
  2075  				// HTTP folk.
  2076  				return sc.countError("trailers_bogus", streamError(st.id, ErrCodeProtocol))
  2077  			}
  2078  			st.trailer[key] = append(st.trailer[key], hf.Value)
  2079  		}
  2080  	}
  2081  	st.endStream()
  2082  	return nil
  2083  }
  2084  
  2085  func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error {
  2086  	if streamID == p.StreamDep {
  2087  		// Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
  2088  		// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
  2089  		// Section 5.3.3 says that a stream can depend on one of its dependencies,
  2090  		// so it's only self-dependencies that are forbidden.
  2091  		return sc.countError("priority", streamError(streamID, ErrCodeProtocol))
  2092  	}
  2093  	return nil
  2094  }
  2095  
  2096  func (sc *serverConn) processPriority(f *PriorityFrame) error {
  2097  	if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil {
  2098  		return err
  2099  	}
  2100  	sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)
  2101  	return nil
  2102  }
  2103  
  2104  func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream {
  2105  	sc.serveG.check()
  2106  	if id == 0 {
  2107  		panic("internal error: cannot create stream with id 0")
  2108  	}
  2109  
  2110  	ctx, cancelCtx := context.WithCancel(sc.baseCtx)
  2111  	st := &stream{
  2112  		sc:        sc,
  2113  		id:        id,
  2114  		state:     state,
  2115  		ctx:       ctx,
  2116  		cancelCtx: cancelCtx,
  2117  	}
  2118  	st.cw.Init()
  2119  	st.flow.conn = &sc.flow // link to conn-level counter
  2120  	st.flow.add(sc.initialStreamSendWindowSize)
  2121  	st.inflow.init(sc.srv.initialStreamRecvWindowSize())
  2122  	if sc.hs.WriteTimeout > 0 {
  2123  		st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
  2124  	}
  2125  
  2126  	sc.streams[id] = st
  2127  	sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})
  2128  	if st.isPushed() {
  2129  		sc.curPushedStreams++
  2130  	} else {
  2131  		sc.curClientStreams++
  2132  	}
  2133  	if sc.curOpenStreams() == 1 {
  2134  		sc.setConnState(http.StateActive)
  2135  	}
  2136  
  2137  	return st
  2138  }
  2139  
  2140  func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
  2141  	sc.serveG.check()
  2142  
  2143  	rp := requestParam{
  2144  		method:    f.PseudoValue("method"),
  2145  		scheme:    f.PseudoValue("scheme"),
  2146  		authority: f.PseudoValue("authority"),
  2147  		path:      f.PseudoValue("path"),
  2148  	}
  2149  
  2150  	isConnect := rp.method == "CONNECT"
  2151  	if isConnect {
  2152  		if rp.path != "" || rp.scheme != "" || rp.authority == "" {
  2153  			return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
  2154  		}
  2155  	} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
  2156  		// See 8.1.2.6 Malformed Requests and Responses:
  2157  		//
  2158  		// "Malformed requests or responses that are detected
  2159  		// MUST be treated as a stream error (Section 5.4.2)
  2160  		// of type PROTOCOL_ERROR."
  2161  		//
  2162  		// 8.1.2.3 Request Pseudo-Header Fields
  2163  		// "All HTTP/2 requests MUST include exactly one valid
  2164  		// value for the :method, :scheme, and :path
  2165  		// pseudo-header fields"
  2166  		return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
  2167  	}
  2168  
  2169  	rp.header = make(http.Header)
  2170  	for _, hf := range f.RegularFields() {
  2171  		rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
  2172  	}
  2173  	if rp.authority == "" {
  2174  		rp.authority = rp.header.Get("Host")
  2175  	}
  2176  
  2177  	rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
  2178  	if err != nil {
  2179  		return nil, nil, err
  2180  	}
  2181  	bodyOpen := !f.StreamEnded()
  2182  	if bodyOpen {
  2183  		if vv, ok := rp.header["Content-Length"]; ok {
  2184  			if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
  2185  				req.ContentLength = int64(cl)
  2186  			} else {
  2187  				req.ContentLength = 0
  2188  			}
  2189  		} else {
  2190  			req.ContentLength = -1
  2191  		}
  2192  		req.Body.(*requestBody).pipe = &pipe{
  2193  			b: &dataBuffer{expected: req.ContentLength},
  2194  		}
  2195  	}
  2196  	return rw, req, nil
  2197  }
  2198  
  2199  type requestParam struct {
  2200  	method                  string
  2201  	scheme, authority, path string
  2202  	header                  http.Header
  2203  }
  2204  
  2205  func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {
  2206  	sc.serveG.check()
  2207  
  2208  	var tlsState *tls.ConnectionState // nil if not scheme https
  2209  	if rp.scheme == "https" {
  2210  		tlsState = sc.tlsState
  2211  	}
  2212  
  2213  	needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue")
  2214  	if needsContinue {
  2215  		rp.header.Del("Expect")
  2216  	}
  2217  	// Merge Cookie headers into one "; "-delimited value.
  2218  	if cookies := rp.header["Cookie"]; len(cookies) > 1 {
  2219  		rp.header.Set("Cookie", strings.Join(cookies, "; "))
  2220  	}
  2221  
  2222  	// Setup Trailers
  2223  	var trailer http.Header
  2224  	for _, v := range rp.header["Trailer"] {
  2225  		for _, key := range strings.Split(v, ",") {
  2226  			key = http.CanonicalHeaderKey(textproto.TrimString(key))
  2227  			switch key {
  2228  			case "Transfer-Encoding", "Trailer", "Content-Length":
  2229  				// Bogus. (copy of http1 rules)
  2230  				// Ignore.
  2231  			default:
  2232  				if trailer == nil {
  2233  					trailer = make(http.Header)
  2234  				}
  2235  				trailer[key] = nil
  2236  			}
  2237  		}
  2238  	}
  2239  	delete(rp.header, "Trailer")
  2240  
  2241  	var url_ *url.URL
  2242  	var requestURI string
  2243  	if rp.method == "CONNECT" {
  2244  		url_ = &url.URL{Host: rp.authority}
  2245  		requestURI = rp.authority // mimic HTTP/1 server behavior
  2246  	} else {
  2247  		var err error
  2248  		url_, err = url.ParseRequestURI(rp.path)
  2249  		if err != nil {
  2250  			return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol))
  2251  		}
  2252  		requestURI = rp.path
  2253  	}
  2254  
  2255  	body := &requestBody{
  2256  		conn:          sc,
  2257  		stream:        st,
  2258  		needsContinue: needsContinue,
  2259  	}
  2260  	req := &http.Request{
  2261  		Method:     rp.method,
  2262  		URL:        url_,
  2263  		RemoteAddr: sc.remoteAddrStr,
  2264  		Header:     rp.header,
  2265  		RequestURI: requestURI,
  2266  		Proto:      "HTTP/2.0",
  2267  		ProtoMajor: 2,
  2268  		ProtoMinor: 0,
  2269  		TLS:        tlsState,
  2270  		Host:       rp.authority,
  2271  		Body:       body,
  2272  		Trailer:    trailer,
  2273  	}
  2274  	req = req.WithContext(st.ctx)
  2275  
  2276  	rw := sc.newResponseWriter(st, req)
  2277  	return rw, req, nil
  2278  }
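
        // Illustrative mapping (describing the code above, not adding behavior): a
        // HEADERS frame carrying
        //
        //	:method = GET
        //	:scheme = https
        //	:authority = example.com
        //	:path = /search?q=go
        //
        // yields an *http.Request with Method "GET", Host "example.com",
        // RequestURI "/search?q=go", URL parsed from that path, Proto "HTTP/2.0",
        // and TLS connection state attached because the scheme is https.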
  2279  
  2280  func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *responseWriter {
  2281  	rws := responseWriterStatePool.Get().(*responseWriterState)
  2282  	bwSave := rws.bw
  2283  	*rws = responseWriterState{} // zero all the fields
  2284  	rws.conn = sc
  2285  	rws.bw = bwSave
  2286  	rws.bw.Reset(chunkWriter{rws})
  2287  	rws.stream = st
  2288  	rws.req = req
  2289  	return &responseWriter{rws: rws}
  2290  }
  2291  
  2292  type unstartedHandler struct {
  2293  	streamID uint32
  2294  	rw       *responseWriter
  2295  	req      *http.Request
  2296  	handler  func(http.ResponseWriter, *http.Request)
  2297  }
  2298  
  2299  // scheduleHandler starts a handler goroutine,
  2300  // or schedules one to start as soon as an existing handler finishes.
  2301  func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error {
  2302  	sc.serveG.check()
  2303  	maxHandlers := sc.advMaxStreams
  2304  	if sc.curHandlers < maxHandlers {
  2305  		sc.curHandlers++
  2306  		go sc.runHandler(rw, req, handler)
  2307  		return nil
  2308  	}
  2309  	if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) {
  2310  		return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm))
  2311  	}
  2312  	sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{
  2313  		streamID: streamID,
  2314  		rw:       rw,
  2315  		req:      req,
  2316  		handler:  handler,
  2317  	})
  2318  	return nil
  2319  }
  2320  
  2321  func (sc *serverConn) handlerDone() {
  2322  	sc.serveG.check()
  2323  	sc.curHandlers--
  2324  	i := 0
  2325  	maxHandlers := sc.advMaxStreams
  2326  	for ; i < len(sc.unstartedHandlers); i++ {
  2327  		u := sc.unstartedHandlers[i]
  2328  		if sc.streams[u.streamID] == nil {
  2329  			// This stream was reset before its goroutine had a chance to start.
  2330  			continue
  2331  		}
  2332  		if sc.curHandlers >= maxHandlers {
  2333  			break
  2334  		}
  2335  		sc.curHandlers++
  2336  		go sc.runHandler(u.rw, u.req, u.handler)
  2337  		sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references
  2338  	}
  2339  	sc.unstartedHandlers = sc.unstartedHandlers[i:]
  2340  	if len(sc.unstartedHandlers) == 0 {
  2341  		sc.unstartedHandlers = nil
  2342  	}
  2343  }
  2344  
  2345  // Run on its own goroutine.
  2346  func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
  2347  	defer sc.sendServeMsg(handlerDoneMsg)
  2348  	didPanic := true
  2349  	defer func() {
  2350  		rw.rws.stream.cancelCtx()
  2351  		if req.MultipartForm != nil {
  2352  			req.MultipartForm.RemoveAll()
  2353  		}
  2354  		if didPanic {
  2355  			e := recover()
  2356  			sc.writeFrameFromHandler(FrameWriteRequest{
  2357  				write:  handlerPanicRST{rw.rws.stream.id},
  2358  				stream: rw.rws.stream,
  2359  			})
  2360  			// Same as net/http:
  2361  			if e != nil && e != http.ErrAbortHandler {
  2362  				const size = 64 << 10
  2363  				buf := make([]byte, size)
  2364  				buf = buf[:runtime.Stack(buf, false)]
  2365  				sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
  2366  			}
  2367  			return
  2368  		}
  2369  		rw.handlerDone()
  2370  	}()
  2371  	handler(rw, req)
  2372  	didPanic = false
  2373  }
  2374  
  2375  func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) {
  2376  	// 10.5.1 Limits on Header Block Size:
  2377  	// .. "A server that receives a larger header block than it is
  2378  	// willing to handle can send an HTTP 431 (Request Header Fields Too
  2379  	// Large) status code"
  2380  	const statusRequestHeaderFieldsTooLarge = 431 // http.StatusRequestHeaderFieldsTooLarge is only in Go 1.6+
  2381  	w.WriteHeader(statusRequestHeaderFieldsTooLarge)
  2382  	io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>")
  2383  }
  2384  
  2385  // called from handler goroutines.
  2386  // h may be nil.
  2387  func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
  2388  	sc.serveG.checkNotOn() // NOT on
  2389  	var errc chan error
  2390  	if headerData.h != nil {
  2391  		// There's a header map (which we don't own), so we have to block
  2392  		// until this frame is written, so that an http.Flusher.Flush mid-handler
  2393  		// writes out the correct values of the keys before the handler later
  2394  		// potentially mutates them.
  2395  		errc = errChanPool.Get().(chan error)
  2396  	}
  2397  	if err := sc.writeFrameFromHandler(FrameWriteRequest{
  2398  		write:  headerData,
  2399  		stream: st,
  2400  		done:   errc,
  2401  	}); err != nil {
  2402  		return err
  2403  	}
  2404  	if errc != nil {
  2405  		select {
  2406  		case err := <-errc:
  2407  			errChanPool.Put(errc)
  2408  			return err
  2409  		case <-sc.doneServing:
  2410  			return errClientDisconnected
  2411  		case <-st.cw:
  2412  			return errStreamClosed
  2413  		}
  2414  	}
  2415  	return nil
  2416  }
  2417  
  2418  // called from handler goroutines.
  2419  func (sc *serverConn) write100ContinueHeaders(st *stream) {
  2420  	sc.writeFrameFromHandler(FrameWriteRequest{
  2421  		write:  write100ContinueHeadersFrame{st.id},
  2422  		stream: st,
  2423  	})
  2424  }
  2425  
  2426  // A bodyReadMsg tells the server loop that the http.Handler read n
  2427  // bytes of the DATA from the client on the given stream.
  2428  type bodyReadMsg struct {
  2429  	st *stream
  2430  	n  int
  2431  }
  2432  
  2433  // called from handler goroutines.
  2434  // Notes that the handler for the given stream ID read n bytes of its body
  2435  // and schedules flow control tokens to be sent.
  2436  func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
  2437  	sc.serveG.checkNotOn() // NOT on
  2438  	if n > 0 {
  2439  		select {
  2440  		case sc.bodyReadCh <- bodyReadMsg{st, n}:
  2441  		case <-sc.doneServing:
  2442  		}
  2443  	}
  2444  }
  2445  
  2446  func (sc *serverConn) noteBodyRead(st *stream, n int) {
  2447  	sc.serveG.check()
  2448  	sc.sendWindowUpdate(nil, n) // conn-level
  2449  	if st.state != stateHalfClosedRemote && st.state != stateClosed {
  2450  		// Don't send this WINDOW_UPDATE if the stream is closed
  2451  		// remotely.
  2452  		sc.sendWindowUpdate(st, n)
  2453  	}
  2454  }
  2455  
  2456  // st may be nil for conn-level
  2457  func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
  2458  	sc.sendWindowUpdate(st, int(n))
  2459  }
  2460  
  2461  // st may be nil for conn-level
  2462  func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
  2463  	sc.serveG.check()
  2464  	var streamID uint32
  2465  	var send int32
  2466  	if st == nil {
  2467  		send = sc.inflow.add(n)
  2468  	} else {
  2469  		streamID = st.id
  2470  		send = st.inflow.add(n)
  2471  	}
  2472  	if send == 0 {
  2473  		return
  2474  	}
  2475  	sc.writeFrame(FrameWriteRequest{
  2476  		write:  writeWindowUpdate{streamID: streamID, n: uint32(send)},
  2477  		stream: st,
  2478  	})
  2479  }
  2480  
  2481  // requestBody is the Handler's Request.Body type.
  2482  // Read and Close may be called concurrently.
  2483  type requestBody struct {
  2484  	_             incomparable
  2485  	stream        *stream
  2486  	conn          *serverConn
  2487  	closeOnce     sync.Once // for use by Close only
  2488  	sawEOF        bool      // for use by Read only
  2489  	pipe          *pipe     // non-nil if we have an HTTP entity message body
  2490  	needsContinue bool      // need to send a 100-continue
  2491  }
  2492  
  2493  func (b *requestBody) Close() error {
  2494  	b.closeOnce.Do(func() {
  2495  		if b.pipe != nil {
  2496  			b.pipe.BreakWithError(errClosedBody)
  2497  		}
  2498  	})
  2499  	return nil
  2500  }
  2501  
  2502  func (b *requestBody) Read(p []byte) (n int, err error) {
  2503  	if b.needsContinue {
  2504  		b.needsContinue = false
  2505  		b.conn.write100ContinueHeaders(b.stream)
  2506  	}
  2507  	if b.pipe == nil || b.sawEOF {
  2508  		return 0, io.EOF
  2509  	}
  2510  	n, err = b.pipe.Read(p)
  2511  	if err == io.EOF {
  2512  		b.sawEOF = true
  2513  	}
  2514  	if b.conn == nil && inTests {
  2515  		return
  2516  	}
  2517  	b.conn.noteBodyReadFromHandler(b.stream, n, err)
  2518  	return
  2519  }
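
        // Note on needsContinue above: for a request that carried
        // "Expect: 100-continue", the interim 100 response is written only when the
        // handler first reads from the body, mirroring net/http's HTTP/1 behavior.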
  2520  
  2521  // responseWriter is the http.ResponseWriter implementation. It's
  2522  // intentionally small (1 pointer wide) to minimize garbage. The
  2523  // responseWriterState pointer inside is zeroed at the end of a
  2524  // request (in handlerDone) and calls on the responseWriter thereafter
  2525  // simply crash (caller's mistake), but the much larger responseWriterState
  2526  // and buffers are reused between multiple requests.
  2527  type responseWriter struct {
  2528  	rws *responseWriterState
  2529  }
  2530  
  2531  // Optional http.ResponseWriter interfaces implemented.
  2532  var (
  2533  	_ http.CloseNotifier = (*responseWriter)(nil)
  2534  	_ http.Flusher       = (*responseWriter)(nil)
  2535  	_ stringWriter       = (*responseWriter)(nil)
  2536  )
  2537  
  2538  type responseWriterState struct {
  2539  	// immutable within a request:
  2540  	stream *stream
  2541  	req    *http.Request
  2542  	conn   *serverConn
  2543  
  2544  	// TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
  2545  	bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
  2546  
  2547  	// mutated by http.Handler goroutine:
  2548  	handlerHeader http.Header // nil until called
  2549  	snapHeader    http.Header // snapshot of handlerHeader at WriteHeader time
  2550  	trailers      []string    // set in writeChunk
  2551  	status        int         // status code passed to WriteHeader
  2552  	wroteHeader   bool        // WriteHeader called (explicitly or implicitly). Not necessarily sent to the client yet.
  2553  	sentHeader    bool        // have we sent the header frame?
  2554  	handlerDone   bool        // handler has finished
  2555  
  2556  	sentContentLen int64 // non-zero if handler set a Content-Length header
  2557  	wroteBytes     int64
  2558  
  2559  	closeNotifierMu sync.Mutex // guards closeNotifierCh
  2560  	closeNotifierCh chan bool  // nil until first used
  2561  }
  2562  
  2563  type chunkWriter struct{ rws *responseWriterState }
  2564  
  2565  func (cw chunkWriter) Write(p []byte) (n int, err error) {
  2566  	n, err = cw.rws.writeChunk(p)
  2567  	if err == errStreamClosed {
  2568  		// If writing failed because the stream has been closed,
  2569  		// return the reason it was closed.
  2570  		err = cw.rws.stream.closeErr
  2571  	}
  2572  	return n, err
  2573  }
  2574  
  2575  func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 }
  2576  
  2577  func (rws *responseWriterState) hasNonemptyTrailers() bool {
  2578  	for _, trailer := range rws.trailers {
  2579  		if _, ok := rws.handlerHeader[trailer]; ok {
  2580  			return true
  2581  		}
  2582  	}
  2583  	return false
  2584  }
  2585  
  2586  // declareTrailer is called for each Trailer header when the
  2587  // response header is written. It notes that a header will need to be
  2588  // written in the trailers at the end of the response.
  2589  func (rws *responseWriterState) declareTrailer(k string) {
  2590  	k = http.CanonicalHeaderKey(k)
  2591  	if !httpguts.ValidTrailerHeader(k) {
  2592  		// Forbidden by RFC 7230, section 4.1.2.
  2593  		rws.conn.logf("ignoring invalid trailer %q", k)
  2594  		return
  2595  	}
  2596  	if !strSliceContains(rws.trailers, k) {
  2597  		rws.trailers = append(rws.trailers, k)
  2598  	}
  2599  }
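
        // A handler-side sketch of the predeclared-trailer path that feeds
        // declareTrailer (X-Checksum, body and sum are placeholders):
        //
        //	w.Header().Set("Trailer", "X-Checksum")
        //	w.WriteHeader(http.StatusOK)
        //	w.Write(body)
        //	w.Header().Set("X-Checksum", sum) // emitted as a trailer after the body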
  2600  
  2601  // writeChunk writes chunks from the bufio.Writer. But because
  2602  // bufio.Writer may bypass its chunking, sometimes p may be
  2603  // arbitrarily large.
  2604  //
  2605  // writeChunk is also responsible (on the first chunk) for sending the
  2606  // HEADER response.
  2607  func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
  2608  	if !rws.wroteHeader {
  2609  		rws.writeHeader(200)
  2610  	}
  2611  
  2612  	if rws.handlerDone {
  2613  		rws.promoteUndeclaredTrailers()
  2614  	}
  2615  
  2616  	isHeadResp := rws.req.Method == "HEAD"
  2617  	if !rws.sentHeader {
  2618  		rws.sentHeader = true
  2619  		var ctype, clen string
  2620  		if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
  2621  			rws.snapHeader.Del("Content-Length")
  2622  			if cl, err := strconv.ParseUint(clen, 10, 63); err == nil {
  2623  				rws.sentContentLen = int64(cl)
  2624  			} else {
  2625  				clen = ""
  2626  			}
  2627  		}
  2628  		_, hasContentLength := rws.snapHeader["Content-Length"]
  2629  		if !hasContentLength && clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
  2630  			clen = strconv.Itoa(len(p))
  2631  		}
  2632  		_, hasContentType := rws.snapHeader["Content-Type"]
  2633  		// If the Content-Encoding is non-blank, we shouldn't
  2634  		// sniff the body. See Issue golang.org/issue/31753.
  2635  		ce := rws.snapHeader.Get("Content-Encoding")
  2636  		hasCE := len(ce) > 0
  2637  		if !hasCE && !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 {
  2638  			ctype = http.DetectContentType(p)
  2639  		}
  2640  		var date string
  2641  		if _, ok := rws.snapHeader["Date"]; !ok {
  2642  			// TODO(bradfitz): be faster here, like net/http? measure.
  2643  			date = time.Now().UTC().Format(http.TimeFormat)
  2644  		}
  2645  
  2646  		for _, v := range rws.snapHeader["Trailer"] {
  2647  			foreachHeaderElement(v, rws.declareTrailer)
  2648  		}
  2649  
  2650  		// "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2),
  2651  		// but respect "Connection" == "close" to mean sending a GOAWAY and tearing
  2652  		// down the TCP connection when idle, like we do for HTTP/1.
  2653  		// TODO: remove more Connection-specific header fields here, in addition
  2654  		// to "Connection".
  2655  		if _, ok := rws.snapHeader["Connection"]; ok {
  2656  			v := rws.snapHeader.Get("Connection")
  2657  			delete(rws.snapHeader, "Connection")
  2658  			if v == "close" {
  2659  				rws.conn.startGracefulShutdown()
  2660  			}
  2661  		}
  2662  
  2663  		endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
  2664  		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
  2665  			streamID:      rws.stream.id,
  2666  			httpResCode:   rws.status,
  2667  			h:             rws.snapHeader,
  2668  			endStream:     endStream,
  2669  			contentType:   ctype,
  2670  			contentLength: clen,
  2671  			date:          date,
  2672  		})
  2673  		if err != nil {
  2674  			return 0, err
  2675  		}
  2676  		if endStream {
  2677  			return 0, nil
  2678  		}
  2679  	}
  2680  	if isHeadResp {
  2681  		return len(p), nil
  2682  	}
  2683  	if len(p) == 0 && !rws.handlerDone {
  2684  		return 0, nil
  2685  	}
  2686  
  2687  	// only send trailers if they have actually been defined by the
  2688  	// server handler.
  2689  	hasNonemptyTrailers := rws.hasNonemptyTrailers()
  2690  	endStream := rws.handlerDone && !hasNonemptyTrailers
  2691  	if len(p) > 0 || endStream {
  2692  		// only send a 0 byte DATA frame if we're ending the stream.
  2693  		if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
  2694  			return 0, err
  2695  		}
  2696  	}
  2697  
  2698  	if rws.handlerDone && hasNonemptyTrailers {
  2699  		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
  2700  			streamID:  rws.stream.id,
  2701  			h:         rws.handlerHeader,
  2702  			trailers:  rws.trailers,
  2703  			endStream: true,
  2704  		})
  2705  		return len(p), err
  2706  	}
  2707  	return len(p), nil
  2708  }
  2709  
  2710  // TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
  2711  // that, if present, signals that the map entry is actually for
  2712  // the response trailers, and not the response headers. The prefix
  2713  // is stripped after the ServeHTTP call finishes and the values are
  2714  // sent in the trailers.
  2715  //
  2716  // This mechanism is intended only for trailers that are not known
  2717  // prior to the headers being written. If the set of trailers is fixed
  2718  // or known before the header is written, the normal Go trailers mechanism
  2719  // is preferred:
  2720  //
  2721  //	https://golang.org/pkg/net/http/#ResponseWriter
  2722  //	https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
  2723  const TrailerPrefix = "Trailer:"
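
        // A hedged usage sketch of the prefix mechanism (Grpc-Status and data are
        // placeholders; nothing needs to be predeclared):
        //
        //	func handler(w http.ResponseWriter, r *http.Request) {
        //		w.Write(data)                              // headers flush with the body
        //		w.Header().Set("Trailer:Grpc-Status", "0") // sent as trailer "Grpc-Status"
        //	}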
  2724  
  2725  // promoteUndeclaredTrailers permits http.Handlers to set trailers
  2726  // after the header has already been flushed. Because the Go
  2727  // ResponseWriter interface has no way to set Trailers (only the
  2728  // Header), and because we didn't want to expand the ResponseWriter
  2729  // interface, and because nobody used trailers, and because RFC 7230
  2730  // says you SHOULD (but not must) predeclare any trailers in the
  2731  // header, the official ResponseWriter rules said trailers in Go must
  2732  // be predeclared, and then we reuse the same ResponseWriter.Header()
  2733  // map to mean both Headers and Trailers. When it's time to write the
  2734  // Trailers, we pick out the fields of Headers that were declared as
  2735  // trailers. That worked for a while, until we found the first major
  2736  // user of Trailers in the wild: gRPC (using them only over http2),
  2737  // and gRPC libraries permit setting trailers mid-stream without
  2738  // predeclaring them. So: change of plans. We still permit the old
  2739  // way, but we also permit this hack: if a Header() key begins with
  2740  // "Trailer:", the suffix of that key is a Trailer. Because ':' is an
  2741  // invalid token byte anyway, there is no ambiguity. (And it's already
  2742  // filtered out) It's mildly hacky, but not terrible.
  2743  // filtered out.) It's mildly hacky, but not terrible.
  2744  // This method runs after the Handler is done and promotes any Header
  2745  // fields to be trailers.
  2746  func (rws *responseWriterState) promoteUndeclaredTrailers() {
  2747  	for k, vv := range rws.handlerHeader {
  2748  		if !strings.HasPrefix(k, TrailerPrefix) {
  2749  			continue
  2750  		}
  2751  		trailerKey := strings.TrimPrefix(k, TrailerPrefix)
  2752  		rws.declareTrailer(trailerKey)
  2753  		rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
  2754  	}
  2755  
  2756  	if len(rws.trailers) > 1 {
  2757  		sorter := sorterPool.Get().(*sorter)
  2758  		sorter.SortStrings(rws.trailers)
  2759  		sorterPool.Put(sorter)
  2760  	}
  2761  }
  2762  
  2763  func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
  2764  	st := w.rws.stream
  2765  	if !deadline.IsZero() && deadline.Before(time.Now()) {
  2766  		// If we're setting a deadline in the past, close the request body
  2767  		// immediately so reads after SetReadDeadline returns will fail.
  2768  		st.onReadTimeout()
  2769  		return nil
  2770  	}
  2771  	w.rws.conn.sendServeMsg(func(sc *serverConn) {
  2772  		if st.readDeadline != nil {
  2773  			if !st.readDeadline.Stop() {
  2774  				// Deadline already exceeded, or stream has been closed.
  2775  				return
  2776  			}
  2777  		}
  2778  		if deadline.IsZero() {
  2779  			st.readDeadline = nil
  2780  		} else if st.readDeadline == nil {
  2781  			st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
  2782  		} else {
  2783  			st.readDeadline.Reset(deadline.Sub(time.Now()))
  2784  		}
  2785  	})
  2786  	return nil
  2787  }
  2788  
  2789  func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
  2790  	st := w.rws.stream
  2791  	if !deadline.IsZero() && deadline.Before(time.Now()) {
  2792  		// If we're setting a deadline in the past, reset the stream immediately
  2793  		// so writes after SetWriteDeadline returns will fail.
  2794  		st.onWriteTimeout()
  2795  		return nil
  2796  	}
  2797  	w.rws.conn.sendServeMsg(func(sc *serverConn) {
  2798  		if st.writeDeadline != nil {
  2799  			if !st.writeDeadline.Stop() {
  2800  				// Deadline already exceeded, or stream has been closed.
  2801  				return
  2802  			}
  2803  		}
  2804  		if deadline.IsZero() {
  2805  			st.writeDeadline = nil
  2806  		} else if st.writeDeadline == nil {
  2807  			st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
  2808  		} else {
  2809  			st.writeDeadline.Reset(deadline.Sub(time.Now()))
  2810  		}
  2811  	})
  2812  	return nil
  2813  }
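
        // Handlers typically reach SetReadDeadline and SetWriteDeadline through
        // net/http's ResponseController (Go 1.20+); a minimal sketch:
        //
        //	func handler(w http.ResponseWriter, r *http.Request) {
        //		rc := http.NewResponseController(w)
        //		rc.SetReadDeadline(time.Now().Add(30 * time.Second))
        //		rc.SetWriteDeadline(time.Now().Add(30 * time.Second))
        //		io.Copy(w, r.Body)
        //	}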
  2814  
  2815  func (w *responseWriter) Flush() {
  2816  	w.FlushError()
  2817  }
  2818  
  2819  func (w *responseWriter) FlushError() error {
  2820  	rws := w.rws
  2821  	if rws == nil {
  2822  		panic("Header called after Handler finished")
  2823  	}
  2824  	var err error
  2825  	if rws.bw.Buffered() > 0 {
  2826  		err = rws.bw.Flush()
  2827  	} else {
  2828  		// The bufio.Writer won't call chunkWriter.Write
  2829  		// (writeChunk with zero bytes), so we have to do it
  2830  		// ourselves to force the HTTP response header and/or
  2831  		// final DATA frame (with END_STREAM) to be sent.
  2832  		_, err = chunkWriter{rws}.Write(nil)
  2833  		if err == nil {
  2834  			select {
  2835  			case <-rws.stream.cw:
  2836  				err = rws.stream.closeErr
  2837  			default:
  2838  			}
  2839  		}
  2840  	}
  2841  	return err
  2842  }
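
        // Illustrative sketch (not part of the original file): a streaming handler
        // pushes buffered data onto the wire between writes via http.Flusher, which
        // on HTTP/2 flushes rws.bw and forces a DATA frame out. Assumes fmt, net/http
        // and time imports; counts and delays are arbitrary.
        //
        //	func handler(w http.ResponseWriter, r *http.Request) {
        //		for i := 0; i < 3; i++ {
        //			fmt.Fprintf(w, "event %d\n", i)
        //			if f, ok := w.(http.Flusher); ok {
        //				f.Flush()
        //			}
        //			time.Sleep(time.Second)
        //		}
        //	}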
  2843  
  2844  func (w *responseWriter) CloseNotify() <-chan bool {
  2845  	rws := w.rws
  2846  	if rws == nil {
  2847  		panic("CloseNotify called after Handler finished")
  2848  	}
  2849  	rws.closeNotifierMu.Lock()
  2850  	ch := rws.closeNotifierCh
  2851  	if ch == nil {
  2852  		ch = make(chan bool, 1)
  2853  		rws.closeNotifierCh = ch
  2854  		cw := rws.stream.cw
  2855  		go func() {
  2856  			cw.Wait() // wait for close
  2857  			ch <- true
  2858  		}()
  2859  	}
  2860  	rws.closeNotifierMu.Unlock()
  2861  	return ch
  2862  }
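
        // Illustrative sketch (not part of the original file): CloseNotify predates
        // request contexts; a new handler can watch r.Context().Done() instead, which
        // is tied to the same stream-close event. The timeout here is arbitrary.
        //
        //	func handler(w http.ResponseWriter, r *http.Request) {
        //		select {
        //		case <-r.Context().Done():
        //			return // client went away or the stream was reset
        //		case <-time.After(30 * time.Second):
        //			io.WriteString(w, "done")
        //		}
        //	}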
  2863  
  2864  func (w *responseWriter) Header() http.Header {
  2865  	rws := w.rws
  2866  	if rws == nil {
  2867  		panic("Header called after Handler finished")
  2868  	}
  2869  	if rws.handlerHeader == nil {
  2870  		rws.handlerHeader = make(http.Header)
  2871  	}
  2872  	return rws.handlerHeader
  2873  }
  2874  
  2875  // checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode.
  2876  func checkWriteHeaderCode(code int) {
  2877  	// Issue 22880: require valid WriteHeader status codes.
  2878  	// For now we only enforce that it's three digits.
  2879  	// In the future we might block things over 599 (600 and above aren't defined
  2880  	// at http://httpwg.org/specs/rfc7231.html#status.codes).
  2881  	// But for now any three digits.
  2882  	//
  2883  	// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
  2884  	// no equivalent bogus thing we can realistically send in HTTP/2,
  2885  	// so we'll consistently panic instead and help people find their bugs
  2886  	// early. (We can't return an error from WriteHeader even if we wanted to.)
  2887  	if code < 100 || code > 999 {
  2888  		panic(fmt.Sprintf("invalid WriteHeader code %v", code))
  2889  	}
  2890  }
  2891  
  2892  func (w *responseWriter) WriteHeader(code int) {
  2893  	rws := w.rws
  2894  	if rws == nil {
  2895  		panic("WriteHeader called after Handler finished")
  2896  	}
  2897  	rws.writeHeader(code)
  2898  }
  2899  
  2900  func (rws *responseWriterState) writeHeader(code int) {
  2901  	if rws.wroteHeader {
  2902  		return
  2903  	}
  2904  
  2905  	checkWriteHeaderCode(code)
  2906  
  2907  	// Handle informational headers
  2908  	if code >= 100 && code <= 199 {
  2909  		// Per RFC 8297 we must not clear the current header map
  2910  		h := rws.handlerHeader
  2911  
  2912  		_, cl := h["Content-Length"]
  2913  		_, te := h["Transfer-Encoding"]
  2914  		if cl || te {
  2915  			h = h.Clone()
  2916  			h.Del("Content-Length")
  2917  			h.Del("Transfer-Encoding")
  2918  		}
  2919  
  2920  		rws.conn.writeHeaders(rws.stream, &writeResHeaders{
  2921  			streamID:    rws.stream.id,
  2922  			httpResCode: code,
  2923  			h:           h,
  2924  			endStream:   rws.handlerDone && !rws.hasTrailers(),
  2925  		})
  2926  
  2927  		return
  2928  	}
  2929  
  2930  	rws.wroteHeader = true
  2931  	rws.status = code
  2932  	if len(rws.handlerHeader) > 0 {
  2933  		rws.snapHeader = cloneHeader(rws.handlerHeader)
  2934  	}
  2935  }
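
        // Illustrative sketch (not part of the original file): the informational
        // branch above is the path a 103 Early Hints response takes (on a Go version
        // that supports writing 1xx responses). The header map is not cleared, so
        // hints set before the 1xx also appear on the final response.
        //
        //	func handler(w http.ResponseWriter, r *http.Request) {
        //		w.Header().Set("Link", "</static/app.css>; rel=preload; as=style")
        //		w.WriteHeader(http.StatusEarlyHints) // 103; header map preserved
        //		// ... render the page ...
        //		w.WriteHeader(http.StatusOK)
        //	}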
  2936  
  2937  func cloneHeader(h http.Header) http.Header {
  2938  	h2 := make(http.Header, len(h))
  2939  	for k, vv := range h {
  2940  		vv2 := make([]string, len(vv))
  2941  		copy(vv2, vv)
  2942  		h2[k] = vv2
  2943  	}
  2944  	return h2
  2945  }
  2946  
  2947  // The Life Of A Write is like this:
  2948  //
  2949  // * Handler calls w.Write or w.WriteString ->
  2950  // * -> rws.bw (*bufio.Writer) ->
  2951  // * (Handler might call Flush)
  2952  // * -> chunkWriter{rws}
  2953  // * -> responseWriterState.writeChunk(p []byte) (most of the magic; see comment there)
  2955  func (w *responseWriter) Write(p []byte) (n int, err error) {
  2956  	return w.write(len(p), p, "")
  2957  }
  2958  
  2959  func (w *responseWriter) WriteString(s string) (n int, err error) {
  2960  	return w.write(len(s), nil, s)
  2961  }
  2962  
  2963  // Only one of dataB or dataS is set: Write passes dataB, WriteString passes dataS.
  2964  func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
  2965  	rws := w.rws
  2966  	if rws == nil {
  2967  		panic("Write called after Handler finished")
  2968  	}
  2969  	if !rws.wroteHeader {
  2970  		w.WriteHeader(200)
  2971  	}
  2972  	if !bodyAllowedForStatus(rws.status) {
  2973  		return 0, http.ErrBodyNotAllowed
  2974  	}
  2975  	rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
  2976  	if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
  2977  		// TODO: send a RST_STREAM
  2978  		return 0, errors.New("http2: handler wrote more than declared Content-Length")
  2979  	}
  2980  
  2981  	if dataB != nil {
  2982  		return rws.bw.Write(dataB)
  2983  	} else {
  2984  		return rws.bw.WriteString(dataS)
  2985  	}
  2986  }
  2987  
  2988  func (w *responseWriter) handlerDone() {
  2989  	rws := w.rws
  2990  	rws.handlerDone = true
  2991  	w.Flush()
  2992  	w.rws = nil
  2993  	responseWriterStatePool.Put(rws)
  2994  }
  2995  
  2996  // Push errors.
  2997  var (
  2998  	ErrRecursivePush    = errors.New("http2: recursive push not allowed")
  2999  	ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS")
  3000  )
  3001  
  3002  var _ http.Pusher = (*responseWriter)(nil)
  3003  
  3004  func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
  3005  	st := w.rws.stream
  3006  	sc := st.sc
  3007  	sc.serveG.checkNotOn()
  3008  
  3009  	// No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream."
  3010  	// http://tools.ietf.org/html/rfc7540#section-6.6
  3011  	if st.isPushed() {
  3012  		return ErrRecursivePush
  3013  	}
  3014  
  3015  	if opts == nil {
  3016  		opts = new(http.PushOptions)
  3017  	}
  3018  
  3019  	// Default options.
  3020  	if opts.Method == "" {
  3021  		opts.Method = "GET"
  3022  	}
  3023  	if opts.Header == nil {
  3024  		opts.Header = http.Header{}
  3025  	}
  3026  	wantScheme := "http"
  3027  	if w.rws.req.TLS != nil {
  3028  		wantScheme = "https"
  3029  	}
  3030  
  3031  	// Validate the request.
  3032  	u, err := url.Parse(target)
  3033  	if err != nil {
  3034  		return err
  3035  	}
  3036  	if u.Scheme == "" {
  3037  		if !strings.HasPrefix(target, "/") {
  3038  			return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
  3039  		}
  3040  		u.Scheme = wantScheme
  3041  		u.Host = w.rws.req.Host
  3042  	} else {
  3043  		if u.Scheme != wantScheme {
  3044  			return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
  3045  		}
  3046  		if u.Host == "" {
  3047  			return errors.New("URL must have a host")
  3048  		}
  3049  	}
  3050  	for k := range opts.Header {
  3051  		if strings.HasPrefix(k, ":") {
  3052  			return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
  3053  		}
  3054  		// These headers are meaningful only if the request has a body,
  3055  		// but PUSH_PROMISE requests cannot have a body.
  3056  		// http://tools.ietf.org/html/rfc7540#section-8.2
  3057  		// Also disallow Host, since the promised URL must be absolute.
  3058  		if asciiEqualFold(k, "content-length") ||
  3059  			asciiEqualFold(k, "content-encoding") ||
  3060  			asciiEqualFold(k, "trailer") ||
  3061  			asciiEqualFold(k, "te") ||
  3062  			asciiEqualFold(k, "expect") ||
  3063  			asciiEqualFold(k, "host") {
  3064  			return fmt.Errorf("promised request headers cannot include %q", k)
  3065  		}
  3066  	}
  3067  	if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {
  3068  		return err
  3069  	}
  3070  
  3071  	// The RFC effectively limits promised requests to GET and HEAD:
  3072  	// "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
  3073  	// http://tools.ietf.org/html/rfc7540#section-8.2
  3074  	if opts.Method != "GET" && opts.Method != "HEAD" {
  3075  		return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
  3076  	}
  3077  
  3078  	msg := &startPushRequest{
  3079  		parent: st,
  3080  		method: opts.Method,
  3081  		url:    u,
  3082  		header: cloneHeader(opts.Header),
  3083  		done:   errChanPool.Get().(chan error),
  3084  	}
  3085  
  3086  	select {
  3087  	case <-sc.doneServing:
  3088  		return errClientDisconnected
  3089  	case <-st.cw:
  3090  		return errStreamClosed
  3091  	case sc.serveMsgCh <- msg:
  3092  	}
  3093  
  3094  	select {
  3095  	case <-sc.doneServing:
  3096  		return errClientDisconnected
  3097  	case <-st.cw:
  3098  		return errStreamClosed
  3099  	case err := <-msg.done:
  3100  		errChanPool.Put(msg.done)
  3101  		return err
  3102  	}
  3103  }
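
        // Illustrative sketch (not part of the original file): a handler initiates a
        // push through the http.Pusher assertion; errors surfaced here include
        // ErrRecursivePush, ErrPushLimitReached, and http.ErrNotSupported when the
        // peer has disabled push. The target path is arbitrary.
        //
        //	func handler(w http.ResponseWriter, r *http.Request) {
        //		if pusher, ok := w.(http.Pusher); ok {
        //			opts := &http.PushOptions{
        //				Header: http.Header{"Accept-Encoding": r.Header["Accept-Encoding"]},
        //			}
        //			if err := pusher.Push("/static/app.js", opts); err != nil {
        //				log.Printf("push failed: %v", err) // non-fatal; client falls back to a normal fetch
        //			}
        //		}
        //		// ... serve the HTML that references /static/app.js ...
        //	}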
  3104  
  3105  type startPushRequest struct {
  3106  	parent *stream
  3107  	method string
  3108  	url    *url.URL
  3109  	header http.Header
  3110  	done   chan error
  3111  }
  3112  
  3113  func (sc *serverConn) startPush(msg *startPushRequest) {
  3114  	sc.serveG.check()
  3115  
  3116  	// http://tools.ietf.org/html/rfc7540#section-6.6.
  3117  	// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
  3118  	// is in either the "open" or "half-closed (remote)" state.
  3119  	if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
  3120  		// responseWriter.Push checks that the stream is peer-initiated.
  3121  		msg.done <- errStreamClosed
  3122  		return
  3123  	}
  3124  
  3125  	// http://tools.ietf.org/html/rfc7540#section-6.6.
  3126  	if !sc.pushEnabled {
  3127  		msg.done <- http.ErrNotSupported
  3128  		return
  3129  	}
  3130  
  3131  	// PUSH_PROMISE frames must be sent in increasing order by stream ID, so
  3132  	// we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
  3133  	// is written. Once the ID is allocated, we start the request handler.
  3134  	allocatePromisedID := func() (uint32, error) {
  3135  		sc.serveG.check()
  3136  
  3137  		// Check this again, just in case. Technically, we might have received
  3138  		// an updated SETTINGS by the time we got around to writing this frame.
  3139  		if !sc.pushEnabled {
  3140  			return 0, http.ErrNotSupported
  3141  		}
  3142  		// http://tools.ietf.org/html/rfc7540#section-6.5.2.
  3143  		if sc.curPushedStreams+1 > sc.clientMaxStreams {
  3144  			return 0, ErrPushLimitReached
  3145  		}
  3146  
  3147  		// http://tools.ietf.org/html/rfc7540#section-5.1.1.
  3148  		// Streams initiated by the server MUST use even-numbered identifiers.
  3149  		// A server that is unable to establish a new stream identifier can send a GOAWAY
  3150  		// frame so that the client is forced to open a new connection for new streams.
  3151  		if sc.maxPushPromiseID+2 >= 1<<31 {
  3152  			sc.startGracefulShutdownInternal()
  3153  			return 0, ErrPushLimitReached
  3154  		}
  3155  		sc.maxPushPromiseID += 2
  3156  		promisedID := sc.maxPushPromiseID
  3157  
  3158  		// http://tools.ietf.org/html/rfc7540#section-8.2.
  3159  		// Strictly speaking, the new stream should start in "reserved (local)", then
  3160  		// transition to "half closed (remote)" after sending the initial HEADERS, but
  3161  		// we start in "half closed (remote)" for simplicity.
  3162  		// See further comments at the definition of stateHalfClosedRemote.
  3163  		promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
  3164  		rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{
  3165  			method:    msg.method,
  3166  			scheme:    msg.url.Scheme,
  3167  			authority: msg.url.Host,
  3168  			path:      msg.url.RequestURI(),
  3169  			header:    cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
  3170  		})
  3171  		if err != nil {
  3172  			// Should not happen, since we've already validated msg.url.
  3173  			panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
  3174  		}
  3175  
  3176  		sc.curHandlers++
  3177  		go sc.runHandler(rw, req, sc.handler.ServeHTTP)
  3178  		return promisedID, nil
  3179  	}
  3180  
  3181  	sc.writeFrame(FrameWriteRequest{
  3182  		write: &writePushPromise{
  3183  			streamID:           msg.parent.id,
  3184  			method:             msg.method,
  3185  			url:                msg.url,
  3186  			h:                  msg.header,
  3187  			allocatePromisedID: allocatePromisedID,
  3188  		},
  3189  		stream: msg.parent,
  3190  		done:   msg.done,
  3191  	})
  3192  }
  3193  
  3194  // foreachHeaderElement splits v according to the "#rule" construction
  3195  // in RFC 7230 section 7 and calls fn for each non-empty element.
  3196  func foreachHeaderElement(v string, fn func(string)) {
  3197  	v = textproto.TrimString(v)
  3198  	if v == "" {
  3199  		return
  3200  	}
  3201  	if !strings.Contains(v, ",") {
  3202  		fn(v)
  3203  		return
  3204  	}
  3205  	for _, f := range strings.Split(v, ",") {
  3206  		if f = textproto.TrimString(f); f != "" {
  3207  			fn(f)
  3208  		}
  3209  	}
  3210  }
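
        // Illustrative sketch (not part of the original file): splitting a
        // comma-separated header value into its non-empty, trimmed elements.
        //
        //	foreachHeaderElement(" gzip, , trailers ", func(e string) {
        //		fmt.Println(e) // prints "gzip", then "trailers"
        //	})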
  3211  
  3212  // From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2
  3213  var connHeaders = []string{
  3214  	"Connection",
  3215  	"Keep-Alive",
  3216  	"Proxy-Connection",
  3217  	"Transfer-Encoding",
  3218  	"Upgrade",
  3219  }
  3220  
  3221  // checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request,
  3222  // per RFC 7540 Section 8.1.2.2.
  3223  // The returned error is reported to users.
  3224  func checkValidHTTP2RequestHeaders(h http.Header) error {
  3225  	for _, k := range connHeaders {
  3226  		if _, ok := h[k]; ok {
  3227  			return fmt.Errorf("request header %q is not valid in HTTP/2", k)
  3228  		}
  3229  	}
  3230  	te := h["Te"]
  3231  	if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
  3232  		return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
  3233  	}
  3234  	return nil
  3235  }
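
        // Illustrative sketch (not part of the original file): header sets this
        // check rejects versus accepts.
        //
        //	checkValidHTTP2RequestHeaders(http.Header{"Connection": {"keep-alive"}}) // error: connection-specific header
        //	checkValidHTTP2RequestHeaders(http.Header{"Te": {"gzip"}})               // error: TE may only be "trailers"
        //	checkValidHTTP2RequestHeaders(http.Header{"Te": {"trailers"}})           // ok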
  3236  
  3237  func new400Handler(err error) http.HandlerFunc {
  3238  	return func(w http.ResponseWriter, r *http.Request) {
  3239  		http.Error(w, err.Error(), http.StatusBadRequest)
  3240  	}
  3241  }
  3242  
  3243  // h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
  3244  // disabled. See comments on h1ServerShutdownChan above for why
  3245  // the code is written this way.
  3246  func h1ServerKeepAlivesDisabled(hs *http.Server) bool {
  3247  	var x interface{} = hs
  3248  	type I interface {
  3249  		doKeepAlives() bool
  3250  	}
  3251  	if hs, ok := x.(I); ok {
  3252  		return !hs.doKeepAlives()
  3253  	}
  3254  	return false
  3255  }
  3256  
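        // Illustrative sketch (not part of the original file): wiring the Server's
        // CountError hook, which the method below calls with a "type_code_name"
        // string built from the error kind, error code, and call site. The
        // errorsTotal counter is a hypothetical stand-in for whatever metrics
        // library is in use.
        //
        //	s := &http2.Server{
        //		CountError: func(errType string) {
        //			errorsTotal.WithLabelValues(errType).Inc() // hypothetical counter
        //		},
        //	}
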
  3257  func (sc *serverConn) countError(name string, err error) error {
  3258  	if sc == nil || sc.srv == nil {
  3259  		return err
  3260  	}
  3261  	f := sc.srv.CountError
  3262  	if f == nil {
  3263  		return err
  3264  	}
  3265  	var typ string
  3266  	var code ErrCode
  3267  	switch e := err.(type) {
  3268  	case ConnectionError:
  3269  		typ = "conn"
  3270  		code = ErrCode(e)
  3271  	case StreamError:
  3272  		typ = "stream"
  3273  		code = ErrCode(e.Code)
  3274  	default:
  3275  		return err
  3276  	}
  3277  	codeStr := errCodeName[code]
  3278  	if codeStr == "" {
  3279  		codeStr = strconv.Itoa(int(code))
  3280  	}
  3281  	f(fmt.Sprintf("%s_%s_%s", typ, codeStr, name))
  3282  	return err
  3283  }