github.com/Kolosok86/http@v0.1.2/http2/server.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // TODO: turn off the serve goroutine when idle, so
     6  // an idle conn only has the readFrames goroutine active. (which could
     7  // also be optimized probably to pin less memory in crypto/tls). This
     8  // would involve tracking when the serve goroutine is active (atomic
     9  // int32 read/CAS probably?) and starting it up when frames arrive,
    10  // and shutting it down when all handlers exit. The occasional PING
    11  // packets could use time.AfterFunc to call sc.wakeStartServeLoop()
    12  // (which is a no-op if already running) and then queue the PING write
    13  // as normal. The serve loop would then exit in most cases (if no
    14  // Handlers running) and not be woken up again until the PING packet
    15  // returns.
    16  
    17  // TODO (maybe): add a mechanism for Handlers to go into
    18  // half-closed-local mode (rw.(io.Closer) test?) but not exit their
    19  // handler, and continue to be able to read from the
    20  // Request.Body. This would be a somewhat semantic change from HTTP/1
    21  // (or at least what we expose in net/http), so I'd probably want to
    22  // add it there too. For now, this package says that returning from
    23  // the Handler ServeHTTP function means you're both done reading and
    24  // done writing, without a way to stop just one or the other.
    25  
    26  package http2
    27  
    28  import (
    29  	"bufio"
    30  	"bytes"
    31  	"context"
    32  	"errors"
    33  	"fmt"
    34  	"io"
    35  	"log"
    36  	"math"
    37  	"net"
    38  	"net/textproto"
    39  	"net/url"
    40  	"os"
    41  	"reflect"
    42  	"runtime"
    43  	"strconv"
    44  	"strings"
    45  	"sync"
    46  	"time"
    47  
    48  	"github.com/Kolosok86/http"
    49  	tls "github.com/refraction-networking/utls"
    50  	"golang.org/x/net/http/httpguts"
    51  	"golang.org/x/net/http2/hpack"
    52  )
    53  
    54  const (
    55  	prefaceTimeout         = 10 * time.Second
    56  	firstSettingsTimeout   = 2 * time.Second // should be in-flight with preface anyway
    57  	handlerChunkWriteSize  = 4 << 10
    58  	defaultMaxStreams      = 250 // TODO: make this 100 as the GFE seems to?
    59  	maxQueuedControlFrames = 10000
    60  )
    61  
    62  var (
    63  	errClientDisconnected = errors.New("client disconnected")
    64  	errClosedBody         = errors.New("body closed by handler")
    65  	errHandlerComplete    = errors.New("http2: request body closed due to handler exiting")
    66  	errStreamClosed       = errors.New("http2: stream closed")
    67  )
    68  
    69  var responseWriterStatePool = sync.Pool{
    70  	New: func() interface{} {
    71  		rws := &responseWriterState{}
    72  		rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
    73  		return rws
    74  	},
    75  }
    76  
    77  // Test hooks.
    78  var (
    79  	testHookOnConn        func()
    80  	testHookGetServerConn func(*serverConn)
    81  	testHookOnPanicMu     *sync.Mutex // nil except in tests
    82  	testHookOnPanic       func(sc *serverConn, panicVal interface{}) (rePanic bool)
    83  )
    84  
    85  // Server is an HTTP/2 server.
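//
// A minimal configuration sketch (illustrative only; the field values and the
// expvar wiring are assumptions, not recommendations):
//
//	var h2Errors = expvar.NewMap("http2_server_errors")
//
//	h2 := &Server{
//		MaxConcurrentStreams:         250,
//		MaxReadFrameSize:             1 << 20, // must be in [16 KiB, 16 MiB]
//		MaxUploadBufferPerConnection: 1 << 20,
//		IdleTimeout:                  5 * time.Minute,
//		CountError: func(errType string) {
//			h2Errors.Add(errType, 1) // errType is ASCII word characters only
//		},
//	}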
    86  type Server struct {
    87  	// MaxHandlers limits the number of http.Handler ServeHTTP goroutines
    88  	// which may run at a time over all connections.
    89  	// Negative or zero means no limit.
    90  	// TODO: implement
    91  	MaxHandlers int
    92  
    93  	// MaxConcurrentStreams optionally specifies the number of
    94  	// concurrent streams that each client may have open at a
    95  	// time. This is unrelated to the number of http.Handler goroutines
    96  	// which may be active globally, which is MaxHandlers.
    97  	// If zero, MaxConcurrentStreams defaults to at least 100, per
    98  	// the HTTP/2 spec's recommendations.
    99  	MaxConcurrentStreams uint32
   100  
   101  	// MaxDecoderHeaderTableSize optionally specifies the http2
   102  	// SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It
   103  	// informs the remote endpoint of the maximum size of the header compression
   104  	// table used to decode header blocks, in octets. If zero, the default value
   105  	// of 4096 is used.
   106  	MaxDecoderHeaderTableSize uint32
   107  
   108  	// MaxEncoderHeaderTableSize optionally specifies an upper limit for the
   109  	// header compression table used for encoding request headers. Received
   110  	// SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero,
   111  	// the default value of 4096 is used.
   112  	MaxEncoderHeaderTableSize uint32
   113  
   114  	// MaxReadFrameSize optionally specifies the largest frame
   115  	// this server is willing to read. A valid value is between
   116  	// 16k and 16M, inclusive. If zero or otherwise invalid, a
   117  	// default value is used.
   118  	MaxReadFrameSize uint32
   119  
   120  	// PermitProhibitedCipherSuites, if true, permits the use of
   121  	// cipher suites prohibited by the HTTP/2 spec.
   122  	PermitProhibitedCipherSuites bool
   123  
   124  	// IdleTimeout specifies how long until idle clients should be
   125  	// closed with a GOAWAY frame. PING frames are not considered
   126  	// activity for the purposes of IdleTimeout.
   127  	IdleTimeout time.Duration
   128  
   129  	// MaxUploadBufferPerConnection is the size of the initial flow
   130  	// control window for each connection. The HTTP/2 spec does not
   131  	// allow this to be smaller than 65535 or larger than 2^32-1.
   132  	// If the value is outside this range, a default value will be
   133  	// used instead.
   134  	MaxUploadBufferPerConnection int32
   135  
   136  	// MaxUploadBufferPerStream is the size of the initial flow control
   137  	// window for each stream. The HTTP/2 spec does not allow this to
   138  	// be larger than 2^32-1. If the value is zero or larger than the
   139  	// maximum, a default value will be used instead.
   140  	MaxUploadBufferPerStream int32
   141  
   142  	// NewWriteScheduler constructs a write scheduler for a connection.
   143  	// If nil, a default scheduler is chosen.
   144  	NewWriteScheduler func() WriteScheduler
   145  
   146  	// CountError, if non-nil, is called on HTTP/2 server errors.
   147  	// It's intended to increment a metric for monitoring, such
   148  	// as an expvar or Prometheus metric.
   149  	// The errType consists of only ASCII word characters.
   150  	CountError func(errType string)
   151  
   152  	// Internal state. This is a pointer (rather than embedded directly)
   153  	// so that we don't embed a Mutex in this struct, which would make the
   154  	// struct non-copyable and might break some callers.
   155  	state *serverInternalState
   156  }
   157  
   158  func (s *Server) initialConnRecvWindowSize() int32 {
   159  	if s.MaxUploadBufferPerConnection >= initialWindowSize {
   160  		return s.MaxUploadBufferPerConnection
   161  	}
   162  	return 1 << 20
   163  }
   164  
   165  func (s *Server) initialStreamRecvWindowSize() int32 {
   166  	if s.MaxUploadBufferPerStream > 0 {
   167  		return s.MaxUploadBufferPerStream
   168  	}
   169  	return 1 << 20
   170  }
   171  
   172  func (s *Server) maxReadFrameSize() uint32 {
   173  	if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
   174  		return v
   175  	}
   176  	return defaultMaxReadFrameSize
   177  }
   178  
   179  func (s *Server) maxConcurrentStreams() uint32 {
   180  	if v := s.MaxConcurrentStreams; v > 0 {
   181  		return v
   182  	}
   183  	return defaultMaxStreams
   184  }
   185  
   186  func (s *Server) maxDecoderHeaderTableSize() uint32 {
   187  	if v := s.MaxDecoderHeaderTableSize; v > 0 {
   188  		return v
   189  	}
   190  	return initialHeaderTableSize
   191  }
   192  
   193  func (s *Server) maxEncoderHeaderTableSize() uint32 {
   194  	if v := s.MaxEncoderHeaderTableSize; v > 0 {
   195  		return v
   196  	}
   197  	return initialHeaderTableSize
   198  }
   199  
   200  // maxQueuedControlFrames is the maximum number of control frames like
   201  // SETTINGS, PING and RST_STREAM that will be queued for writing before
   202  // the connection is closed to prevent memory exhaustion attacks.
   203  func (s *Server) maxQueuedControlFrames() int {
   204  	// TODO: if anybody asks, add a Server field, and remember to define the
   205  	// behavior of negative values.
   206  	return maxQueuedControlFrames
   207  }
   208  
   209  type serverInternalState struct {
   210  	mu          sync.Mutex
   211  	activeConns map[*serverConn]struct{}
   212  }
   213  
   214  func (s *serverInternalState) registerConn(sc *serverConn) {
   215  	if s == nil {
   216  		return // if the Server was used without calling ConfigureServer
   217  	}
   218  	s.mu.Lock()
   219  	s.activeConns[sc] = struct{}{}
   220  	s.mu.Unlock()
   221  }
   222  
   223  func (s *serverInternalState) unregisterConn(sc *serverConn) {
   224  	if s == nil {
   225  		return // if the Server was used without calling ConfigureServer
   226  	}
   227  	s.mu.Lock()
   228  	delete(s.activeConns, sc)
   229  	s.mu.Unlock()
   230  }
   231  
   232  func (s *serverInternalState) startGracefulShutdown() {
   233  	if s == nil {
   234  		return // if the Server was used without calling ConfigureServer
   235  	}
   236  	s.mu.Lock()
   237  	for sc := range s.activeConns {
   238  		sc.startGracefulShutdown()
   239  	}
   240  	s.mu.Unlock()
   241  }
   242  
   243  // ConfigureServer adds HTTP/2 support to a net/http Server.
   244  //
   245  // The configuration conf may be nil.
   246  //
   247  // ConfigureServer must be called before s begins serving.
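//
// A typical wiring sketch (the mux, address, and certificate paths are
// placeholders, and this assumes the fork's http.Server mirrors net/http's
// ListenAndServeTLS):
//
//	srv := &http.Server{Addr: ":8443", Handler: mux}
//	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
//		log.Fatal(err)
//	}
//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))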
   248  func ConfigureServer(s *http.Server, conf *Server) error {
   249  	if s == nil {
   250  		panic("nil *http.Server")
   251  	}
   252  	if conf == nil {
   253  		conf = new(Server)
   254  	}
   255  	conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
   256  	if h1, h2 := s, conf; h2.IdleTimeout == 0 {
   257  		if h1.IdleTimeout != 0 {
   258  			h2.IdleTimeout = h1.IdleTimeout
   259  		} else {
   260  			h2.IdleTimeout = h1.ReadTimeout
   261  		}
   262  	}
   263  	s.RegisterOnShutdown(conf.state.startGracefulShutdown)
   264  
   265  	if s.TLSConfig == nil {
   266  		s.TLSConfig = new(tls.Config)
   267  	} else if s.TLSConfig.CipherSuites != nil && s.TLSConfig.MinVersion < tls.VersionTLS13 {
   268  		// If they already provided a TLS 1.0–1.2 CipherSuite list, return an
   269  		// error if it is missing ECDHE_RSA_WITH_AES_128_GCM_SHA256 or
   270  		// ECDHE_ECDSA_WITH_AES_128_GCM_SHA256.
   271  		haveRequired := false
   272  		for _, cs := range s.TLSConfig.CipherSuites {
   273  			switch cs {
   274  			case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
   275  				// Alternative MTI cipher to not discourage ECDSA-only servers.
   276  				// See http://golang.org/cl/30721 for further information.
   277  				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
   278  				haveRequired = true
   279  			}
   280  		}
   281  		if !haveRequired {
   282  			return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256)")
   283  		}
   284  	}
   285  
   286  	// Note: not setting MinVersion to tls.VersionTLS12,
   287  	// as we don't want to interfere with HTTP/1.1 traffic
   288  	// on the user's server. We enforce TLS 1.2 later once
   289  	// we accept a connection. Ideally this should be done
   290  	// during next-proto selection, but using TLS <1.2 with
   291  	// HTTP/2 is still the client's bug.
   292  
   293  	s.TLSConfig.PreferServerCipherSuites = true
   294  
   295  	if !strSliceContains(s.TLSConfig.NextProtos, NextProtoTLS) {
   296  		s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
   297  	}
   298  	if !strSliceContains(s.TLSConfig.NextProtos, "http/1.1") {
   299  		s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "http/1.1")
   300  	}
   301  
   302  	if s.TLSNextProto == nil {
   303  		s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
   304  	}
   305  	protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
   306  		if testHookOnConn != nil {
   307  			testHookOnConn()
   308  		}
   309  		// The TLSNextProto interface predates contexts, so
   310  		// the net/http package passes down its per-connection
   311  		// base context via an exported but unadvertised
   312  		// method on the Handler. This is for internal
   313  		// net/http<=>http2 use only.
   314  		var ctx context.Context
   315  		type baseContexter interface {
   316  			BaseContext() context.Context
   317  		}
   318  		if bc, ok := h.(baseContexter); ok {
   319  			ctx = bc.BaseContext()
   320  		}
   321  		conf.ServeConn(c, &ServeConnOpts{
   322  			Context:    ctx,
   323  			Handler:    h,
   324  			BaseConfig: hs,
   325  		})
   326  	}
   327  	s.TLSNextProto[NextProtoTLS] = protoHandler
   328  	return nil
   329  }
   330  
   331  // ServeConnOpts are options for the Server.ServeConn method.
   332  type ServeConnOpts struct {
   333  	// Context is the base context to use.
   334  	// If nil, context.Background is used.
   335  	Context context.Context
   336  
   337  	// BaseConfig optionally sets the base configuration
   338  	// for values. If nil, defaults are used.
   339  	BaseConfig *http.Server
   340  
   341  	// Handler specifies which handler to use for processing
   342  	// requests. If nil, BaseConfig.Handler is used. If BaseConfig
   343  	// or BaseConfig.Handler is nil, http.DefaultServeMux is used.
   344  	Handler http.Handler
   345  
   346  	// UpgradeRequest is an initial request received on a connection
   347  	// undergoing an h2c upgrade. The request body must have been
   348  	// completely read from the connection before calling ServeConn,
   349  	// and the 101 Switching Protocols response written.
   350  	UpgradeRequest *http.Request
   351  
   352  	// Settings is the decoded contents of the HTTP2-Settings header
   353  	// in an h2c upgrade request.
   354  	Settings []byte
   355  
   356  	// SawClientPreface is set if the HTTP/2 connection preface
   357  	// has already been read from the connection.
   358  	SawClientPreface bool
   359  }
   360  
   361  func (o *ServeConnOpts) context() context.Context {
   362  	if o != nil && o.Context != nil {
   363  		return o.Context
   364  	}
   365  	return context.Background()
   366  }
   367  
   368  func (o *ServeConnOpts) baseConfig() *http.Server {
   369  	if o != nil && o.BaseConfig != nil {
   370  		return o.BaseConfig
   371  	}
   372  	return new(http.Server)
   373  }
   374  
   375  func (o *ServeConnOpts) handler() http.Handler {
   376  	if o != nil {
   377  		if o.Handler != nil {
   378  			return o.Handler
   379  		}
   380  		if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
   381  			return o.BaseConfig.Handler
   382  		}
   383  	}
   384  	return http.DefaultServeMux
   385  }
   386  
   387  // ServeConn serves HTTP/2 requests on the provided connection and
   388  // blocks until the connection is no longer readable.
   389  //
   390  // ServeConn starts speaking HTTP/2 assuming that c has not had any
   391  // reads or writes. It writes its initial settings frame and expects
   392  // to be able to read the preface and settings frame from the
   393  // client. If c has a ConnectionState method like a *tls.Conn, the
   394  // ConnectionState is used to verify the TLS ciphersuite and to set
   395  // the Request.TLS field in Handlers.
   396  //
   397  // ServeConn does not support h2c by itself. Any h2c support must be
   398  // implemented in terms of providing a suitably-behaving net.Conn.
   399  //
   400  // The opts parameter is optional. If nil, default values are used.
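//
// A prior-knowledge plaintext sketch (the listener address and handler are
// placeholders; clients are assumed to start speaking HTTP/2 directly, since
// ServeConn performs no upgrade itself):
//
//	ln, err := net.Listen("tcp", "127.0.0.1:8080")
//	if err != nil {
//		log.Fatal(err)
//	}
//	h2 := &http2.Server{}
//	for {
//		conn, err := ln.Accept()
//		if err != nil {
//			log.Fatal(err)
//		}
//		go h2.ServeConn(conn, &http2.ServeConnOpts{Handler: handler})
//	}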
   401  func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
   402  	baseCtx, cancel := serverConnBaseContext(c, opts)
   403  	defer cancel()
   404  
   405  	sc := &serverConn{
   406  		srv:                         s,
   407  		hs:                          opts.baseConfig(),
   408  		conn:                        c,
   409  		baseCtx:                     baseCtx,
   410  		remoteAddrStr:               c.RemoteAddr().String(),
   411  		bw:                          newBufferedWriter(c),
   412  		handler:                     opts.handler(),
   413  		streams:                     make(map[uint32]*stream),
   414  		readFrameCh:                 make(chan readFrameResult),
   415  		wantWriteFrameCh:            make(chan FrameWriteRequest, 8),
   416  		serveMsgCh:                  make(chan interface{}, 8),
   417  		wroteFrameCh:                make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
   418  		bodyReadCh:                  make(chan bodyReadMsg),         // buffering doesn't matter either way
   419  		doneServing:                 make(chan struct{}),
   420  		clientMaxStreams:            math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
   421  		advMaxStreams:               s.maxConcurrentStreams(),
   422  		initialStreamSendWindowSize: initialWindowSize,
   423  		maxFrameSize:                initialMaxFrameSize,
   424  		serveG:                      newGoroutineLock(),
   425  		pushEnabled:                 true,
   426  		sawClientPreface:            opts.SawClientPreface,
   427  	}
   428  
   429  	s.state.registerConn(sc)
   430  	defer s.state.unregisterConn(sc)
   431  
   432  	// The net/http package sets the write deadline from the
   433  	// http.Server.WriteTimeout during the TLS handshake, but then
   434  	// passes the connection off to us with the deadline already set.
   435  	// Write deadlines are set per stream in serverConn.newStream.
   436  	// Disarm the net.Conn write deadline here.
   437  	if sc.hs.WriteTimeout != 0 {
   438  		sc.conn.SetWriteDeadline(time.Time{})
   439  	}
   440  
   441  	if s.NewWriteScheduler != nil {
   442  		sc.writeSched = s.NewWriteScheduler()
   443  	} else {
   444  		sc.writeSched = NewPriorityWriteScheduler(nil)
   445  	}
   446  
   447  	// These start at the RFC-specified defaults. If there is a higher
   448  	// configured value for inflow, that will be updated when we send a
   449  	// WINDOW_UPDATE shortly after sending SETTINGS.
   450  	sc.flow.add(initialWindowSize)
   451  	sc.inflow.init(initialWindowSize)
   452  	sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
   453  	sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
   454  
   455  	fr := NewFramer(sc.bw, c)
   456  	if s.CountError != nil {
   457  		fr.countError = s.CountError
   458  	}
   459  	fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil)
   460  	fr.MaxHeaderListSize = sc.maxHeaderListSize()
   461  	fr.SetMaxReadFrameSize(s.maxReadFrameSize())
   462  	sc.framer = fr
   463  
   464  	if tc, ok := c.(connectionStater); ok {
   465  		sc.tlsState = new(tls.ConnectionState)
   466  		*sc.tlsState = tc.ConnectionState()
   467  		// 9.2 Use of TLS Features
   468  		// An implementation of HTTP/2 over TLS MUST use TLS
   469  		// 1.2 or higher with the restrictions on feature set
   470  		// and cipher suite described in this section. Due to
   471  		// implementation limitations, it might not be
   472  		// possible to fail TLS negotiation. An endpoint MUST
   473  		// immediately terminate an HTTP/2 connection that
   474  		// does not meet the TLS requirements described in
   475  		// this section with a connection error (Section
   476  		// 5.4.1) of type INADEQUATE_SECURITY.
   477  		if sc.tlsState.Version < tls.VersionTLS12 {
   478  			sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
   479  			return
   480  		}
   481  
   482  		if sc.tlsState.ServerName == "" {
   483  			// Client must use SNI, but we don't enforce that anymore,
   484  			// since it was causing problems when connecting to bare IP
   485  			// addresses during development.
   486  			//
   487  			// TODO: optionally enforce? Or enforce at the time we receive
   488  			// a new request, and verify the ServerName matches the :authority?
   489  			// But that precludes proxy situations, perhaps.
   490  			//
   491  			// So for now, do nothing here again.
   492  		}
   493  
   494  		if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
   495  			// "Endpoints MAY choose to generate a connection error
   496  			// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
   497  			// the prohibited cipher suites are negotiated."
   498  			//
   499  			// We choose that. In my opinion, the spec is weak
   500  			// here. It also says both parties must support at least
   501  			// TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
   502  			// excuses here. If we really must, we could allow an
   503  			// "AllowInsecureWeakCiphers" option on the server later.
   504  			// Let's see how it plays out first.
   505  			sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
   506  			return
   507  		}
   508  	}
   509  
   510  	if opts.Settings != nil {
   511  		fr := &SettingsFrame{
   512  			FrameHeader: FrameHeader{valid: true},
   513  			p:           opts.Settings,
   514  		}
   515  		if err := fr.ForeachSetting(sc.processSetting); err != nil {
   516  			sc.rejectConn(ErrCodeProtocol, "invalid settings")
   517  			return
   518  		}
   519  		opts.Settings = nil
   520  	}
   521  
   522  	if hook := testHookGetServerConn; hook != nil {
   523  		hook(sc)
   524  	}
   525  
   526  	if opts.UpgradeRequest != nil {
   527  		sc.upgradeRequest(opts.UpgradeRequest)
   528  		opts.UpgradeRequest = nil
   529  	}
   530  
   531  	sc.serve()
   532  }
   533  
   534  func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
   535  	ctx, cancel = context.WithCancel(opts.context())
   536  	ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
   537  	if hs := opts.baseConfig(); hs != nil {
   538  		ctx = context.WithValue(ctx, http.ServerContextKey, hs)
   539  	}
   540  	return
   541  }
   542  
   543  func (sc *serverConn) rejectConn(err ErrCode, debug string) {
   544  	sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
   545  	// ignoring errors. hanging up anyway.
   546  	sc.framer.WriteGoAway(0, err, []byte(debug))
   547  	sc.bw.Flush()
   548  	sc.conn.Close()
   549  }
   550  
   551  type serverConn struct {
   552  	// Immutable:
   553  	srv              *Server
   554  	hs               *http.Server
   555  	conn             net.Conn
   556  	bw               *bufferedWriter // writing to conn
   557  	handler          http.Handler
   558  	baseCtx          context.Context
   559  	framer           *Framer
   560  	doneServing      chan struct{}          // closed when serverConn.serve ends
   561  	readFrameCh      chan readFrameResult   // written by serverConn.readFrames
   562  	wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
   563  	wroteFrameCh     chan frameWriteResult  // from writeFrameAsync -> serve, tickles more frame writes
   564  	bodyReadCh       chan bodyReadMsg       // from handlers -> serve
   565  	serveMsgCh       chan interface{}       // misc messages & code to send to / run on the serve loop
   566  	flow             outflow                // conn-wide (not stream-specific) outbound flow control
   567  	inflow           inflow                 // conn-wide inbound flow control
   568  	tlsState         *tls.ConnectionState   // shared by all handlers, like net/http
   569  	remoteAddrStr    string
   570  	writeSched       WriteScheduler
   571  
   572  	// Everything following is owned by the serve loop; use serveG.check():
   573  	serveG                      goroutineLock // used to verify funcs are on serve()
   574  	pushEnabled                 bool
   575  	sawClientPreface            bool // preface has already been read, used in h2c upgrade
   576  	sawFirstSettings            bool // got the initial SETTINGS frame after the preface
   577  	needToSendSettingsAck       bool
   578  	unackedSettings             int    // how many SETTINGS have we sent without ACKs?
   579  	queuedControlFrames         int    // control frames in the writeSched queue
   580  	clientMaxStreams            uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
   581  	advMaxStreams               uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
   582  	curClientStreams            uint32 // number of open streams initiated by the client
   583  	curPushedStreams            uint32 // number of open streams initiated by server push
   584  	maxClientStreamID           uint32 // max ever seen from client (odd), or 0 if there have been no client requests
   585  	maxPushPromiseID            uint32 // ID of the last push promise (even), or 0 if there have been no pushes
   586  	streams                     map[uint32]*stream
   587  	initialStreamSendWindowSize int32
   588  	maxFrameSize                int32
   589  	peerMaxHeaderListSize       uint32            // zero means unknown (default)
   590  	canonHeader                 map[string]string // http2-lower-case -> Go-Canonical-Case
   591  	canonHeaderKeysSize         int               // canonHeader keys size in bytes
   592  	writingFrame                bool              // started writing a frame (on serve goroutine or separate)
   593  	writingFrameAsync           bool              // started a frame on its own goroutine but haven't heard back on wroteFrameCh
   594  	needsFrameFlush             bool              // last frame write wasn't a flush
   595  	inGoAway                    bool              // we've started to or sent GOAWAY
   596  	inFrameScheduleLoop         bool              // whether we're in the scheduleFrameWrite loop
   597  	needToSendGoAway            bool              // we need to schedule a GOAWAY frame write
   598  	goAwayCode                  ErrCode
   599  	shutdownTimer               *time.Timer // nil until used
   600  	idleTimer                   *time.Timer // nil if unused
   601  
   602  	// Owned by the writeFrameAsync goroutine:
   603  	headerWriteBuf bytes.Buffer
   604  	hpackEncoder   *hpack.Encoder
   605  
   606  	// Used by startGracefulShutdown.
   607  	shutdownOnce sync.Once
   608  }
   609  
   610  func (sc *serverConn) maxHeaderListSize() uint32 {
   611  	n := sc.hs.MaxHeaderBytes
   612  	if n <= 0 {
   613  		n = http.DefaultMaxHeaderBytes
   614  	}
   615  	// http2's count is in a slightly different unit and includes 32 bytes per pair.
   616  	// So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
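	// For example, with net/http's DefaultMaxHeaderBytes (1 << 20), this
	// returns 1<<20 + 10*32 = 1048896.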
   617  	const perFieldOverhead = 32 // per http2 spec
   618  	const typicalHeaders = 10   // conservative
   619  	return uint32(n + typicalHeaders*perFieldOverhead)
   620  }
   621  
   622  func (sc *serverConn) curOpenStreams() uint32 {
   623  	sc.serveG.check()
   624  	return sc.curClientStreams + sc.curPushedStreams
   625  }
   626  
   627  // stream represents a stream. This is the minimal metadata needed by
   628  // the serve goroutine. Most of the actual stream state is owned by
   629  // the http.Handler's goroutine in the responseWriter. Because the
   630  // responseWriter's responseWriterState is recycled at the end of a
   631  // handler, this struct intentionally has no pointer to the
   632  // *responseWriter{,State} itself, as the Handler ending nils out the
   633  // responseWriter's state field.
   634  type stream struct {
   635  	// immutable:
   636  	sc        *serverConn
   637  	id        uint32
   638  	body      *pipe       // non-nil if expecting DATA frames
   639  	cw        closeWaiter // closed when the stream transitions to the closed state
   640  	ctx       context.Context
   641  	cancelCtx func()
   642  
   643  	// owned by serverConn's serve loop:
   644  	bodyBytes        int64   // body bytes seen so far
   645  	declBodyBytes    int64   // or -1 if undeclared
   646  	flow             outflow // limits writing from Handler to client
   647  	inflow           inflow  // what the client is allowed to POST/etc to us
   648  	state            streamState
   649  	resetQueued      bool        // RST_STREAM queued for write; set by sc.resetStream
   650  	gotTrailerHeader bool        // HEADER frame for trailers was seen
   651  	wroteHeaders     bool        // whether we wrote headers (not status 100)
   652  	readDeadline     *time.Timer // nil if unused
   653  	writeDeadline    *time.Timer // nil if unused
   654  	closeErr         error       // set before cw is closed
   655  
   656  	trailer    http.Header // accumulated trailers
   657  	reqTrailer http.Header // handler's Request.Trailer
   658  }
   659  
   660  func (sc *serverConn) Framer() *Framer  { return sc.framer }
   661  func (sc *serverConn) CloseConn() error { return sc.conn.Close() }
   662  func (sc *serverConn) Flush() error     { return sc.bw.Flush() }
   663  func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
   664  	return sc.hpackEncoder, &sc.headerWriteBuf
   665  }
   666  
   667  func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
   668  	sc.serveG.check()
   669  	// http://tools.ietf.org/html/rfc7540#section-5.1
   670  	if st, ok := sc.streams[streamID]; ok {
   671  		return st.state, st
   672  	}
   673  	// "The first use of a new stream identifier implicitly closes all
   674  	// streams in the "idle" state that might have been initiated by
   675  	// that peer with a lower-valued stream identifier. For example, if
   676  	// a client sends a HEADERS frame on stream 7 without ever sending a
   677  	// frame on stream 5, then stream 5 transitions to the "closed"
   678  	// state when the first frame for stream 7 is sent or received."
   679  	if streamID%2 == 1 {
   680  		if streamID <= sc.maxClientStreamID {
   681  			return stateClosed, nil
   682  		}
   683  	} else {
   684  		if streamID <= sc.maxPushPromiseID {
   685  			return stateClosed, nil
   686  		}
   687  	}
   688  	return stateIdle, nil
   689  }
   690  
   691  // setConnState calls the net/http ConnState hook for this connection, if configured.
   692  // Note that the net/http package does StateNew and StateClosed for us.
   693  // There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
   694  func (sc *serverConn) setConnState(state http.ConnState) {
   695  	if sc.hs.ConnState != nil {
   696  		sc.hs.ConnState(sc.conn, state)
   697  	}
   698  }
   699  
   700  func (sc *serverConn) vlogf(format string, args ...interface{}) {
   701  	if VerboseLogs {
   702  		sc.logf(format, args...)
   703  	}
   704  }
   705  
   706  func (sc *serverConn) logf(format string, args ...interface{}) {
   707  	if lg := sc.hs.ErrorLog; lg != nil {
   708  		lg.Printf(format, args...)
   709  	} else {
   710  		log.Printf(format, args...)
   711  	}
   712  }
   713  
   714  // errno returns v's underlying uintptr, else 0.
   715  //
   716  // TODO: remove this helper function once http2 can use build
   717  // tags. See comment in isClosedConnError.
   718  func errno(v error) uintptr {
   719  	if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
   720  		return uintptr(rv.Uint())
   721  	}
   722  	return 0
   723  }
   724  
   725  // isClosedConnError reports whether err is an error from use of a closed
   726  // network connection.
   727  func isClosedConnError(err error) bool {
   728  	if err == nil {
   729  		return false
   730  	}
   731  
   732  	// TODO: remove this string search and be more like the Windows
   733  	// case below. That might involve modifying the standard library
   734  	// to return better error types.
   735  	str := err.Error()
   736  	if strings.Contains(str, "use of closed network connection") {
   737  		return true
   738  	}
   739  
   740  	// TODO(bradfitz): x/tools/cmd/bundle doesn't really support
   741  	// build tags, so I can't make an http2_windows.go file with
   742  	// Windows-specific stuff. Fix that and move this, once we
   743  	// have a way to bundle this into std's net/http somehow.
   744  	if runtime.GOOS == "windows" {
   745  		if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
   746  			if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
   747  				const WSAECONNABORTED = 10053
   748  				const WSAECONNRESET = 10054
   749  				if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
   750  					return true
   751  				}
   752  			}
   753  		}
   754  	}
   755  	return false
   756  }
   757  
   758  func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
   759  	if err == nil {
   760  		return
   761  	}
   762  	if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout {
   763  		// Boring, expected errors.
   764  		sc.vlogf(format, args...)
   765  	} else {
   766  		sc.logf(format, args...)
   767  	}
   768  }
   769  
   770  // maxCachedCanonicalHeadersKeysSize is an arbitrarily-chosen limit on the size
   771  // of the entries in the canonHeader cache.
   772  // This should be larger than the size of unique, uncommon header keys likely to
   773  // be sent by the peer, while not so high as to permit unreasonable memory usage
   774  // if the peer sends an unbounded number of unique header keys.
   775  const maxCachedCanonicalHeadersKeysSize = 2048
   776  
   777  func (sc *serverConn) canonicalHeader(v string) string {
   778  	sc.serveG.check()
   779  	buildCommonHeaderMapsOnce()
   780  	cv, ok := commonCanonHeader[v]
   781  	if ok {
   782  		return cv
   783  	}
   784  	cv, ok = sc.canonHeader[v]
   785  	if ok {
   786  		return cv
   787  	}
   788  	if sc.canonHeader == nil {
   789  		sc.canonHeader = make(map[string]string)
   790  	}
   791  	cv = http.CanonicalHeaderKey(v)
   792  	size := 100 + len(v)*2 // 100 bytes of map overhead + key + value
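	// For example, a 20-byte key is charged 100 + 2*20 = 140 bytes, so roughly
	// 14 such keys fit under the 2048-byte cap.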
   793  	if sc.canonHeaderKeysSize+size <= maxCachedCanonicalHeadersKeysSize {
   794  		sc.canonHeader[v] = cv
   795  		sc.canonHeaderKeysSize += size
   796  	}
   797  	return cv
   798  }
   799  
   800  type readFrameResult struct {
   801  	f   Frame // valid until readMore is called
   802  	err error
   803  
   804  	// readMore should be called once the consumer no longer needs or
   805  	// retains f. After readMore, f is invalid and more frames can be
   806  	// read.
   807  	readMore func()
   808  }
   809  
   810  // readFrames is the loop that reads incoming frames.
   811  // It takes care to only read one frame at a time, blocking until the
   812  // consumer is done with the frame.
   813  // It's run on its own goroutine.
   814  func (sc *serverConn) readFrames() {
   815  	gate := make(gate)
   816  	gateDone := gate.Done
   817  	for {
   818  		f, err := sc.framer.ReadFrame()
   819  		select {
   820  		case sc.readFrameCh <- readFrameResult{f, err, gateDone}:
   821  		case <-sc.doneServing:
   822  			return
   823  		}
   824  		select {
   825  		case <-gate:
   826  		case <-sc.doneServing:
   827  			return
   828  		}
   829  		if terminalReadFrameError(err) {
   830  			return
   831  		}
   832  	}
   833  }
   834  
   835  // frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
   836  type frameWriteResult struct {
   837  	_   incomparable
   838  	wr  FrameWriteRequest // what was written (or attempted)
   839  	err error             // result of the writeFrame call
   840  }
   841  
   842  // writeFrameAsync runs in its own goroutine and writes a single frame
   843  // and then reports when it's done.
   844  // At most one goroutine can be running writeFrameAsync at a time per
   845  // serverConn.
   846  func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
   847  	var err error
   848  	if wd == nil {
   849  		err = wr.write.writeFrame(sc)
   850  	} else {
   851  		err = sc.framer.endWrite()
   852  	}
   853  	sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err}
   854  }
   855  
   856  func (sc *serverConn) closeAllStreamsOnConnClose() {
   857  	sc.serveG.check()
   858  	for _, st := range sc.streams {
   859  		sc.closeStream(st, errClientDisconnected)
   860  	}
   861  }
   862  
   863  func (sc *serverConn) stopShutdownTimer() {
   864  	sc.serveG.check()
   865  	if t := sc.shutdownTimer; t != nil {
   866  		t.Stop()
   867  	}
   868  }
   869  
   870  func (sc *serverConn) notePanic() {
   871  	// Note: this is for serverConn.serve panicking, not http.Handler code.
   872  	if testHookOnPanicMu != nil {
   873  		testHookOnPanicMu.Lock()
   874  		defer testHookOnPanicMu.Unlock()
   875  	}
   876  	if testHookOnPanic != nil {
   877  		if e := recover(); e != nil {
   878  			if testHookOnPanic(sc, e) {
   879  				panic(e)
   880  			}
   881  		}
   882  	}
   883  }
   884  
   885  func (sc *serverConn) serve() {
   886  	sc.serveG.check()
   887  	defer sc.notePanic()
   888  	defer sc.conn.Close()
   889  	defer sc.closeAllStreamsOnConnClose()
   890  	defer sc.stopShutdownTimer()
   891  	defer close(sc.doneServing) // unblocks handlers trying to send
   892  
   893  	if VerboseLogs {
   894  		sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
   895  	}
   896  
   897  	sc.writeFrame(FrameWriteRequest{
   898  		write: writeSettings{
   899  			{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
   900  			{SettingMaxConcurrentStreams, sc.advMaxStreams},
   901  			{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
   902  			{SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()},
   903  			{SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
   904  		},
   905  	})
   906  	sc.unackedSettings++
   907  
   908  	// Each connection starts with initialWindowSize inflow tokens.
   909  	// If a higher value is configured, we add more tokens.
   910  	if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
   911  		sc.sendWindowUpdate(nil, int(diff))
   912  	}
   913  
   914  	if err := sc.readPreface(); err != nil {
   915  		sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
   916  		return
   917  	}
   918  	// Now that we've got the preface, get us out of the
   919  	// "StateNew" state. We can't go directly to idle, though.
   920  	// Active means we read some data and anticipate a request. We'll
   921  	// do another Active when we get a HEADERS frame.
   922  	sc.setConnState(http.StateActive)
   923  	sc.setConnState(http.StateIdle)
   924  
   925  	if sc.srv.IdleTimeout != 0 {
   926  		sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
   927  		defer sc.idleTimer.Stop()
   928  	}
   929  
   930  	go sc.readFrames() // closed by defer sc.conn.Close above
   931  
   932  	settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
   933  	defer settingsTimer.Stop()
   934  
   935  	loopNum := 0
   936  	for {
   937  		loopNum++
   938  		select {
   939  		case wr := <-sc.wantWriteFrameCh:
   940  			if se, ok := wr.write.(StreamError); ok {
   941  				sc.resetStream(se)
   942  				break
   943  			}
   944  			sc.writeFrame(wr)
   945  		case res := <-sc.wroteFrameCh:
   946  			sc.wroteFrame(res)
   947  		case res := <-sc.readFrameCh:
   948  			// Process any written frames before reading new frames from the client since a
   949  			// written frame could have triggered a new stream to be started.
   950  			if sc.writingFrameAsync {
   951  				select {
   952  				case wroteRes := <-sc.wroteFrameCh:
   953  					sc.wroteFrame(wroteRes)
   954  				default:
   955  				}
   956  			}
   957  			if !sc.processFrameFromReader(res) {
   958  				return
   959  			}
   960  			res.readMore()
   961  			if settingsTimer != nil {
   962  				settingsTimer.Stop()
   963  				settingsTimer = nil
   964  			}
   965  		case m := <-sc.bodyReadCh:
   966  			sc.noteBodyRead(m.st, m.n)
   967  		case msg := <-sc.serveMsgCh:
   968  			switch v := msg.(type) {
   969  			case func(int):
   970  				v(loopNum) // for testing
   971  			case *serverMessage:
   972  				switch v {
   973  				case settingsTimerMsg:
   974  					sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
   975  					return
   976  				case idleTimerMsg:
   977  					sc.vlogf("connection is idle")
   978  					sc.goAway(ErrCodeNo)
   979  				case shutdownTimerMsg:
   980  					sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
   981  					return
   982  				case gracefulShutdownMsg:
   983  					sc.startGracefulShutdownInternal()
   984  				default:
   985  					panic("unknown timer")
   986  				}
   987  			case *startPushRequest:
   988  				sc.startPush(v)
   989  			case func(*serverConn):
   990  				v(sc)
   991  			default:
   992  				panic(fmt.Sprintf("unexpected type %T", v))
   993  			}
   994  		}
   995  
   996  		// If the peer is causing us to generate a lot of control frames,
   997  		// but not reading them from us, assume they are trying to make us
   998  		// run out of memory.
   999  		if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
  1000  			sc.vlogf("http2: too many control frames in send queue, closing connection")
  1001  			return
  1002  		}
  1003  
  1004  		// Start the shutdown timer after sending a GOAWAY. When sending GOAWAY
  1005  		// with no error code (graceful shutdown), don't start the timer until
  1006  		// all open streams have been completed.
  1007  		sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame
  1008  		gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0
  1009  		if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) {
  1010  			sc.shutDownIn(goAwayTimeout)
  1011  		}
  1012  	}
  1013  }
  1014  
  1015  func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
  1016  	select {
  1017  	case <-sc.doneServing:
  1018  	case <-sharedCh:
  1019  		close(privateCh)
  1020  	}
  1021  }
  1022  
  1023  type serverMessage int
  1024  
  1025  // Message values sent to serveMsgCh.
  1026  var (
  1027  	settingsTimerMsg    = new(serverMessage)
  1028  	idleTimerMsg        = new(serverMessage)
  1029  	shutdownTimerMsg    = new(serverMessage)
  1030  	gracefulShutdownMsg = new(serverMessage)
  1031  )
  1032  
  1033  func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
  1034  func (sc *serverConn) onIdleTimer()     { sc.sendServeMsg(idleTimerMsg) }
  1035  func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
  1036  
  1037  func (sc *serverConn) sendServeMsg(msg interface{}) {
  1038  	sc.serveG.checkNotOn() // NOT
  1039  	select {
  1040  	case sc.serveMsgCh <- msg:
  1041  	case <-sc.doneServing:
  1042  	}
  1043  }
  1044  
  1045  var errPrefaceTimeout = errors.New("timeout waiting for client preface")
  1046  
  1047  // readPreface reads the ClientPreface greeting from the peer or
  1048  // returns errPrefaceTimeout on timeout, or an error if the greeting
  1049  // is invalid.
  1050  func (sc *serverConn) readPreface() error {
  1051  	if sc.sawClientPreface {
  1052  		return nil
  1053  	}
  1054  	errc := make(chan error, 1)
  1055  	go func() {
  1056  		// Read the client preface
  1057  		buf := make([]byte, len(ClientPreface))
  1058  		if _, err := io.ReadFull(sc.conn, buf); err != nil {
  1059  			errc <- err
  1060  		} else if !bytes.Equal(buf, clientPreface) {
  1061  			errc <- fmt.Errorf("bogus greeting %q", buf)
  1062  		} else {
  1063  			errc <- nil
  1064  		}
  1065  	}()
  1066  	timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
  1067  	defer timer.Stop()
  1068  	select {
  1069  	case <-timer.C:
  1070  		return errPrefaceTimeout
  1071  	case err := <-errc:
  1072  		if err == nil {
  1073  			if VerboseLogs {
  1074  				sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
  1075  			}
  1076  		}
  1077  		return err
  1078  	}
  1079  }
  1080  
  1081  var errChanPool = sync.Pool{
  1082  	New: func() interface{} { return make(chan error, 1) },
  1083  }
  1084  
  1085  var writeDataPool = sync.Pool{
  1086  	New: func() interface{} { return new(writeData) },
  1087  }
  1088  
  1089  // writeDataFromHandler writes DATA response frames from a handler on
  1090  // the given stream.
  1091  func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
  1092  	ch := errChanPool.Get().(chan error)
  1093  	writeArg := writeDataPool.Get().(*writeData)
  1094  	*writeArg = writeData{stream.id, data, endStream}
  1095  	err := sc.writeFrameFromHandler(FrameWriteRequest{
  1096  		write:  writeArg,
  1097  		stream: stream,
  1098  		done:   ch,
  1099  	})
  1100  	if err != nil {
  1101  		return err
  1102  	}
  1103  	var frameWriteDone bool // the frame write is done (successfully or not)
  1104  	select {
  1105  	case err = <-ch:
  1106  		frameWriteDone = true
  1107  	case <-sc.doneServing:
  1108  		return errClientDisconnected
  1109  	case <-stream.cw:
  1110  		// If both ch and stream.cw were ready (as might
  1111  		// happen on the final Write after an http.Handler
  1112  		// ends), prefer the write result. Otherwise this
  1113  		// might just be us successfully closing the stream.
  1114  		// The writeFrameAsync and serve goroutines guarantee
  1115  		// that the ch send will happen before the stream.cw
  1116  		// close.
  1117  		select {
  1118  		case err = <-ch:
  1119  			frameWriteDone = true
  1120  		default:
  1121  			return errStreamClosed
  1122  		}
  1123  	}
  1124  	errChanPool.Put(ch)
  1125  	if frameWriteDone {
  1126  		writeDataPool.Put(writeArg)
  1127  	}
  1128  	return err
  1129  }
  1130  
  1131  // writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts
  1132  // if the connection has gone away.
  1133  //
  1134  // This must not be run from the serve goroutine itself, else it might
  1135  // deadlock writing to sc.wantWriteFrameCh (which is only mildly
  1136  // buffered and is read by serve itself). If you're on the serve
  1137  // goroutine, call writeFrame instead.
  1138  func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error {
  1139  	sc.serveG.checkNotOn() // NOT
  1140  	select {
  1141  	case sc.wantWriteFrameCh <- wr:
  1142  		return nil
  1143  	case <-sc.doneServing:
  1144  		// Serve loop is gone.
  1145  		// Client has closed their connection to the server.
  1146  		return errClientDisconnected
  1147  	}
  1148  }
  1149  
  1150  // writeFrame schedules a frame to write and sends it if there's nothing
  1151  // already being written.
  1152  //
  1153  // There is no pushback here (the serve goroutine never blocks). It's
  1154  // the http.Handlers that block, waiting for their previous frames to
  1155  // make it onto the wire.
  1156  //
  1157  // If you're not on the serve goroutine, use writeFrameFromHandler instead.
  1158  func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
  1159  	sc.serveG.check()
  1160  
  1161  	// If true, wr will not be written and wr.done will not be signaled.
  1162  	var ignoreWrite bool
  1163  
  1164  	// We are not allowed to write frames on closed streams. RFC 7540 Section
  1165  	// 5.1 says: "An endpoint MUST NOT send frames other than PRIORITY on
  1166  	// a closed stream." Our server never sends PRIORITY, so that exception
  1167  	// does not apply.
  1168  	//
  1169  	// The serverConn might close an open stream while the stream's handler
  1170  	// is still running. For example, the server might close a stream when it
  1171  	// receives bad data from the client. If this happens, the handler might
  1172  	// attempt to write a frame after the stream has been closed (since the
  1173  	// handler hasn't yet been notified of the close). In this case, we simply
  1174  	// ignore the frame. The handler will notice that the stream is closed when
  1175  	// it waits for the frame to be written.
  1176  	//
  1177  	// As an exception to this rule, we allow sending RST_STREAM after close.
  1178  	// This allows us to immediately reject new streams without tracking any
  1179  	// state for those streams (except for the queued RST_STREAM frame). This
  1180  	// may result in duplicate RST_STREAMs in some cases, but the client should
  1181  	// ignore those.
  1182  	if wr.StreamID() != 0 {
  1183  		_, isReset := wr.write.(StreamError)
  1184  		if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset {
  1185  			ignoreWrite = true
  1186  		}
  1187  	}
  1188  
  1189  	// Don't send a 100-continue response if we've already sent headers.
  1190  	// See golang.org/issue/14030.
  1191  	switch wr.write.(type) {
  1192  	case *writeResHeaders:
  1193  		wr.stream.wroteHeaders = true
  1194  	case write100ContinueHeadersFrame:
  1195  		if wr.stream.wroteHeaders {
  1196  			// We do not need to notify wr.done because this frame is
  1197  			// never written with wr.done != nil.
  1198  			if wr.done != nil {
  1199  				panic("wr.done != nil for write100ContinueHeadersFrame")
  1200  			}
  1201  			ignoreWrite = true
  1202  		}
  1203  	}
  1204  
  1205  	if !ignoreWrite {
  1206  		if wr.isControl() {
  1207  			sc.queuedControlFrames++
  1208  			// For extra safety, detect wraparounds, which should not happen,
  1209  			// and pull the plug.
  1210  			if sc.queuedControlFrames < 0 {
  1211  				sc.conn.Close()
  1212  			}
  1213  		}
  1214  		sc.writeSched.Push(wr)
  1215  	}
  1216  	sc.scheduleFrameWrite()
  1217  }
  1218  
  1219  // startFrameWrite starts writing wr, either synchronously (when the frame
  1220  // stays within the write buffer) or on a separate goroutine (since writing
  1221  // might block on the network), and updates the serve goroutine's bookkeeping from the info in wr.
  1222  func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
  1223  	sc.serveG.check()
  1224  	if sc.writingFrame {
  1225  		panic("internal error: can only be writing one frame at a time")
  1226  	}
  1227  
  1228  	st := wr.stream
  1229  	if st != nil {
  1230  		switch st.state {
  1231  		case stateHalfClosedLocal:
  1232  			switch wr.write.(type) {
  1233  			case StreamError, handlerPanicRST, writeWindowUpdate:
  1234  				// RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE
  1235  				// in this state. (We never send PRIORITY from the server, so that is not checked.)
  1236  			default:
  1237  				panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr))
  1238  			}
  1239  		case stateClosed:
  1240  			panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr))
  1241  		}
  1242  	}
  1243  	if wpp, ok := wr.write.(*writePushPromise); ok {
  1244  		var err error
  1245  		wpp.promisedID, err = wpp.allocatePromisedID()
  1246  		if err != nil {
  1247  			sc.writingFrameAsync = false
  1248  			wr.replyToWriter(err)
  1249  			return
  1250  		}
  1251  	}
  1252  
  1253  	sc.writingFrame = true
  1254  	sc.needsFrameFlush = true
  1255  	if wr.write.staysWithinBuffer(sc.bw.Available()) {
  1256  		sc.writingFrameAsync = false
  1257  		err := wr.write.writeFrame(sc)
  1258  		sc.wroteFrame(frameWriteResult{wr: wr, err: err})
  1259  	} else if wd, ok := wr.write.(*writeData); ok {
  1260  		// Encode the frame in the serve goroutine, to ensure we don't have
  1261  		// any lingering asynchronous references to data passed to Write.
  1262  		// See https://go.dev/issue/58446.
  1263  		sc.framer.startWriteDataPadded(wd.streamID, wd.endStream, wd.p, nil)
  1264  		sc.writingFrameAsync = true
  1265  		go sc.writeFrameAsync(wr, wd)
  1266  	} else {
  1267  		sc.writingFrameAsync = true
  1268  		go sc.writeFrameAsync(wr, nil)
  1269  	}
  1270  }
  1271  
  1272  // errHandlerPanicked is the error given to any callers blocked in a read from
  1273  // Request.Body when the main goroutine panics. Since most handlers read in the
  1274  // main ServeHTTP goroutine, this will show up rarely.
  1275  var errHandlerPanicked = errors.New("http2: handler panicked")
  1276  
  1277  // wroteFrame is called on the serve goroutine with the result of
  1278  // whatever happened on writeFrameAsync.
  1279  func (sc *serverConn) wroteFrame(res frameWriteResult) {
  1280  	sc.serveG.check()
  1281  	if !sc.writingFrame {
  1282  		panic("internal error: expected to be already writing a frame")
  1283  	}
  1284  	sc.writingFrame = false
  1285  	sc.writingFrameAsync = false
  1286  
  1287  	wr := res.wr
  1288  
  1289  	if writeEndsStream(wr.write) {
  1290  		st := wr.stream
  1291  		if st == nil {
  1292  			panic("internal error: expecting non-nil stream")
  1293  		}
  1294  		switch st.state {
  1295  		case stateOpen:
  1296  			// Here we would go to stateHalfClosedLocal in
  1297  			// theory, but since our handler is done and
  1298  			// the net/http package provides no mechanism
  1299  			// for closing a ResponseWriter while still
  1300  			// reading data (see possible TODO at top of
  1301  			// this file), we go into closed state here
  1302  			// anyway, after telling the peer we're
  1303  			// hanging up on them. We'll transition to
  1304  			// stateClosed after the RST_STREAM frame is
  1305  			// written.
  1306  			st.state = stateHalfClosedLocal
  1307  			// Section 8.1: a server MAY request that the client abort
  1308  			// transmission of a request without error by sending a
  1309  			// RST_STREAM with an error code of NO_ERROR after sending
  1310  			// a complete response.
  1311  			sc.resetStream(streamError(st.id, ErrCodeNo))
  1312  		case stateHalfClosedRemote:
  1313  			sc.closeStream(st, errHandlerComplete)
  1314  		}
  1315  	} else {
  1316  		switch v := wr.write.(type) {
  1317  		case StreamError:
  1318  			// st may be unknown if the RST_STREAM was generated to reject bad input.
  1319  			if st, ok := sc.streams[v.StreamID]; ok {
  1320  				sc.closeStream(st, v)
  1321  			}
  1322  		case handlerPanicRST:
  1323  			sc.closeStream(wr.stream, errHandlerPanicked)
  1324  		}
  1325  	}
  1326  
  1327  	// Reply (if requested) to unblock the ServeHTTP goroutine.
  1328  	wr.replyToWriter(res.err)
  1329  
  1330  	sc.scheduleFrameWrite()
  1331  }
  1332  
  1333  // scheduleFrameWrite tickles the frame writing scheduler.
  1334  //
  1335  // If a frame is already being written, nothing happens. This will be called again
  1336  // when the frame is done being written.
  1337  //
  1338  // If a frame isn't being written and we need to send one, the best frame
  1339  // to send is selected by writeSched.
  1340  //
  1341  // If a frame isn't being written and there's nothing else to send, we
  1342  // flush the write buffer.
  1343  func (sc *serverConn) scheduleFrameWrite() {
  1344  	sc.serveG.check()
  1345  	if sc.writingFrame || sc.inFrameScheduleLoop {
  1346  		return
  1347  	}
  1348  	sc.inFrameScheduleLoop = true
  1349  	for !sc.writingFrameAsync {
  1350  		if sc.needToSendGoAway {
  1351  			sc.needToSendGoAway = false
  1352  			sc.startFrameWrite(FrameWriteRequest{
  1353  				write: &writeGoAway{
  1354  					maxStreamID: sc.maxClientStreamID,
  1355  					code:        sc.goAwayCode,
  1356  				},
  1357  			})
  1358  			continue
  1359  		}
  1360  		if sc.needToSendSettingsAck {
  1361  			sc.needToSendSettingsAck = false
  1362  			sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}})
  1363  			continue
  1364  		}
  1365  		if !sc.inGoAway || sc.goAwayCode == ErrCodeNo {
  1366  			if wr, ok := sc.writeSched.Pop(); ok {
  1367  				if wr.isControl() {
  1368  					sc.queuedControlFrames--
  1369  				}
  1370  				sc.startFrameWrite(wr)
  1371  				continue
  1372  			}
  1373  		}
  1374  		if sc.needsFrameFlush {
  1375  			sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}})
  1376  			sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
  1377  			continue
  1378  		}
  1379  		break
  1380  	}
  1381  	sc.inFrameScheduleLoop = false
  1382  }
  1383  
  1384  // startGracefulShutdown gracefully shuts down a connection. This
  1385  // sends GOAWAY with ErrCodeNo to tell the client we're gracefully
  1386  // shutting down. The connection isn't closed until all current
  1387  // streams are done.
  1388  //
  1389  // startGracefulShutdown returns immediately; it does not wait until
  1390  // the connection has shut down.
  1391  func (sc *serverConn) startGracefulShutdown() {
  1392  	sc.serveG.checkNotOn() // NOT
  1393  	sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) })
  1394  }
  1395  
  1396  // After sending GOAWAY with an error code (non-graceful shutdown), the
  1397  // connection will close after goAwayTimeout.
  1398  //
  1399  // If we close the connection immediately after sending GOAWAY, there may
  1400  // be unread data in our kernel receive buffer, which will cause the kernel
  1401  // to send a TCP RST on close() instead of a FIN. This RST will abort the
  1402  // connection immediately, whether or not the client had received the GOAWAY.
  1403  //
  1404  // Ideally we should delay for at least 1 RTT + epsilon so the client has
  1405  // a chance to read the GOAWAY and stop sending messages. Measuring RTT
  1406  // is hard, so we approximate with 1 second. See golang.org/issue/18701.
  1407  //
  1408  // This is a var so it can be shorter in tests, where all requests use the
  1409  // loopback interface, making the expected RTT very small.
  1410  //
  1411  // TODO: configurable?
  1412  var goAwayTimeout = 1 * time.Second
  1413  
  1414  func (sc *serverConn) startGracefulShutdownInternal() {
  1415  	sc.goAway(ErrCodeNo)
  1416  }
  1417  
  1418  func (sc *serverConn) goAway(code ErrCode) {
  1419  	sc.serveG.check()
  1420  	if sc.inGoAway {
  1421  		if sc.goAwayCode == ErrCodeNo {
  1422  			sc.goAwayCode = code
  1423  		}
  1424  		return
  1425  	}
  1426  	sc.inGoAway = true
  1427  	sc.needToSendGoAway = true
  1428  	sc.goAwayCode = code
  1429  	sc.scheduleFrameWrite()
  1430  }
  1431  
  1432  func (sc *serverConn) shutDownIn(d time.Duration) {
  1433  	sc.serveG.check()
  1434  	sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
  1435  }
  1436  
  1437  func (sc *serverConn) resetStream(se StreamError) {
  1438  	sc.serveG.check()
  1439  	sc.writeFrame(FrameWriteRequest{write: se})
  1440  	if st, ok := sc.streams[se.StreamID]; ok {
  1441  		st.resetQueued = true
  1442  	}
  1443  }
  1444  
  1445  // processFrameFromReader processes the serve loop's read from readFrameCh from the
  1446  // frame-reading goroutine.
  1447  // processFrameFromReader returns whether the connection should be kept open.
  1448  func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
  1449  	sc.serveG.check()
  1450  	err := res.err
  1451  	if err != nil {
  1452  		if err == ErrFrameTooLarge {
  1453  			sc.goAway(ErrCodeFrameSize)
  1454  			return true // goAway will close the loop
  1455  		}
  1456  		clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)
  1457  		if clientGone {
  1458  			// TODO: could we also get into this state if
  1459  			// the peer does a half close
  1460  			// (e.g. CloseWrite) because they're done
  1461  			// sending frames but they're still wanting
  1462  			// our open replies?  Investigate.
  1463  			// TODO: add CloseWrite to crypto/tls.Conn first
  1464  			// so we have a way to test this? I suppose
  1465  			// just for testing we could have a non-TLS mode.
  1466  			return false
  1467  		}
  1468  	} else {
  1469  		f := res.f
  1470  		if VerboseLogs {
  1471  			sc.vlogf("http2: server read frame %v", summarizeFrame(f))
  1472  		}
  1473  		err = sc.processFrame(f)
  1474  		if err == nil {
  1475  			return true
  1476  		}
  1477  	}
  1478  
  1479  	switch ev := err.(type) {
  1480  	case StreamError:
  1481  		sc.resetStream(ev)
  1482  		return true
  1483  	case goAwayFlowError:
  1484  		sc.goAway(ErrCodeFlowControl)
  1485  		return true
  1486  	case ConnectionError:
  1487  		sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
  1488  		sc.goAway(ErrCode(ev))
  1489  		return true // goAway will handle shutdown
  1490  	default:
  1491  		if res.err != nil {
  1492  			sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
  1493  		} else {
  1494  			sc.logf("http2: server closing client connection: %v", err)
  1495  		}
  1496  		return false
  1497  	}
  1498  }
  1499  
  1500  func (sc *serverConn) processFrame(f Frame) error {
  1501  	sc.serveG.check()
  1502  
  1503  	// First frame received must be SETTINGS.
  1504  	if !sc.sawFirstSettings {
  1505  		if _, ok := f.(*SettingsFrame); !ok {
  1506  			return sc.countError("first_settings", ConnectionError(ErrCodeProtocol))
  1507  		}
  1508  		sc.sawFirstSettings = true
  1509  	}
  1510  
  1511  	// Discard frames for streams initiated after the identified last
  1512  	// stream sent in a GOAWAY, or all frames after sending an error.
  1513  	// We still need to return connection-level flow control for DATA frames.
  1514  	// RFC 9113 Section 6.8.
  1515  	if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) {
  1516  
  1517  		if f, ok := f.(*DataFrame); ok {
  1518  			if !sc.inflow.take(f.Length) {
  1519  				return sc.countError("data_flow", streamError(f.Header().StreamID, ErrCodeFlowControl))
  1520  			}
  1521  			sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
  1522  		}
  1523  		return nil
  1524  	}
  1525  
  1526  	switch f := f.(type) {
  1527  	case *SettingsFrame:
  1528  		return sc.processSettings(f)
  1529  	case *MetaHeadersFrame:
  1530  		return sc.processHeaders(f)
  1531  	case *WindowUpdateFrame:
  1532  		return sc.processWindowUpdate(f)
  1533  	case *PingFrame:
  1534  		return sc.processPing(f)
  1535  	case *DataFrame:
  1536  		return sc.processData(f)
  1537  	case *RSTStreamFrame:
  1538  		return sc.processResetStream(f)
  1539  	case *PriorityFrame:
  1540  		return sc.processPriority(f)
  1541  	case *GoAwayFrame:
  1542  		return sc.processGoAway(f)
  1543  	case *PushPromiseFrame:
  1544  		// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
  1545  		// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
  1546  		return sc.countError("push_promise", ConnectionError(ErrCodeProtocol))
  1547  	default:
  1548  		sc.vlogf("http2: server ignoring frame: %v", f.Header())
  1549  		return nil
  1550  	}
  1551  }
  1552  
  1553  func (sc *serverConn) processPing(f *PingFrame) error {
  1554  	sc.serveG.check()
  1555  	if f.IsAck() {
  1556  		// 6.7 PING: " An endpoint MUST NOT respond to PING frames
  1557  		// containing this flag."
  1558  		return nil
  1559  	}
  1560  	if f.StreamID != 0 {
  1561  		// "PING frames are not associated with any individual
  1562  		// stream. If a PING frame is received with a stream
  1563  		// identifier field value other than 0x0, the recipient MUST
  1564  		// respond with a connection error (Section 5.4.1) of type
  1565  		// PROTOCOL_ERROR."
  1566  		return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol))
  1567  	}
  1568  	sc.writeFrame(FrameWriteRequest{write: writePingAck{f}})
  1569  	return nil
  1570  }
  1571  
  1572  func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
  1573  	sc.serveG.check()
  1574  	switch {
  1575  	case f.StreamID != 0: // stream-level flow control
  1576  		state, st := sc.state(f.StreamID)
  1577  		if state == stateIdle {
  1578  			// Section 5.1: "Receiving any frame other than HEADERS
  1579  			// or PRIORITY on a stream in this state MUST be
  1580  			// treated as a connection error (Section 5.4.1) of
  1581  			// type PROTOCOL_ERROR."
  1582  			return sc.countError("stream_idle", ConnectionError(ErrCodeProtocol))
  1583  		}
  1584  		if st == nil {
  1585  			// "WINDOW_UPDATE can be sent by a peer that has sent a
  1586  			// frame bearing the END_STREAM flag. This means that a
  1587  			// receiver could receive a WINDOW_UPDATE frame on a "half
  1588  			// closed (remote)" or "closed" stream. A receiver MUST
  1589  			// NOT treat this as an error, see Section 5.1."
  1590  			return nil
  1591  		}
  1592  		if !st.flow.add(int32(f.Increment)) {
  1593  			return sc.countError("bad_flow", streamError(f.StreamID, ErrCodeFlowControl))
  1594  		}
  1595  	default: // connection-level flow control
  1596  		if !sc.flow.add(int32(f.Increment)) {
  1597  			return goAwayFlowError{}
  1598  		}
  1599  	}
  1600  	sc.scheduleFrameWrite()
  1601  	return nil
  1602  }
  1603  
  1604  func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
  1605  	sc.serveG.check()
  1606  
  1607  	state, st := sc.state(f.StreamID)
  1608  	if state == stateIdle {
  1609  		// 6.4 "RST_STREAM frames MUST NOT be sent for a
  1610  		// stream in the "idle" state. If a RST_STREAM frame
  1611  		// identifying an idle stream is received, the
  1612  		// recipient MUST treat this as a connection error
  1613  		// (Section 5.4.1) of type PROTOCOL_ERROR."
  1614  		return sc.countError("reset_idle_stream", ConnectionError(ErrCodeProtocol))
  1615  	}
  1616  	if st != nil {
  1617  		st.cancelCtx()
  1618  		sc.closeStream(st, streamError(f.StreamID, f.ErrCode))
  1619  	}
  1620  	return nil
  1621  }
  1622  
  1623  func (sc *serverConn) closeStream(st *stream, err error) {
  1624  	sc.serveG.check()
  1625  	if st.state == stateIdle || st.state == stateClosed {
  1626  		panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
  1627  	}
  1628  	st.state = stateClosed
  1629  	if st.readDeadline != nil {
  1630  		st.readDeadline.Stop()
  1631  	}
  1632  	if st.writeDeadline != nil {
  1633  		st.writeDeadline.Stop()
  1634  	}
  1635  	if st.isPushed() {
  1636  		sc.curPushedStreams--
  1637  	} else {
  1638  		sc.curClientStreams--
  1639  	}
  1640  	delete(sc.streams, st.id)
  1641  	if len(sc.streams) == 0 {
  1642  		sc.setConnState(http.StateIdle)
  1643  		if sc.srv.IdleTimeout != 0 {
  1644  			sc.idleTimer.Reset(sc.srv.IdleTimeout)
  1645  		}
  1646  		if h1ServerKeepAlivesDisabled(sc.hs) {
  1647  			sc.startGracefulShutdownInternal()
  1648  		}
  1649  	}
  1650  	if p := st.body; p != nil {
  1651  		// Return any buffered unread bytes worth of conn-level flow control.
  1652  		// See golang.org/issue/16481
  1653  		sc.sendWindowUpdate(nil, p.Len())
  1654  
  1655  		p.CloseWithError(err)
  1656  	}
  1657  	if e, ok := err.(StreamError); ok {
  1658  		if e.Cause != nil {
  1659  			err = e.Cause
  1660  		} else {
  1661  			err = errStreamClosed
  1662  		}
  1663  	}
  1664  	st.closeErr = err
  1665  	st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
  1666  	sc.writeSched.CloseStream(st.id)
  1667  }
  1668  
  1669  func (sc *serverConn) processSettings(f *SettingsFrame) error {
  1670  	sc.serveG.check()
  1671  	if f.IsAck() {
  1672  		sc.unackedSettings--
  1673  		if sc.unackedSettings < 0 {
  1674  			// Why is the peer ACKing settings we never sent?
  1675  			// The spec doesn't mention this case, but
  1676  			// hang up on them anyway.
  1677  			return sc.countError("ack_mystery", ConnectionError(ErrCodeProtocol))
  1678  		}
  1679  		return nil
  1680  	}
  1681  	if f.NumSettings() > 100 || f.HasDuplicates() {
  1682  		// This isn't actually in the spec, but hang up on
  1683  		// suspiciously large settings frames or those with
  1684  		// duplicate entries.
  1685  		return sc.countError("settings_big_or_dups", ConnectionError(ErrCodeProtocol))
  1686  	}
  1687  	if err := f.ForeachSetting(sc.processSetting); err != nil {
  1688  		return err
  1689  	}
  1690  	// TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be
  1691  	// acknowledged individually, even if multiple are received before the ACK.
  1692  	sc.needToSendSettingsAck = true
  1693  	sc.scheduleFrameWrite()
  1694  	return nil
  1695  }
  1696  
  1697  func (sc *serverConn) processSetting(s Setting) error {
  1698  	sc.serveG.check()
  1699  	if err := s.Valid(); err != nil {
  1700  		return err
  1701  	}
  1702  	if VerboseLogs {
  1703  		sc.vlogf("http2: server processing setting %v", s)
  1704  	}
  1705  	switch s.ID {
  1706  	case SettingHeaderTableSize:
  1707  		sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
  1708  	case SettingEnablePush:
  1709  		sc.pushEnabled = s.Val != 0
  1710  	case SettingMaxConcurrentStreams:
  1711  		sc.clientMaxStreams = s.Val
  1712  	case SettingInitialWindowSize:
  1713  		return sc.processSettingInitialWindowSize(s.Val)
  1714  	case SettingMaxFrameSize:
  1715  		sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
  1716  	case SettingMaxHeaderListSize:
  1717  		sc.peerMaxHeaderListSize = s.Val
  1718  	default:
  1719  		// Unknown setting: "An endpoint that receives a SETTINGS
  1720  		// frame with any unknown or unsupported identifier MUST
  1721  		// ignore that setting."
  1722  		if VerboseLogs {
  1723  			sc.vlogf("http2: server ignoring unknown setting %v", s)
  1724  		}
  1725  	}
  1726  	return nil
  1727  }
  1728  
  1729  func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
  1730  	sc.serveG.check()
  1731  	// Note: val already validated to be within range by
  1732  	// processSetting's Valid call.
  1733  
  1734  	// "A SETTINGS frame can alter the initial flow control window
  1735  	// size for all current streams. When the value of
  1736  	// SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
  1737  	// adjust the size of all stream flow control windows that it
  1738  	// maintains by the difference between the new value and the
  1739  	// old value."
  1740  	old := sc.initialStreamSendWindowSize
  1741  	sc.initialStreamSendWindowSize = int32(val)
  1742  	growth := int32(val) - old // may be negative
  1743  	for _, st := range sc.streams {
  1744  		if !st.flow.add(growth) {
  1745  			// 6.9.2 Initial Flow Control Window Size
  1746  			// "An endpoint MUST treat a change to
  1747  			// SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
  1748  			// control window to exceed the maximum size as a
  1749  			// connection error (Section 5.4.1) of type
  1750  			// FLOW_CONTROL_ERROR."
  1751  			return sc.countError("setting_win_size", ConnectionError(ErrCodeFlowControl))
  1752  		}
  1753  	}
  1754  	return nil
  1755  }
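
// Illustrative arithmetic for the adjustment above (a sketch only, not
// code used by the connection): if the previous SETTINGS_INITIAL_WINDOW_SIZE
// was 65535 and the peer lowers it to 16384, then growth = 16384 - 65535 =
// -49151, so every open stream's send window shrinks by 49151. A window may
// become negative; the server then stops sending DATA on that stream until
// WINDOW_UPDATE frames bring the window back above zero (RFC 7540, 6.9.2).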
  1756  
  1757  func (sc *serverConn) processData(f *DataFrame) error {
  1758  	sc.serveG.check()
  1759  	id := f.Header().StreamID
  1760  
  1761  	data := f.Data()
  1762  	state, st := sc.state(id)
  1763  	if id == 0 || state == stateIdle {
  1764  		// Section 6.1: "DATA frames MUST be associated with a
  1765  		// stream. If a DATA frame is received whose stream
  1766  		// identifier field is 0x0, the recipient MUST respond
  1767  		// with a connection error (Section 5.4.1) of type
  1768  		// PROTOCOL_ERROR."
  1769  		//
  1770  		// Section 5.1: "Receiving any frame other than HEADERS
  1771  		// or PRIORITY on a stream in this state MUST be
  1772  		// treated as a connection error (Section 5.4.1) of
  1773  		// type PROTOCOL_ERROR."
  1774  		return sc.countError("data_on_idle", ConnectionError(ErrCodeProtocol))
  1775  	}
  1776  
  1777  	// "If a DATA frame is received whose stream is not in "open"
  1778  	// or "half closed (local)" state, the recipient MUST respond
  1779  	// with a stream error (Section 5.4.2) of type STREAM_CLOSED."
  1780  	if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued {
  1781  		// This includes sending a RST_STREAM if the stream is
  1782  		// in stateHalfClosedLocal (which currently means that
  1783  		// the http.Handler returned, so it's done reading &
  1784  		// done writing). Try to stop the client from sending
  1785  		// more DATA.
  1786  
  1787  		// But still enforce their connection-level flow control,
  1788  		// and return any flow control bytes since we're not going
  1789  		// to consume them.
  1790  		if !sc.inflow.take(f.Length) {
  1791  			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
  1792  		}
  1793  		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
  1794  
  1795  		if st != nil && st.resetQueued {
  1796  			// Already have a stream error in flight. Don't send another.
  1797  			return nil
  1798  		}
  1799  		return sc.countError("closed", streamError(id, ErrCodeStreamClosed))
  1800  	}
  1801  	if st.body == nil {
  1802  		panic("internal error: should have a body in this state")
  1803  	}
  1804  
  1805  	// Sender sending more than they'd declared?
  1806  	if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
  1807  		if !sc.inflow.take(f.Length) {
  1808  			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
  1809  		}
  1810  		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
  1811  
  1812  		st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
  1813  		// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
  1814  		// value of a content-length header field does not equal the sum of the
  1815  		// DATA frame payload lengths that form the body.
  1816  		return sc.countError("send_too_much", streamError(id, ErrCodeProtocol))
  1817  	}
  1818  	if f.Length > 0 {
  1819  		// Check whether the client has flow control quota.
  1820  		if !takeInflows(&sc.inflow, &st.inflow, f.Length) {
  1821  			return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl))
  1822  		}
  1823  
  1824  		if len(data) > 0 {
  1825  			wrote, err := st.body.Write(data)
  1826  			if err != nil {
  1827  				sc.sendWindowUpdate(nil, int(f.Length)-wrote)
  1828  				return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed))
  1829  			}
  1830  			if wrote != len(data) {
  1831  				panic("internal error: bad Writer")
  1832  			}
  1833  			st.bodyBytes += int64(len(data))
  1834  		}
  1835  
  1836  		// Return any padded flow control now, since we won't
  1837  		// refund it later on body reads.
  1838  		// Call sendWindowUpdate even if there is no padding,
  1839  		// to return buffered flow control credit if the sent
  1840  		// window has shrunk.
  1841  		pad := int32(f.Length) - int32(len(data))
  1842  		sc.sendWindowUpdate32(nil, pad)
  1843  		sc.sendWindowUpdate32(st, pad)
  1844  	}
  1845  	if f.StreamEnded() {
  1846  		st.endStream()
  1847  	}
  1848  	return nil
  1849  }
  1850  
  1851  func (sc *serverConn) processGoAway(f *GoAwayFrame) error {
  1852  	sc.serveG.check()
  1853  	if f.ErrCode != ErrCodeNo {
  1854  		sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f)
  1855  	} else {
  1856  		sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
  1857  	}
  1858  	sc.startGracefulShutdownInternal()
  1859  	// http://tools.ietf.org/html/rfc7540#section-6.8
  1860  	// We should not create any new streams, which means we should disable push.
  1861  	sc.pushEnabled = false
  1862  	return nil
  1863  }
  1864  
  1865  // isPushed reports whether the stream is server-initiated.
  1866  func (st *stream) isPushed() bool {
  1867  	return st.id%2 == 0
  1868  }
  1869  
  1870  // endStream closes a Request.Body's pipe. It is called when a DATA
  1871  // frame says a request body is over (or after trailers).
  1872  func (st *stream) endStream() {
  1873  	sc := st.sc
  1874  	sc.serveG.check()
  1875  
  1876  	if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
  1877  		st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
  1878  			st.declBodyBytes, st.bodyBytes))
  1879  	} else {
  1880  		st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
  1881  		st.body.CloseWithError(io.EOF)
  1882  	}
  1883  	st.state = stateHalfClosedRemote
  1884  }
  1885  
  1886  // copyTrailersToHandlerRequest is run in the Handler's goroutine in
  1887  // its Request.Body.Read just before it gets io.EOF.
  1888  func (st *stream) copyTrailersToHandlerRequest() {
  1889  	for k, vv := range st.trailer {
  1890  		if _, ok := st.reqTrailer[k]; ok {
  1891  			// Only copy it over if it was pre-declared.
  1892  			st.reqTrailer[k] = vv
  1893  		}
  1894  	}
  1895  }
  1896  
  1897  // onReadTimeout is run on its own goroutine (from time.AfterFunc)
  1898  // when the stream's ReadTimeout has fired.
  1899  func (st *stream) onReadTimeout() {
  1900  	// Wrap the ErrDeadlineExceeded to avoid callers depending on us
  1901  	// returning the bare error.
  1902  	st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
  1903  }
  1904  
  1905  // onWriteTimeout is run on its own goroutine (from time.AfterFunc)
  1906  // when the stream's WriteTimeout has fired.
  1907  func (st *stream) onWriteTimeout() {
  1908  	st.sc.writeFrameFromHandler(FrameWriteRequest{write: StreamError{
  1909  		StreamID: st.id,
  1910  		Code:     ErrCodeInternal,
  1911  		Cause:    os.ErrDeadlineExceeded,
  1912  	}})
  1913  }
  1914  
  1915  func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
  1916  	sc.serveG.check()
  1917  	id := f.StreamID
  1918  	// http://tools.ietf.org/html/rfc7540#section-5.1.1
  1919  	// Streams initiated by a client MUST use odd-numbered stream
  1920  	// identifiers. [...] An endpoint that receives an unexpected
  1921  	// stream identifier MUST respond with a connection error
  1922  	// (Section 5.4.1) of type PROTOCOL_ERROR.
  1923  	if id%2 != 1 {
  1924  		return sc.countError("headers_even", ConnectionError(ErrCodeProtocol))
  1925  	}
  1926  	// A HEADERS frame can be used to create a new stream or
  1927  	// send a trailer for an open one. If we already have a stream
  1928  	// open, let it process its own HEADERS frame (trailers at this
  1929  	// point, if it's valid).
  1930  	if st := sc.streams[f.StreamID]; st != nil {
  1931  		if st.resetQueued {
  1932  			// We're sending RST_STREAM to close the stream, so don't bother
  1933  			// processing this frame.
  1934  			return nil
  1935  		}
  1936  		// RFC 7540, sec 5.1: If an endpoint receives additional frames, other than
  1937  		// WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in
  1938  		// this state, it MUST respond with a stream error (Section 5.4.2) of
  1939  		// type STREAM_CLOSED.
  1940  		if st.state == stateHalfClosedRemote {
  1941  			return sc.countError("headers_half_closed", streamError(id, ErrCodeStreamClosed))
  1942  		}
  1943  		return st.processTrailerHeaders(f)
  1944  	}
  1945  
  1946  	// [...] The identifier of a newly established stream MUST be
  1947  	// numerically greater than all streams that the initiating
  1948  	// endpoint has opened or reserved. [...]  An endpoint that
  1949  	// receives an unexpected stream identifier MUST respond with
  1950  	// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
  1951  	if id <= sc.maxClientStreamID {
  1952  		return sc.countError("stream_went_down", ConnectionError(ErrCodeProtocol))
  1953  	}
  1954  	sc.maxClientStreamID = id
  1955  
  1956  	if sc.idleTimer != nil {
  1957  		sc.idleTimer.Stop()
  1958  	}
  1959  
  1960  	// http://tools.ietf.org/html/rfc7540#section-5.1.2
  1961  	// [...] Endpoints MUST NOT exceed the limit set by their peer. An
  1962  	// endpoint that receives a HEADERS frame that causes their
  1963  	// advertised concurrent stream limit to be exceeded MUST treat
  1964  	// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR
  1965  	// or REFUSED_STREAM.
  1966  	if sc.curClientStreams+1 > sc.advMaxStreams {
  1967  		if sc.unackedSettings == 0 {
  1968  			// They should know better.
  1969  			return sc.countError("over_max_streams", streamError(id, ErrCodeProtocol))
  1970  		}
  1971  		// Assume it's a network race, where they just haven't
  1972  		// received our last SETTINGS update. But actually
  1973  		// this can't happen yet, because we don't yet provide
  1974  		// a way for users to adjust server parameters at
  1975  		// runtime.
  1976  		return sc.countError("over_max_streams_race", streamError(id, ErrCodeRefusedStream))
  1977  	}
  1978  
  1979  	initialState := stateOpen
  1980  	if f.StreamEnded() {
  1981  		initialState = stateHalfClosedRemote
  1982  	}
  1983  	st := sc.newStream(id, 0, initialState)
  1984  
  1985  	if f.HasPriority() {
  1986  		if err := sc.checkPriority(f.StreamID, f.Priority); err != nil {
  1987  			return err
  1988  		}
  1989  		sc.writeSched.AdjustStream(st.id, f.Priority)
  1990  	}
  1991  
  1992  	rw, req, err := sc.newWriterAndRequest(st, f)
  1993  	if err != nil {
  1994  		return err
  1995  	}
  1996  	st.reqTrailer = req.Trailer
  1997  	if st.reqTrailer != nil {
  1998  		st.trailer = make(http.Header)
  1999  	}
  2000  	st.body = req.Body.(*requestBody).pipe // may be nil
  2001  	st.declBodyBytes = req.ContentLength
  2002  
  2003  	handler := sc.handler.ServeHTTP
  2004  	if f.Truncated {
  2005  		// Their header list was too long. Send a 431 error.
  2006  		handler = handleHeaderListTooLong
  2007  	} else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil {
  2008  		handler = new400Handler(err)
  2009  	}
  2010  
  2011  	// The net/http package sets the read deadline from the
  2012  	// http.Server.ReadTimeout during the TLS handshake, but then
  2013  	// passes the connection off to us with the deadline already
  2014  	// set. Disarm it here after the request headers are read,
  2015  	// similar to how the http1 server works. Here it's
  2016  	// technically more like the http1 Server's ReadHeaderTimeout
  2017  	// (in Go 1.8), though. That's a more sane option anyway.
  2018  	if sc.hs.ReadTimeout != 0 {
  2019  		sc.conn.SetReadDeadline(time.Time{})
  2020  		if st.body != nil {
  2021  			st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
  2022  		}
  2023  	}
  2024  
  2025  	go sc.runHandler(rw, req, handler)
  2026  	return nil
  2027  }
  2028  
  2029  func (sc *serverConn) upgradeRequest(req *http.Request) {
  2030  	sc.serveG.check()
  2031  	id := uint32(1)
  2032  	sc.maxClientStreamID = id
  2033  	st := sc.newStream(id, 0, stateHalfClosedRemote)
  2034  	st.reqTrailer = req.Trailer
  2035  	if st.reqTrailer != nil {
  2036  		st.trailer = make(http.Header)
  2037  	}
  2038  	rw := sc.newResponseWriter(st, req)
  2039  
  2040  	// Disable any read deadline set by the net/http package
  2041  	// prior to the upgrade.
  2042  	if sc.hs.ReadTimeout != 0 {
  2043  		sc.conn.SetReadDeadline(time.Time{})
  2044  	}
  2045  
  2046  	go sc.runHandler(rw, req, sc.handler.ServeHTTP)
  2047  }
  2048  
  2049  func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
  2050  	sc := st.sc
  2051  	sc.serveG.check()
  2052  	if st.gotTrailerHeader {
  2053  		return sc.countError("dup_trailers", ConnectionError(ErrCodeProtocol))
  2054  	}
  2055  	st.gotTrailerHeader = true
  2056  	if !f.StreamEnded() {
  2057  		return sc.countError("trailers_not_ended", streamError(st.id, ErrCodeProtocol))
  2058  	}
  2059  
  2060  	if len(f.PseudoFields()) > 0 {
  2061  		return sc.countError("trailers_pseudo", streamError(st.id, ErrCodeProtocol))
  2062  	}
  2063  	if st.trailer != nil {
  2064  		for _, hf := range f.RegularFields() {
  2065  			key := sc.canonicalHeader(hf.Name)
  2066  			if !httpguts.ValidTrailerHeader(key) {
  2067  				// TODO: send more details to the peer somehow. But http2 has
  2068  				// no way to send debug data at a stream level. Discuss with
  2069  				// HTTP folk.
  2070  				return sc.countError("trailers_bogus", streamError(st.id, ErrCodeProtocol))
  2071  			}
  2072  			st.trailer[key] = append(st.trailer[key], hf.Value)
  2073  		}
  2074  	}
  2075  	st.endStream()
  2076  	return nil
  2077  }
  2078  
  2079  func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error {
  2080  	if streamID == p.StreamDep {
  2081  		// Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
  2082  		// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
  2083  		// Section 5.3.3 says that a stream can depend on one of its dependencies,
  2084  		// so it's only self-dependencies that are forbidden.
  2085  		return sc.countError("priority", streamError(streamID, ErrCodeProtocol))
  2086  	}
  2087  	return nil
  2088  }
  2089  
  2090  func (sc *serverConn) processPriority(f *PriorityFrame) error {
  2091  	if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil {
  2092  		return err
  2093  	}
  2094  	sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)
  2095  	return nil
  2096  }
  2097  
  2098  func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream {
  2099  	sc.serveG.check()
  2100  	if id == 0 {
  2101  		panic("internal error: cannot create stream with id 0")
  2102  	}
  2103  
  2104  	ctx, cancelCtx := context.WithCancel(sc.baseCtx)
  2105  	st := &stream{
  2106  		sc:        sc,
  2107  		id:        id,
  2108  		state:     state,
  2109  		ctx:       ctx,
  2110  		cancelCtx: cancelCtx,
  2111  	}
  2112  	st.cw.Init()
  2113  	st.flow.conn = &sc.flow // link to conn-level counter
  2114  	st.flow.add(sc.initialStreamSendWindowSize)
  2115  	st.inflow.init(sc.srv.initialStreamRecvWindowSize())
  2116  	if sc.hs.WriteTimeout != 0 {
  2117  		st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
  2118  	}
  2119  
  2120  	sc.streams[id] = st
  2121  	sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})
  2122  	if st.isPushed() {
  2123  		sc.curPushedStreams++
  2124  	} else {
  2125  		sc.curClientStreams++
  2126  	}
  2127  	if sc.curOpenStreams() == 1 {
  2128  		sc.setConnState(http.StateActive)
  2129  	}
  2130  
  2131  	return st
  2132  }
  2133  
  2134  func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
  2135  	sc.serveG.check()
  2136  
  2137  	rp := requestParam{
  2138  		method:    f.PseudoValue("method"),
  2139  		scheme:    f.PseudoValue("scheme"),
  2140  		authority: f.PseudoValue("authority"),
  2141  		path:      f.PseudoValue("path"),
  2142  	}
  2143  
  2144  	isConnect := rp.method == "CONNECT"
  2145  	if isConnect {
  2146  		if rp.path != "" || rp.scheme != "" || rp.authority == "" {
  2147  			return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
  2148  		}
  2149  	} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
  2150  		// See 8.1.2.6 Malformed Requests and Responses:
  2151  		//
  2152  		// Malformed requests or responses that are detected
  2153  		// MUST be treated as a stream error (Section 5.4.2)
  2154  		// of type PROTOCOL_ERROR."
  2155  		//
  2156  		// 8.1.2.3 Request Pseudo-Header Fields
  2157  		// "All HTTP/2 requests MUST include exactly one valid
  2158  		// value for the :method, :scheme, and :path
  2159  		// pseudo-header fields"
  2160  		return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
  2161  	}
  2162  
  2163  	rp.header = make(http.Header)
  2164  	for _, hf := range f.RegularFields() {
  2165  		rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
  2166  	}
  2167  	if rp.authority == "" {
  2168  		rp.authority = rp.header.Get("Host")
  2169  	}
  2170  
  2171  	rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
  2172  	if err != nil {
  2173  		return nil, nil, err
  2174  	}
  2175  	bodyOpen := !f.StreamEnded()
  2176  	if bodyOpen {
  2177  		if vv, ok := rp.header["Content-Length"]; ok {
  2178  			if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
  2179  				req.ContentLength = int64(cl)
  2180  			} else {
  2181  				req.ContentLength = 0
  2182  			}
  2183  		} else {
  2184  			req.ContentLength = -1
  2185  		}
  2186  		req.Body.(*requestBody).pipe = &pipe{
  2187  			b: &dataBuffer{expected: req.ContentLength},
  2188  		}
  2189  	}
  2190  	return rw, req, nil
  2191  }
  2192  
  2193  type requestParam struct {
  2194  	method                  string
  2195  	scheme, authority, path string
  2196  	header                  http.Header
  2197  }
  2198  
  2199  func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {
  2200  	sc.serveG.check()
  2201  
  2202  	var tlsState *tls.ConnectionState // nil if not scheme https
  2203  	if rp.scheme == "https" {
  2204  		tlsState = sc.tlsState
  2205  	}
  2206  
  2207  	needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue")
  2208  	if needsContinue {
  2209  		rp.header.Del("Expect")
  2210  	}
  2211  	// Merge Cookie headers into one "; "-delimited value.
  2212  	if cookies := rp.header["Cookie"]; len(cookies) > 1 {
  2213  		rp.header.Set("Cookie", strings.Join(cookies, "; "))
  2214  	}
  2215  
  2216  	// Setup Trailers
  2217  	var trailer http.Header
  2218  	for _, v := range rp.header["Trailer"] {
  2219  		for _, key := range strings.Split(v, ",") {
  2220  			key = http.CanonicalHeaderKey(textproto.TrimString(key))
  2221  			switch key {
  2222  			case "Transfer-Encoding", "Trailer", "Content-Length":
  2223  				// Bogus. (copy of http1 rules)
  2224  				// Ignore.
  2225  			default:
  2226  				if trailer == nil {
  2227  					trailer = make(http.Header)
  2228  				}
  2229  				trailer[key] = nil
  2230  			}
  2231  		}
  2232  	}
  2233  	delete(rp.header, "Trailer")
  2234  
  2235  	var url_ *url.URL
  2236  	var requestURI string
  2237  	if rp.method == "CONNECT" {
  2238  		url_ = &url.URL{Host: rp.authority}
  2239  		requestURI = rp.authority // mimic HTTP/1 server behavior
  2240  	} else {
  2241  		var err error
  2242  		url_, err = url.ParseRequestURI(rp.path)
  2243  		if err != nil {
  2244  			return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol))
  2245  		}
  2246  		requestURI = rp.path
  2247  	}
  2248  
  2249  	body := &requestBody{
  2250  		conn:          sc,
  2251  		stream:        st,
  2252  		needsContinue: needsContinue,
  2253  	}
  2254  	req := &http.Request{
  2255  		Method:     rp.method,
  2256  		URL:        url_,
  2257  		RemoteAddr: sc.remoteAddrStr,
  2258  		Header:     rp.header,
  2259  		RequestURI: requestURI,
  2260  		Proto:      "HTTP/2.0",
  2261  		ProtoMajor: 2,
  2262  		ProtoMinor: 0,
  2263  		TLS:        tlsState,
  2264  		Host:       rp.authority,
  2265  		Body:       body,
  2266  		Trailer:    trailer,
  2267  	}
  2268  	req = req.WithContext(st.ctx)
  2269  
  2270  	rw := sc.newResponseWriter(st, req)
  2271  	return rw, req, nil
  2272  }
  2273  
  2274  func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *responseWriter {
  2275  	rws := responseWriterStatePool.Get().(*responseWriterState)
  2276  	bwSave := rws.bw
  2277  	*rws = responseWriterState{} // zero all the fields
  2278  	rws.conn = sc
  2279  	rws.bw = bwSave
  2280  	rws.bw.Reset(chunkWriter{rws})
  2281  	rws.stream = st
  2282  	rws.req = req
  2283  	return &responseWriter{rws: rws}
  2284  }
  2285  
  2286  // Run on its own goroutine.
  2287  func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
  2288  	didPanic := true
  2289  	defer func() {
  2290  		rw.rws.stream.cancelCtx()
  2291  		if req.MultipartForm != nil {
  2292  			req.MultipartForm.RemoveAll()
  2293  		}
  2294  		if didPanic {
  2295  			e := recover()
  2296  			sc.writeFrameFromHandler(FrameWriteRequest{
  2297  				write:  handlerPanicRST{rw.rws.stream.id},
  2298  				stream: rw.rws.stream,
  2299  			})
  2300  			// Same as net/http:
  2301  			if e != nil && e != http.ErrAbortHandler {
  2302  				const size = 64 << 10
  2303  				buf := make([]byte, size)
  2304  				buf = buf[:runtime.Stack(buf, false)]
  2305  				sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
  2306  			}
  2307  			return
  2308  		}
  2309  		rw.handlerDone()
  2310  	}()
  2311  	handler(rw, req)
  2312  	didPanic = false
  2313  }
  2314  
  2315  func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) {
  2316  	// 10.5.1 Limits on Header Block Size:
  2317  	// .. "A server that receives a larger header block than it is
  2318  	// willing to handle can send an HTTP 431 (Request Header Fields Too
  2319  	// Large) status code"
  2320  	const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
  2321  	w.WriteHeader(statusRequestHeaderFieldsTooLarge)
  2322  	io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>")
  2323  }
  2324  
  2325  // called from handler goroutines.
  2326  // h may be nil.
  2327  func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
  2328  	sc.serveG.checkNotOn() // NOT on
  2329  	var errc chan error
  2330  	if headerData.h != nil {
  2331  		// If there's a header map (which we don't own), we have to block
  2332  		// until this frame is written, so that an http.Flush mid-handler
  2333  		// writes out the correct value of keys before a handler later
  2334  		// potentially mutates it.
  2335  		errc = errChanPool.Get().(chan error)
  2336  	}
  2337  	if err := sc.writeFrameFromHandler(FrameWriteRequest{
  2338  		write:  headerData,
  2339  		stream: st,
  2340  		done:   errc,
  2341  	}); err != nil {
  2342  		return err
  2343  	}
  2344  	if errc != nil {
  2345  		select {
  2346  		case err := <-errc:
  2347  			errChanPool.Put(errc)
  2348  			return err
  2349  		case <-sc.doneServing:
  2350  			return errClientDisconnected
  2351  		case <-st.cw:
  2352  			return errStreamClosed
  2353  		}
  2354  	}
  2355  	return nil
  2356  }
  2357  
  2358  // called from handler goroutines.
  2359  func (sc *serverConn) write100ContinueHeaders(st *stream) {
  2360  	sc.writeFrameFromHandler(FrameWriteRequest{
  2361  		write:  write100ContinueHeadersFrame{st.id},
  2362  		stream: st,
  2363  	})
  2364  }
  2365  
  2366  // A bodyReadMsg tells the server loop that the http.Handler read n
  2367  // bytes of the DATA from the client on the given stream.
  2368  type bodyReadMsg struct {
  2369  	st *stream
  2370  	n  int
  2371  }
  2372  
  2373  // called from handler goroutines.
  2374  // Notes that the handler for the given stream ID read n bytes of its body
  2375  // and schedules flow control tokens to be sent.
  2376  func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
  2377  	sc.serveG.checkNotOn() // NOT on
  2378  	if n > 0 {
  2379  		select {
  2380  		case sc.bodyReadCh <- bodyReadMsg{st, n}:
  2381  		case <-sc.doneServing:
  2382  		}
  2383  	}
  2384  }
  2385  
  2386  func (sc *serverConn) noteBodyRead(st *stream, n int) {
  2387  	sc.serveG.check()
  2388  	sc.sendWindowUpdate(nil, n) // conn-level
  2389  	if st.state != stateHalfClosedRemote && st.state != stateClosed {
  2390  		// Don't send this WINDOW_UPDATE if the stream is closed
  2391  		// remotely.
  2392  		sc.sendWindowUpdate(st, n)
  2393  	}
  2394  }
  2395  
  2396  // st may be nil for conn-level
  2397  func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
  2398  	sc.sendWindowUpdate(st, int(n))
  2399  }
  2400  
  2401  // st may be nil for conn-level
  2402  func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
  2403  	sc.serveG.check()
  2404  	var streamID uint32
  2405  	var send int32
  2406  	if st == nil {
  2407  		send = sc.inflow.add(n)
  2408  	} else {
  2409  		streamID = st.id
  2410  		send = st.inflow.add(n)
  2411  	}
  2412  	if send == 0 {
  2413  		return
  2414  	}
  2415  	sc.writeFrame(FrameWriteRequest{
  2416  		write:  writeWindowUpdate{streamID: streamID, n: uint32(send)},
  2417  		stream: st,
  2418  	})
  2419  }
  2420  
  2421  // requestBody is the Handler's Request.Body type.
  2422  // Read and Close may be called concurrently.
  2423  type requestBody struct {
  2424  	_             incomparable
  2425  	stream        *stream
  2426  	conn          *serverConn
  2427  	closeOnce     sync.Once // for use by Close only
  2428  	sawEOF        bool      // for use by Read only
  2429  	pipe          *pipe     // non-nil if we have an HTTP entity message body
  2430  	needsContinue bool      // need to send a 100-continue
  2431  }
  2432  
  2433  func (b *requestBody) Close() error {
  2434  	b.closeOnce.Do(func() {
  2435  		if b.pipe != nil {
  2436  			b.pipe.BreakWithError(errClosedBody)
  2437  		}
  2438  	})
  2439  	return nil
  2440  }
  2441  
  2442  func (b *requestBody) Read(p []byte) (n int, err error) {
  2443  	if b.needsContinue {
  2444  		b.needsContinue = false
  2445  		b.conn.write100ContinueHeaders(b.stream)
  2446  	}
  2447  	if b.pipe == nil || b.sawEOF {
  2448  		return 0, io.EOF
  2449  	}
  2450  	n, err = b.pipe.Read(p)
  2451  	if err == io.EOF {
  2452  		b.sawEOF = true
  2453  	}
  2454  	if b.conn == nil && inTests {
  2455  		return
  2456  	}
  2457  	b.conn.noteBodyReadFromHandler(b.stream, n, err)
  2458  	return
  2459  }
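
// Illustrative sketch of how a handler's body reads drive requestBody
// (the handler name is hypothetical, not part of this package). For a
// request sent with "Expect: 100-continue", the first Read on r.Body
// triggers write100ContinueHeaders above before any data is returned:
//
//	func exampleEchoHandler(w http.ResponseWriter, r *http.Request) {
//		body, err := io.ReadAll(r.Body) // first Read may send 100 Continue
//		if err != nil {
//			w.WriteHeader(400)
//			return
//		}
//		w.Write(body)
//	}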
  2460  
  2461  // responseWriter is the http.ResponseWriter implementation. It's
  2462  // intentionally small (1 pointer wide) to minimize garbage. The
  2463  // responseWriterState pointer inside is zeroed at the end of a
  2464  // request (in handlerDone) and calls on the responseWriter thereafter
  2465  // simply crash (caller's mistake), but the much larger responseWriterState
  2466  // and buffers are reused between multiple requests.
  2467  type responseWriter struct {
  2468  	rws *responseWriterState
  2469  }
  2470  
  2471  // Optional http.ResponseWriter interfaces implemented.
  2472  var (
  2473  	_ http.CloseNotifier = (*responseWriter)(nil)
  2474  	_ http.Flusher       = (*responseWriter)(nil)
  2475  	_ stringWriter       = (*responseWriter)(nil)
  2476  )
  2477  
  2478  type responseWriterState struct {
  2479  	// immutable within a request:
  2480  	stream *stream
  2481  	req    *http.Request
  2482  	conn   *serverConn
  2483  
  2484  	// TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
  2485  	bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
  2486  
  2487  	// mutated by http.Handler goroutine:
  2488  	handlerHeader http.Header // nil until called
  2489  	snapHeader    http.Header // snapshot of handlerHeader at WriteHeader time
  2490  	trailers      []string    // set in writeChunk
  2491  	status        int         // status code passed to WriteHeader
  2492  	wroteHeader   bool        // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
  2493  	sentHeader    bool        // have we sent the header frame?
  2494  	handlerDone   bool        // handler has finished
  2495  	dirty         bool        // a Write failed; don't reuse this responseWriterState
  2496  
  2497  	sentContentLen int64 // non-zero if handler set a Content-Length header
  2498  	wroteBytes     int64
  2499  
  2500  	closeNotifierMu sync.Mutex // guards closeNotifierCh
  2501  	closeNotifierCh chan bool  // nil until first used
  2502  }
  2503  
  2504  type chunkWriter struct{ rws *responseWriterState }
  2505  
  2506  func (cw chunkWriter) Write(p []byte) (n int, err error) {
  2507  	n, err = cw.rws.writeChunk(p)
  2508  	if err == errStreamClosed {
  2509  		// If writing failed because the stream has been closed,
  2510  		// return the reason it was closed.
  2511  		err = cw.rws.stream.closeErr
  2512  	}
  2513  	return n, err
  2514  }
  2515  
  2516  func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 }
  2517  
  2518  func (rws *responseWriterState) hasNonemptyTrailers() bool {
  2519  	for _, trailer := range rws.trailers {
  2520  		if _, ok := rws.handlerHeader[trailer]; ok {
  2521  			return true
  2522  		}
  2523  	}
  2524  	return false
  2525  }
  2526  
  2527  // declareTrailer is called for each Trailer header when the
  2528  // response header is written. It notes that a header will need to be
  2529  // written in the trailers at the end of the response.
  2530  func (rws *responseWriterState) declareTrailer(k string) {
  2531  	k = http.CanonicalHeaderKey(k)
  2532  	if !httpguts.ValidTrailerHeader(k) {
  2533  		// Forbidden by RFC 7230, section 4.1.2.
  2534  		rws.conn.logf("ignoring invalid trailer %q", k)
  2535  		return
  2536  	}
  2537  	if !strSliceContains(rws.trailers, k) {
  2538  		rws.trailers = append(rws.trailers, k)
  2539  	}
  2540  }
  2541  
  2542  // writeChunk writes chunks from the bufio.Writer. But because
  2543  // bufio.Writer may bypass its chunking, sometimes p may be
  2544  // arbitrarily large.
  2545  //
  2546  // writeChunk is also responsible (on the first chunk) for sending the
  2547  // HEADER response.
  2548  func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
  2549  	if !rws.wroteHeader {
  2550  		rws.writeHeader(200)
  2551  	}
  2552  
  2553  	if rws.handlerDone {
  2554  		rws.promoteUndeclaredTrailers()
  2555  	}
  2556  
  2557  	isHeadResp := rws.req.Method == "HEAD"
  2558  	if !rws.sentHeader {
  2559  		rws.sentHeader = true
  2560  		var ctype, clen string
  2561  		if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
  2562  			rws.snapHeader.Del("Content-Length")
  2563  			if cl, err := strconv.ParseUint(clen, 10, 63); err == nil {
  2564  				rws.sentContentLen = int64(cl)
  2565  			} else {
  2566  				clen = ""
  2567  			}
  2568  		}
  2569  		if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
  2570  			clen = strconv.Itoa(len(p))
  2571  		}
  2572  		_, hasContentType := rws.snapHeader["Content-Type"]
  2573  		// If the Content-Encoding is non-blank, we shouldn't
  2574  		// sniff the body. See Issue golang.org/issue/31753.
  2575  		ce := rws.snapHeader.Get("Content-Encoding")
  2576  		hasCE := len(ce) > 0
  2577  		if !hasCE && !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 {
  2578  			ctype = http.DetectContentType(p)
  2579  		}
  2580  		var date string
  2581  		if _, ok := rws.snapHeader["Date"]; !ok {
  2582  			// TODO(bradfitz): be faster here, like net/http? measure.
  2583  			date = time.Now().UTC().Format(http.TimeFormat)
  2584  		}
  2585  
  2586  		for _, v := range rws.snapHeader["Trailer"] {
  2587  			foreachHeaderElement(v, rws.declareTrailer)
  2588  		}
  2589  
  2590  		// "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2),
  2591  		// but respect "Connection" == "close" to mean sending a GOAWAY and tearing
  2592  		// down the TCP connection when idle, like we do for HTTP/1.
  2593  		// TODO: remove more Connection-specific header fields here, in addition
  2594  		// to "Connection".
  2595  		if _, ok := rws.snapHeader["Connection"]; ok {
  2596  			v := rws.snapHeader.Get("Connection")
  2597  			delete(rws.snapHeader, "Connection")
  2598  			if v == "close" {
  2599  				rws.conn.startGracefulShutdown()
  2600  			}
  2601  		}
  2602  
  2603  		endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
  2604  		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
  2605  			streamID:      rws.stream.id,
  2606  			httpResCode:   rws.status,
  2607  			h:             rws.snapHeader,
  2608  			endStream:     endStream,
  2609  			contentType:   ctype,
  2610  			contentLength: clen,
  2611  			date:          date,
  2612  		})
  2613  		if err != nil {
  2614  			rws.dirty = true
  2615  			return 0, err
  2616  		}
  2617  		if endStream {
  2618  			return 0, nil
  2619  		}
  2620  	}
  2621  	if isHeadResp {
  2622  		return len(p), nil
  2623  	}
  2624  	if len(p) == 0 && !rws.handlerDone {
  2625  		return 0, nil
  2626  	}
  2627  
  2628  	// only send trailers if they have actually been defined by the
  2629  	// server handler.
  2630  	hasNonemptyTrailers := rws.hasNonemptyTrailers()
  2631  	endStream := rws.handlerDone && !hasNonemptyTrailers
  2632  	if len(p) > 0 || endStream {
  2633  		// only send a 0 byte DATA frame if we're ending the stream.
  2634  		if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
  2635  			rws.dirty = true
  2636  			return 0, err
  2637  		}
  2638  	}
  2639  
  2640  	if rws.handlerDone && hasNonemptyTrailers {
  2641  		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
  2642  			streamID:  rws.stream.id,
  2643  			h:         rws.handlerHeader,
  2644  			trailers:  rws.trailers,
  2645  			endStream: true,
  2646  		})
  2647  		if err != nil {
  2648  			rws.dirty = true
  2649  		}
  2650  		return len(p), err
  2651  	}
  2652  	return len(p), nil
  2653  }
  2654  
  2655  // TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
  2656  // that, if present, signals that the map entry is actually for
  2657  // the response trailers, and not the response headers. The prefix
  2658  // is stripped after the ServeHTTP call finishes and the values are
  2659  // sent in the trailers.
  2660  //
  2661  // This mechanism is intended only for trailers that are not known
  2662  // prior to the headers being written. If the set of trailers is fixed
  2663  // or known before the header is written, the normal Go trailers mechanism
  2664  // is preferred:
  2665  //
  2666  //	https://golang.org/pkg/net/http/#ResponseWriter
  2667  //	https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
  2668  const TrailerPrefix = "Trailer:"
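
// Illustrative sketch of the TrailerPrefix mechanism (the handler name and
// the "Checksum" field are hypothetical, used only for the example):
//
//	func exampleTrailerHandler(w http.ResponseWriter, r *http.Request) {
//		w.Header().Set("Content-Type", "text/plain")
//		w.WriteHeader(200)             // response headers are committed here
//		io.WriteString(w, "some body") // trailer value not yet known
//		// Not predeclared in a "Trailer" header; the prefix promotes it
//		// in promoteUndeclaredTrailers after the handler returns.
//		w.Header().Set(TrailerPrefix+"Checksum", "abc123")
//	}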
  2669  
  2670  // promoteUndeclaredTrailers permits http.Handlers to set trailers
  2671  // after the header has already been flushed. Because the Go
  2672  // ResponseWriter interface has no way to set Trailers (only the
  2673  // Header), and because we didn't want to expand the ResponseWriter
  2674  // interface, and because nobody used trailers, and because RFC 7230
  2675  // says you SHOULD (but not must) predeclare any trailers in the
  2676  // header, the official ResponseWriter rules said trailers in Go must
  2677  // be predeclared, and then we reuse the same ResponseWriter.Header()
  2678  // map to mean both Headers and Trailers. When it's time to write the
  2679  // Trailers, we pick out the fields of Headers that were declared as
  2680  // trailers. That worked for a while, until we found the first major
  2681  // user of Trailers in the wild: gRPC (using them only over http2),
  2682  // and gRPC libraries permit setting trailers mid-stream without
  2683  // predeclaring them. So: change of plans. We still permit the old
  2684  // way, but we also permit this hack: if a Header() key begins with
  2685  // "Trailer:", the suffix of that key is a Trailer. Because ':' is an
  2686  // invalid token byte anyway, there is no ambiguity. (And it's already
  2687  // filtered out.) It's mildly hacky, but not terrible.
  2688  //
  2689  // This method runs after the Handler is done and promotes any Header
  2690  // fields to be trailers.
  2691  func (rws *responseWriterState) promoteUndeclaredTrailers() {
  2692  	for k, vv := range rws.handlerHeader {
  2693  		if !strings.HasPrefix(k, TrailerPrefix) {
  2694  			continue
  2695  		}
  2696  		trailerKey := strings.TrimPrefix(k, TrailerPrefix)
  2697  		rws.declareTrailer(trailerKey)
  2698  		rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
  2699  	}
  2700  
  2701  	if len(rws.trailers) > 1 {
  2702  		sorter := sorterPool.Get().(*sorter)
  2703  		sorter.SortStrings(rws.trailers)
  2704  		sorterPool.Put(sorter)
  2705  	}
  2706  }
  2707  
  2708  func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
  2709  	st := w.rws.stream
  2710  	if !deadline.IsZero() && deadline.Before(time.Now()) {
  2711  		// If we're setting a deadline in the past, reset the stream immediately
  2712  		// so reads after SetReadDeadline returns will fail.
  2713  		st.onReadTimeout()
  2714  		return nil
  2715  	}
  2716  	w.rws.conn.sendServeMsg(func(sc *serverConn) {
  2717  		if st.readDeadline != nil {
  2718  			if !st.readDeadline.Stop() {
  2719  				// Deadline already exceeded, or stream has been closed.
  2720  				return
  2721  			}
  2722  		}
  2723  		if deadline.IsZero() {
  2724  			st.readDeadline = nil
  2725  		} else if st.readDeadline == nil {
  2726  			st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
  2727  		} else {
  2728  			st.readDeadline.Reset(deadline.Sub(time.Now()))
  2729  		}
  2730  	})
  2731  	return nil
  2732  }
  2733  
  2734  func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
  2735  	st := w.rws.stream
  2736  	if !deadline.IsZero() && deadline.Before(time.Now()) {
  2737  		// If we're setting a deadline in the past, reset the stream immediately
  2738  		// so writes after SetWriteDeadline returns will fail.
  2739  		st.onWriteTimeout()
  2740  		return nil
  2741  	}
  2742  	w.rws.conn.sendServeMsg(func(sc *serverConn) {
  2743  		if st.writeDeadline != nil {
  2744  			if !st.writeDeadline.Stop() {
  2745  				// Deadline already exceeded, or stream has been closed.
  2746  				return
  2747  			}
  2748  		}
  2749  		if deadline.IsZero() {
  2750  			st.writeDeadline = nil
  2751  		} else if st.writeDeadline == nil {
  2752  			st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
  2753  		} else {
  2754  			st.writeDeadline.Reset(deadline.Sub(time.Now()))
  2755  		}
  2756  	})
  2757  	return nil
  2758  }
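
// Illustrative sketch of setting per-stream deadlines from a handler (the
// handler name and the interface literal are hypothetical; they rely only
// on the SetReadDeadline/SetWriteDeadline methods defined above):
//
//	func exampleDeadlineHandler(w http.ResponseWriter, r *http.Request) {
//		type deadliner interface {
//			SetReadDeadline(time.Time) error
//			SetWriteDeadline(time.Time) error
//		}
//		if d, ok := w.(deadliner); ok {
//			d.SetReadDeadline(time.Now().Add(5 * time.Second))   // bound body reads
//			d.SetWriteDeadline(time.Now().Add(10 * time.Second)) // bound response writes
//		}
//		io.Copy(io.Discard, r.Body)
//	}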
  2759  
  2760  func (w *responseWriter) Flush() {
  2761  	w.FlushError()
  2762  }
  2763  
  2764  func (w *responseWriter) FlushError() error {
  2765  	rws := w.rws
  2766  	if rws == nil {
  2767  		panic("Flush called after Handler finished")
  2768  	}
  2769  	var err error
  2770  	if rws.bw.Buffered() > 0 {
  2771  		err = rws.bw.Flush()
  2772  	} else {
  2773  		// The bufio.Writer won't call chunkWriter.Write (writeChunk)
  2774  		// with zero bytes, so we have to do it ourselves to force the
  2775  		// HTTP response header and/or final DATA frame (with END_STREAM)
  2776  		// to be sent.
  2777  		_, err = chunkWriter{rws}.Write(nil)
  2778  		if err == nil {
  2779  			select {
  2780  			case <-rws.stream.cw:
  2781  				err = rws.stream.closeErr
  2782  			default:
  2783  			}
  2784  		}
  2785  	}
  2786  	return err
  2787  }
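
// Illustrative sketch of streaming with Flush (hypothetical handler, not
// part of this package). Each Flush drains rws.bw through chunkWriter, so
// buffered DATA frames are sent immediately rather than at handler return:
//
//	func exampleStreamHandler(w http.ResponseWriter, r *http.Request) {
//		fl, ok := w.(http.Flusher)
//		for i := 0; i < 3; i++ {
//			fmt.Fprintf(w, "tick %d\n", i)
//			if ok {
//				fl.Flush()
//			}
//			time.Sleep(time.Second)
//		}
//	}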
  2788  
  2789  func (w *responseWriter) CloseNotify() <-chan bool {
  2790  	rws := w.rws
  2791  	if rws == nil {
  2792  		panic("CloseNotify called after Handler finished")
  2793  	}
  2794  	rws.closeNotifierMu.Lock()
  2795  	ch := rws.closeNotifierCh
  2796  	if ch == nil {
  2797  		ch = make(chan bool, 1)
  2798  		rws.closeNotifierCh = ch
  2799  		cw := rws.stream.cw
  2800  		go func() {
  2801  			cw.Wait() // wait for close
  2802  			ch <- true
  2803  		}()
  2804  	}
  2805  	rws.closeNotifierMu.Unlock()
  2806  	return ch
  2807  }
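
// Illustrative sketch of CloseNotify from a handler (hypothetical handler;
// in new code r.Context().Done() generally serves the same purpose, but the
// channel below is the one fed by the goroutine above):
//
//	func exampleWatchHandler(w http.ResponseWriter, r *http.Request) {
//		done := w.(http.CloseNotifier).CloseNotify()
//		select {
//		case <-done:
//			return // client went away; stop work early
//		case <-time.After(10 * time.Second):
//			io.WriteString(w, "finished")
//		}
//	}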
  2808  
  2809  func (w *responseWriter) Header() http.Header {
  2810  	rws := w.rws
  2811  	if rws == nil {
  2812  		panic("Header called after Handler finished")
  2813  	}
  2814  	if rws.handlerHeader == nil {
  2815  		rws.handlerHeader = make(http.Header)
  2816  	}
  2817  	return rws.handlerHeader
  2818  }
  2819  
  2820  // checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode.
  2821  func checkWriteHeaderCode(code int) {
  2822  	// Issue 22880: require valid WriteHeader status codes.
  2823  	// For now we only enforce that it's three digits.
  2824  	// In the future we might block things over 599 (600 and above aren't defined
  2825  	// at http://httpwg.org/specs/rfc7231.html#status.codes).
  2826  	// But for now any three digits.
  2827  	//
  2828  	// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
  2829  	// no equivalent bogus thing we can realistically send in HTTP/2,
  2830  	// so we'll consistently panic instead and help people find their bugs
  2831  	// early. (We can't return an error from WriteHeader even if we wanted to.)
  2832  	if code < 100 || code > 999 {
  2833  		panic(fmt.Sprintf("invalid WriteHeader code %v", code))
  2834  	}
  2835  }
  2836  
  2837  func (w *responseWriter) WriteHeader(code int) {
  2838  	rws := w.rws
  2839  	if rws == nil {
  2840  		panic("WriteHeader called after Handler finished")
  2841  	}
  2842  	rws.writeHeader(code)
  2843  }
  2844  
  2845  func (rws *responseWriterState) writeHeader(code int) {
  2846  	if rws.wroteHeader {
  2847  		return
  2848  	}
  2849  
  2850  	checkWriteHeaderCode(code)
  2851  
  2852  	// Handle informational headers
  2853  	if code >= 100 && code <= 199 {
  2854  		// Per RFC 8297 we must not clear the current header map
  2855  		h := rws.handlerHeader
  2856  
  2857  		_, cl := h["Content-Length"]
  2858  		_, te := h["Transfer-Encoding"]
  2859  		if cl || te {
  2860  			h = h.Clone()
  2861  			h.Del("Content-Length")
  2862  			h.Del("Transfer-Encoding")
  2863  		}
  2864  
  2865  		if rws.conn.writeHeaders(rws.stream, &writeResHeaders{
  2866  			streamID:    rws.stream.id,
  2867  			httpResCode: code,
  2868  			h:           h,
  2869  			endStream:   rws.handlerDone && !rws.hasTrailers(),
  2870  		}) != nil {
  2871  			rws.dirty = true
  2872  		}
  2873  
  2874  		return
  2875  	}
  2876  
  2877  	rws.wroteHeader = true
  2878  	rws.status = code
  2879  	if len(rws.handlerHeader) > 0 {
  2880  		rws.snapHeader = cloneHeader(rws.handlerHeader)
  2881  	}
  2882  }
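
        // Illustrative sketch (not used by this package): sending a 103 Early
        // Hints interim response before the final headers, which exercises the
        // 1xx branch of writeHeader above. Per RFC 8297 the header map is kept,
        // so the Link header is sent again with the final response.
        func exampleEarlyHintsHandler(w http.ResponseWriter, r *http.Request) {
        	w.Header().Add("Link", "</style.css>; rel=preload; as=style")
        	w.WriteHeader(103) // interim: headers written, wroteHeader stays false
        	w.WriteHeader(200) // final: snapshots the header map for writeChunk
        	w.Write([]byte("<html>...</html>"))
        }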
  2883  
  2884  func cloneHeader(h http.Header) http.Header {
  2885  	h2 := make(http.Header, len(h))
  2886  	for k, vv := range h {
  2887  		vv2 := make([]string, len(vv))
  2888  		copy(vv2, vv)
  2889  		h2[k] = vv2
  2890  	}
  2891  	return h2
  2892  }
  2893  
  2894  // The Life Of A Write is like this:
  2895  //
  2896  // * Handler calls w.Write or w.WriteString ->
  2897  // * -> rws.bw (*bufio.Writer) ->
  2898  // * (Handler might call Flush)
  2899  // * -> chunkWriter{rws}
  2900  // * -> responseWriterState.writeChunk(p []byte)
  2901  // * -> responseWriterState.writeChunk (most of the magic; see comment there)
  2902  func (w *responseWriter) Write(p []byte) (n int, err error) {
  2903  	return w.write(len(p), p, "")
  2904  }
  2905  
  2906  func (w *responseWriter) WriteString(s string) (n int, err error) {
  2907  	return w.write(len(s), nil, s)
  2908  }
  2909  
  2910  // either dataB or dataS is non-zero.
  2911  func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
  2912  	rws := w.rws
  2913  	if rws == nil {
  2914  		panic("Write called after Handler finished")
  2915  	}
  2916  	if !rws.wroteHeader {
  2917  		w.WriteHeader(200)
  2918  	}
  2919  	if !bodyAllowedForStatus(rws.status) {
  2920  		return 0, http.ErrBodyNotAllowed
  2921  	}
  2922  	rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
  2923  	if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
  2924  		// TODO: send a RST_STREAM
  2925  		return 0, errors.New("http2: handler wrote more than declared Content-Length")
  2926  	}
  2927  
  2928  	if dataB != nil {
  2929  		return rws.bw.Write(dataB)
  2930  	} else {
  2931  		return rws.bw.WriteString(dataS)
  2932  	}
  2933  }
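
        // Illustrative sketch (not used by this package): when a Handler never
        // calls WriteHeader, the first Write above implicitly sends a 200 along
        // with whatever headers were set beforehand.
        func exampleImplicitStatusHandler(w http.ResponseWriter, r *http.Request) {
        	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
        	io.WriteString(w, "implicit 200\n") // triggers WriteHeader(200)
        }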
  2934  
  2935  func (w *responseWriter) handlerDone() {
  2936  	rws := w.rws
  2937  	dirty := rws.dirty
  2938  	rws.handlerDone = true
  2939  	w.Flush()
  2940  	w.rws = nil
  2941  	if !dirty {
  2942  		// Only recycle the pool if all prior Write calls to
  2943  		// the serverConn goroutine completed successfully. If
  2944  		// they returned earlier due to resets from the peer
  2945  		// there might still be write goroutines outstanding
  2946  		// from the serverConn referencing the rws memory. See
  2947  		// issue 20704.
  2948  		responseWriterStatePool.Put(rws)
  2949  	}
  2950  }
  2951  
  2952  // Push errors.
  2953  var (
  2954  	ErrRecursivePush    = errors.New("http2: recursive push not allowed")
  2955  	ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS")
  2956  )
  2957  
  2958  var _ http.Pusher = (*responseWriter)(nil)
  2959  
  2960  func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
  2961  	st := w.rws.stream
  2962  	sc := st.sc
  2963  	sc.serveG.checkNotOn()
  2964  
  2965  	// No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream."
  2966  	// http://tools.ietf.org/html/rfc7540#section-6.6
  2967  	if st.isPushed() {
  2968  		return ErrRecursivePush
  2969  	}
  2970  
  2971  	if opts == nil {
  2972  		opts = new(http.PushOptions)
  2973  	}
  2974  
  2975  	// Default options.
  2976  	if opts.Method == "" {
  2977  		opts.Method = "GET"
  2978  	}
  2979  	if opts.Header == nil {
  2980  		opts.Header = http.Header{}
  2981  	}
  2982  	wantScheme := "http"
  2983  	if w.rws.req.TLS != nil {
  2984  		wantScheme = "https"
  2985  	}
  2986  
  2987  	// Validate the request.
  2988  	u, err := url.Parse(target)
  2989  	if err != nil {
  2990  		return err
  2991  	}
  2992  	if u.Scheme == "" {
  2993  		if !strings.HasPrefix(target, "/") {
  2994  			return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
  2995  		}
  2996  		u.Scheme = wantScheme
  2997  		u.Host = w.rws.req.Host
  2998  	} else {
  2999  		if u.Scheme != wantScheme {
  3000  			return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
  3001  		}
  3002  		if u.Host == "" {
  3003  			return errors.New("URL must have a host")
  3004  		}
  3005  	}
  3006  	for k := range opts.Header {
  3007  		if strings.HasPrefix(k, ":") {
  3008  			return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
  3009  		}
  3010  		// These headers are meaningful only if the request has a body,
  3011  		// but PUSH_PROMISE requests cannot have a body.
  3012  		// http://tools.ietf.org/html/rfc7540#section-8.2
  3013  		// Also disallow Host, since the promised URL must be absolute.
  3014  		if asciiEqualFold(k, "content-length") ||
  3015  			asciiEqualFold(k, "content-encoding") ||
  3016  			asciiEqualFold(k, "trailer") ||
  3017  			asciiEqualFold(k, "te") ||
  3018  			asciiEqualFold(k, "expect") ||
  3019  			asciiEqualFold(k, "host") {
  3020  			return fmt.Errorf("promised request headers cannot include %q", k)
  3021  		}
  3022  	}
  3023  	if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {
  3024  		return err
  3025  	}
  3026  
  3027  	// The RFC effectively limits promised requests to GET and HEAD:
  3028  	// "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
  3029  	// http://tools.ietf.org/html/rfc7540#section-8.2
  3030  	if opts.Method != "GET" && opts.Method != "HEAD" {
  3031  		return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
  3032  	}
  3033  
  3034  	msg := &startPushRequest{
  3035  		parent: st,
  3036  		method: opts.Method,
  3037  		url:    u,
  3038  		header: cloneHeader(opts.Header),
  3039  		done:   errChanPool.Get().(chan error),
  3040  	}
  3041  
  3042  	select {
  3043  	case <-sc.doneServing:
  3044  		return errClientDisconnected
  3045  	case <-st.cw:
  3046  		return errStreamClosed
  3047  	case sc.serveMsgCh <- msg:
  3048  	}
  3049  
  3050  	select {
  3051  	case <-sc.doneServing:
  3052  		return errClientDisconnected
  3053  	case <-st.cw:
  3054  		return errStreamClosed
  3055  	case err := <-msg.done:
  3056  		errChanPool.Put(msg.done)
  3057  		return err
  3058  	}
  3059  }
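
        // Illustrative sketch (not used by this package): a Handler serving a
        // client-initiated request can call Push above through the http.Pusher
        // interface; the asset path is an example. Push errors are expected
        // when the peer disables push or limits are hit, and are non-fatal.
        func examplePushHandler(w http.ResponseWriter, r *http.Request) {
        	if p, ok := w.(http.Pusher); ok {
        		// Possible errors include http.ErrNotSupported, ErrRecursivePush,
        		// ErrPushLimitReached, and errStreamClosed.
        		_ = p.Push("/static/app.css", &http.PushOptions{Method: "GET"})
        	}
        	w.Write([]byte(`<link rel="stylesheet" href="/static/app.css">`))
        }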
  3060  
  3061  type startPushRequest struct {
  3062  	parent *stream
  3063  	method string
  3064  	url    *url.URL
  3065  	header http.Header
  3066  	done   chan error
  3067  }
  3068  
  3069  func (sc *serverConn) startPush(msg *startPushRequest) {
  3070  	sc.serveG.check()
  3071  
  3072  	// http://tools.ietf.org/html/rfc7540#section-6.6.
  3073  	// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
  3074  	// is in either the "open" or "half-closed (remote)" state.
  3075  	if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
  3076  		// responseWriter.Push checks that the stream is peer-initiated.
  3077  		msg.done <- errStreamClosed
  3078  		return
  3079  	}
  3080  
  3081  	// http://tools.ietf.org/html/rfc7540#section-6.6.
  3082  	if !sc.pushEnabled {
  3083  		msg.done <- http.ErrNotSupported
  3084  		return
  3085  	}
  3086  
  3087  	// PUSH_PROMISE frames must be sent in increasing order by stream ID, so
  3088  	// we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
  3089  	// is written. Once the ID is allocated, we start the request handler.
  3090  	allocatePromisedID := func() (uint32, error) {
  3091  		sc.serveG.check()
  3092  
  3093  		// Check this again, just in case. Technically, we might have received
  3094  		// an updated SETTINGS by the time we got around to writing this frame.
  3095  		if !sc.pushEnabled {
  3096  			return 0, http.ErrNotSupported
  3097  		}
  3098  		// http://tools.ietf.org/html/rfc7540#section-6.5.2.
  3099  		if sc.curPushedStreams+1 > sc.clientMaxStreams {
  3100  			return 0, ErrPushLimitReached
  3101  		}
  3102  
  3103  		// http://tools.ietf.org/html/rfc7540#section-5.1.1.
  3104  		// Streams initiated by the server MUST use even-numbered identifiers.
  3105  		// A server that is unable to establish a new stream identifier can send a GOAWAY
  3106  		// frame so that the client is forced to open a new connection for new streams.
  3107  		if sc.maxPushPromiseID+2 >= 1<<31 {
  3108  			sc.startGracefulShutdownInternal()
  3109  			return 0, ErrPushLimitReached
  3110  		}
  3111  		sc.maxPushPromiseID += 2
  3112  		promisedID := sc.maxPushPromiseID
  3113  
  3114  		// http://tools.ietf.org/html/rfc7540#section-8.2.
  3115  		// Strictly speaking, the new stream should start in "reserved (local)", then
  3116  		// transition to "half closed (remote)" after sending the initial HEADERS, but
  3117  		// we start in "half closed (remote)" for simplicity.
  3118  		// See further comments at the definition of stateHalfClosedRemote.
  3119  		promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
  3120  		rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{
  3121  			method:    msg.method,
  3122  			scheme:    msg.url.Scheme,
  3123  			authority: msg.url.Host,
  3124  			path:      msg.url.RequestURI(),
  3125  			header:    cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
  3126  		})
  3127  		if err != nil {
  3128  			// Should not happen, since we've already validated msg.url.
  3129  			panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
  3130  		}
  3131  
  3132  		go sc.runHandler(rw, req, sc.handler.ServeHTTP)
  3133  		return promisedID, nil
  3134  	}
  3135  
  3136  	sc.writeFrame(FrameWriteRequest{
  3137  		write: &writePushPromise{
  3138  			streamID:           msg.parent.id,
  3139  			method:             msg.method,
  3140  			url:                msg.url,
  3141  			h:                  msg.header,
  3142  			allocatePromisedID: allocatePromisedID,
  3143  		},
  3144  		stream: msg.parent,
  3145  		done:   msg.done,
  3146  	})
  3147  }
  3148  
  3149  // foreachHeaderElement splits v according to the "#rule" construction
  3150  // in RFC 7230 section 7 and calls fn for each non-empty element.
  3151  func foreachHeaderElement(v string, fn func(string)) {
  3152  	v = textproto.TrimString(v)
  3153  	if v == "" {
  3154  		return
  3155  	}
  3156  	if !strings.Contains(v, ",") {
  3157  		fn(v)
  3158  		return
  3159  	}
  3160  	for _, f := range strings.Split(v, ",") {
  3161  		if f = textproto.TrimString(f); f != "" {
  3162  			fn(f)
  3163  		}
  3164  	}
  3165  }
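
        // For illustration (hypothetical call, not used by this package):
        //
        //	foreachHeaderElement(" gzip, chunked, , ", func(e string) {
        //		// invoked with "gzip", then "chunked"; empty elements are skipped
        //	})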
  3166  
  3167  // From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2
  3168  var connHeaders = []string{
  3169  	"Connection",
  3170  	"Keep-Alive",
  3171  	"Proxy-Connection",
  3172  	"Transfer-Encoding",
  3173  	"Upgrade",
  3174  }
  3175  
  3176  // checkValidHTTP2RequestHeaders checks whether h contains only headers that
  3177  // are valid in an HTTP/2 request, per RFC 7540 Section 8.1.2.2.
  3178  // The returned error is reported to users.
  3179  func checkValidHTTP2RequestHeaders(h http.Header) error {
  3180  	for _, k := range connHeaders {
  3181  		if _, ok := h[k]; ok {
  3182  			return fmt.Errorf("request header %q is not valid in HTTP/2", k)
  3183  		}
  3184  	}
  3185  	te := h["Te"]
  3186  	if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
  3187  		return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
  3188  	}
  3189  	return nil
  3190  }
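
        // For illustration (hypothetical values): a header map carrying
        // "Connection: close" or "TE: gzip" is rejected by the check above,
        // while "TE: trailers" (or an empty TE) passes.
        //
        //	_ = checkValidHTTP2RequestHeaders(http.Header{"Te": {"trailers"}})      // nil
        //	_ = checkValidHTTP2RequestHeaders(http.Header{"Connection": {"close"}}) // error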
  3191  
  3192  func new400Handler(err error) http.HandlerFunc {
  3193  	return func(w http.ResponseWriter, r *http.Request) {
  3194  		http.Error(w, err.Error(), http.StatusBadRequest)
  3195  	}
  3196  }
  3197  
  3198  // h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
  3199  // disabled. See comments on h1ServerShutdownChan above for why
  3200  // the code is written this way.
  3201  func h1ServerKeepAlivesDisabled(hs *http.Server) bool {
  3202  	var x interface{} = hs
  3203  	type I interface {
  3204  		doKeepAlives() bool
  3205  	}
  3206  	if hs, ok := x.(I); ok {
  3207  		return !hs.doKeepAlives()
  3208  	}
  3209  	return false
  3210  }
  3211  
  3212  func (sc *serverConn) countError(name string, err error) error {
  3213  	if sc == nil || sc.srv == nil {
  3214  		return err
  3215  	}
  3216  	f := sc.srv.CountError
  3217  	if f == nil {
  3218  		return err
  3219  	}
  3220  	var typ string
  3221  	var code ErrCode
  3222  	switch e := err.(type) {
  3223  	case ConnectionError:
  3224  		typ = "conn"
  3225  		code = ErrCode(e)
  3226  	case StreamError:
  3227  		typ = "stream"
  3228  		code = ErrCode(e.Code)
  3229  	default:
  3230  		return err
  3231  	}
  3232  	codeStr := errCodeName[code]
  3233  	if codeStr == "" {
  3234  		codeStr = strconv.Itoa(int(code))
  3235  	}
  3236  	f(fmt.Sprintf("%s_%s_%s", typ, codeStr, name))
  3237  	return err
  3238  }
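
        // For illustration (hypothetical wiring): countError above builds names
        // of the form "<conn|stream>_<ERRCODE>_<name>" and hands them to the
        // server's CountError hook, which can feed logging or a metrics counter:
        //
        //	srv := &Server{
        //		CountError: func(errType string) {
        //			log.Printf("http2 server error: %s", errType)
        //		},
        //	}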