github.com/mdaxf/iac@v0.0.0-20240519030858-58a061660378/vendor_skip/golang.org/x/net/http2/server.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // TODO: turn off the serve goroutine when idle, so
     6  // an idle conn only has the readFrames goroutine active. (which could
     7  // also be optimized probably to pin less memory in crypto/tls). This
     8  // would involve tracking when the serve goroutine is active (atomic
     9  // int32 read/CAS probably?) and starting it up when frames arrive,
    10  // and shutting it down when all handlers exit. the occasional PING
    11  // packets could use time.AfterFunc to call sc.wakeStartServeLoop()
    12  // (which is a no-op if already running) and then queue the PING write
    13  // as normal. The serve loop would then exit in most cases (if no
    14  // Handlers running) and not be woken up again until the PING packet
    15  // returns.
    16  
    17  // TODO (maybe): add a mechanism for Handlers to go into
    18  // half-closed-local mode (rw.(io.Closer) test?) but not exit their
    19  // handler, and continue to be able to read from the
    20  // Request.Body. This would be a somewhat semantic change from HTTP/1
    21  // (or at least what we expose in net/http), so I'd probably want to
    22  // add it there too. For now, this package says that returning from
    23  // the Handler ServeHTTP function means you're both done reading and
    24  // done writing, without a way to stop just one or the other.
    25  
    26  package http2
    27  
    28  import (
    29  	"bufio"
    30  	"bytes"
    31  	"context"
    32  	"crypto/tls"
    33  	"errors"
    34  	"fmt"
    35  	"io"
    36  	"log"
    37  	"math"
    38  	"net"
    39  	"net/http"
    40  	"net/textproto"
    41  	"net/url"
    42  	"os"
    43  	"reflect"
    44  	"runtime"
    45  	"strconv"
    46  	"strings"
    47  	"sync"
    48  	"time"
    49  
    50  	"golang.org/x/net/http/httpguts"
    51  	"golang.org/x/net/http2/hpack"
    52  )
    53  
    54  const (
    55  	prefaceTimeout         = 10 * time.Second
    56  	firstSettingsTimeout   = 2 * time.Second // should be in-flight with preface anyway
    57  	handlerChunkWriteSize  = 4 << 10
    58  	defaultMaxStreams      = 250 // TODO: make this 100 as the GFE seems to?
    59  	maxQueuedControlFrames = 10000
    60  )
    61  
    62  var (
    63  	errClientDisconnected = errors.New("client disconnected")
    64  	errClosedBody         = errors.New("body closed by handler")
    65  	errHandlerComplete    = errors.New("http2: request body closed due to handler exiting")
    66  	errStreamClosed       = errors.New("http2: stream closed")
    67  )
    68  
    69  var responseWriterStatePool = sync.Pool{
    70  	New: func() interface{} {
    71  		rws := &responseWriterState{}
    72  		rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
    73  		return rws
    74  	},
    75  }
    76  
    77  // Test hooks.
    78  var (
    79  	testHookOnConn        func()
    80  	testHookGetServerConn func(*serverConn)
    81  	testHookOnPanicMu     *sync.Mutex // nil except in tests
    82  	testHookOnPanic       func(sc *serverConn, panicVal interface{}) (rePanic bool)
    83  )
    84  
    85  // Server is an HTTP/2 server.
    86  type Server struct {
    87  	// MaxHandlers limits the number of http.Handler ServeHTTP goroutines
    88  	// which may run at a time over all connections.
    89  	// Negative or zero means no limit.
    90  	// TODO: implement
    91  	MaxHandlers int
    92  
    93  	// MaxConcurrentStreams optionally specifies the number of
    94  	// concurrent streams that each client may have open at a
    95  	// time. This is unrelated to the number of http.Handler goroutines
    96  	// which may be active globally, which is MaxHandlers.
    97  	// If zero, MaxConcurrentStreams defaults to at least 100, per
    98  	// the HTTP/2 spec's recommendations.
    99  	MaxConcurrentStreams uint32
   100  
   101  	// MaxDecoderHeaderTableSize optionally specifies the http2
   102  	// SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It
   103  	// informs the remote endpoint of the maximum size of the header compression
   104  	// table used to decode header blocks, in octets. If zero, the default value
   105  	// of 4096 is used.
   106  	MaxDecoderHeaderTableSize uint32
   107  
   108  	// MaxEncoderHeaderTableSize optionally specifies an upper limit for the
   109  	// header compression table used for encoding request headers. Received
   110  	// SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero,
   111  	// the default value of 4096 is used.
   112  	MaxEncoderHeaderTableSize uint32
   113  
   114  	// MaxReadFrameSize optionally specifies the largest frame
   115  	// this server is willing to read. A valid value is between
   116  	// 16k and 16M, inclusive. If zero or otherwise invalid, a
   117  	// default value is used.
   118  	MaxReadFrameSize uint32
   119  
   120  	// PermitProhibitedCipherSuites, if true, permits the use of
   121  	// cipher suites prohibited by the HTTP/2 spec.
   122  	PermitProhibitedCipherSuites bool
   123  
   124  	// IdleTimeout specifies how long until idle clients should be
   125  	// closed with a GOAWAY frame. PING frames are not considered
   126  	// activity for the purposes of IdleTimeout.
   127  	IdleTimeout time.Duration
   128  
   129  	// MaxUploadBufferPerConnection is the size of the initial flow
   130  	// control window for each connection. The HTTP/2 spec does not
   131  	// allow this to be smaller than 65535 or larger than 2^32-1.
   132  	// If the value is outside this range, a default value will be
   133  	// used instead.
   134  	MaxUploadBufferPerConnection int32
   135  
   136  	// MaxUploadBufferPerStream is the size of the initial flow control
   137  	// window for each stream. The HTTP/2 spec does not allow this to
   138  	// be larger than 2^32-1. If the value is zero or larger than the
   139  	// maximum, a default value will be used instead.
   140  	MaxUploadBufferPerStream int32
   141  
   142  	// NewWriteScheduler constructs a write scheduler for a connection.
   143  	// If nil, a default scheduler is chosen.
   144  	NewWriteScheduler func() WriteScheduler
   145  
   146  	// CountError, if non-nil, is called on HTTP/2 server errors.
   147  	// It's intended to increment a metric for monitoring, such
   148  	// as an expvar or Prometheus metric.
   149  	// The errType consists of only ASCII word characters.
   150  	CountError func(errType string)
   151  
   152  	// Internal state. This is a pointer (rather than embedded directly)
   153  	// so that we don't embed a Mutex in this struct, which will make the
   154  	// struct non-copyable, which might break some callers.
   155  	state *serverInternalState
   156  }
   157  
   158  func (s *Server) initialConnRecvWindowSize() int32 {
   159  	if s.MaxUploadBufferPerConnection >= initialWindowSize {
   160  		return s.MaxUploadBufferPerConnection
   161  	}
   162  	return 1 << 20
   163  }
   164  
   165  func (s *Server) initialStreamRecvWindowSize() int32 {
   166  	if s.MaxUploadBufferPerStream > 0 {
   167  		return s.MaxUploadBufferPerStream
   168  	}
   169  	return 1 << 20
   170  }
   171  
   172  func (s *Server) maxReadFrameSize() uint32 {
   173  	if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
   174  		return v
   175  	}
   176  	return defaultMaxReadFrameSize
   177  }
   178  
   179  func (s *Server) maxConcurrentStreams() uint32 {
   180  	if v := s.MaxConcurrentStreams; v > 0 {
   181  		return v
   182  	}
   183  	return defaultMaxStreams
   184  }
   185  
   186  func (s *Server) maxDecoderHeaderTableSize() uint32 {
   187  	if v := s.MaxDecoderHeaderTableSize; v > 0 {
   188  		return v
   189  	}
   190  	return initialHeaderTableSize
   191  }
   192  
   193  func (s *Server) maxEncoderHeaderTableSize() uint32 {
   194  	if v := s.MaxEncoderHeaderTableSize; v > 0 {
   195  		return v
   196  	}
   197  	return initialHeaderTableSize
   198  }
   199  
   200  // maxQueuedControlFrames is the maximum number of control frames like
   201  // SETTINGS, PING and RST_STREAM that will be queued for writing before
   202  // the connection is closed to prevent memory exhaustion attacks.
   203  func (s *Server) maxQueuedControlFrames() int {
   204  	// TODO: if anybody asks, add a Server field, and remember to define the
   205  	// behavior of negative values.
   206  	return maxQueuedControlFrames
   207  }
   208  
   209  type serverInternalState struct {
   210  	mu          sync.Mutex
   211  	activeConns map[*serverConn]struct{}
   212  }
   213  
   214  func (s *serverInternalState) registerConn(sc *serverConn) {
   215  	if s == nil {
   216  		return // if the Server was used without calling ConfigureServer
   217  	}
   218  	s.mu.Lock()
   219  	s.activeConns[sc] = struct{}{}
   220  	s.mu.Unlock()
   221  }
   222  
   223  func (s *serverInternalState) unregisterConn(sc *serverConn) {
   224  	if s == nil {
   225  		return // if the Server was used without calling ConfigureServer
   226  	}
   227  	s.mu.Lock()
   228  	delete(s.activeConns, sc)
   229  	s.mu.Unlock()
   230  }
   231  
   232  func (s *serverInternalState) startGracefulShutdown() {
   233  	if s == nil {
   234  		return // if the Server was used without calling ConfigureServer
   235  	}
   236  	s.mu.Lock()
   237  	for sc := range s.activeConns {
   238  		sc.startGracefulShutdown()
   239  	}
   240  	s.mu.Unlock()
   241  }
   242  
   243  // ConfigureServer adds HTTP/2 support to a net/http Server.
   244  //
   245  // The configuration conf may be nil.
   246  //
   247  // ConfigureServer must be called before s begins serving.
   248  func ConfigureServer(s *http.Server, conf *Server) error {
   249  	if s == nil {
   250  		panic("nil *http.Server")
   251  	}
   252  	if conf == nil {
   253  		conf = new(Server)
   254  	}
   255  	conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
   256  	if h1, h2 := s, conf; h2.IdleTimeout == 0 {
   257  		if h1.IdleTimeout != 0 {
   258  			h2.IdleTimeout = h1.IdleTimeout
   259  		} else {
   260  			h2.IdleTimeout = h1.ReadTimeout
   261  		}
   262  	}
   263  	s.RegisterOnShutdown(conf.state.startGracefulShutdown)
   264  
   265  	if s.TLSConfig == nil {
   266  		s.TLSConfig = new(tls.Config)
   267  	} else if s.TLSConfig.CipherSuites != nil && s.TLSConfig.MinVersion < tls.VersionTLS13 {
   268  		// If they already provided a TLS 1.0–1.2 CipherSuite list, return an
   269  		// error if it is missing ECDHE_RSA_WITH_AES_128_GCM_SHA256 or
   270  		// ECDHE_ECDSA_WITH_AES_128_GCM_SHA256.
   271  		haveRequired := false
   272  		for _, cs := range s.TLSConfig.CipherSuites {
   273  			switch cs {
   274  			case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
   275  				// Alternative MTI cipher to not discourage ECDSA-only servers.
   276  				// See http://golang.org/cl/30721 for further information.
   277  				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
   278  				haveRequired = true
   279  			}
   280  		}
   281  		if !haveRequired {
   282  			return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256)")
   283  		}
   284  	}
   285  
   286  	// Note: not setting MinVersion to tls.VersionTLS12,
   287  	// as we don't want to interfere with HTTP/1.1 traffic
   288  	// on the user's server. We enforce TLS 1.2 later once
   289  	// we accept a connection. Ideally this should be done
   290  	// during next-proto selection, but using TLS <1.2 with
   291  	// HTTP/2 is still the client's bug.
   292  
   293  	s.TLSConfig.PreferServerCipherSuites = true
   294  
   295  	if !strSliceContains(s.TLSConfig.NextProtos, NextProtoTLS) {
   296  		s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
   297  	}
   298  	if !strSliceContains(s.TLSConfig.NextProtos, "http/1.1") {
   299  		s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "http/1.1")
   300  	}
   301  
   302  	if s.TLSNextProto == nil {
   303  		s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
   304  	}
   305  	protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
   306  		if testHookOnConn != nil {
   307  			testHookOnConn()
   308  		}
   309  		// The TLSNextProto interface predates contexts, so
   310  		// the net/http package passes down its per-connection
   311  		// base context via an exported but unadvertised
   312  		// method on the Handler. This is for internal
   313  		// net/http<=>http2 use only.
   314  		var ctx context.Context
   315  		type baseContexter interface {
   316  			BaseContext() context.Context
   317  		}
   318  		if bc, ok := h.(baseContexter); ok {
   319  			ctx = bc.BaseContext()
   320  		}
   321  		conf.ServeConn(c, &ServeConnOpts{
   322  			Context:    ctx,
   323  			Handler:    h,
   324  			BaseConfig: hs,
   325  		})
   326  	}
   327  	s.TLSNextProto[NextProtoTLS] = protoHandler
   328  	return nil
   329  }
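
// The following is an illustrative sketch and not part of the upstream file:
// it shows how a caller would typically wire ConfigureServer into a net/http
// server. The address, handler, limits, and certificate paths are placeholders.
func exampleConfigureServer() {
	srv := &http.Server{
		Addr:    ":8443",
		Handler: http.DefaultServeMux,
	}
	// From outside this package the call reads http2.ConfigureServer(srv, &http2.Server{...}).
	if err := ConfigureServer(srv, &Server{
		MaxConcurrentStreams: 250,
		IdleTimeout:          30 * time.Second,
	}); err != nil {
		log.Fatal(err)
	}
	// ListenAndServeTLS advertises "h2" via ALPN using the NextProtos that
	// ConfigureServer appended above, so HTTP/2-capable clients are served
	// by this package automatically.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}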
   330  
   331  // ServeConnOpts are options for the Server.ServeConn method.
   332  type ServeConnOpts struct {
   333  	// Context is the base context to use.
   334  	// If nil, context.Background is used.
   335  	Context context.Context
   336  
   337  	// BaseConfig optionally sets the base configuration
   338  	// for values. If nil, defaults are used.
   339  	BaseConfig *http.Server
   340  
   341  	// Handler specifies which handler to use for processing
   342  	// requests. If nil, BaseConfig.Handler is used. If BaseConfig
   343  	// or BaseConfig.Handler is nil, http.DefaultServeMux is used.
   344  	Handler http.Handler
   345  
   346  	// UpgradeRequest is an initial request received on a connection
   347  	// undergoing an h2c upgrade. The request body must have been
   348  	// completely read from the connection before calling ServeConn,
   349  	// and the 101 Switching Protocols response written.
   350  	UpgradeRequest *http.Request
   351  
   352  	// Settings is the decoded contents of the HTTP2-Settings header
   353  	// in an h2c upgrade request.
   354  	Settings []byte
   355  
   356  	// SawClientPreface is set if the HTTP/2 connection preface
   357  	// has already been read from the connection.
   358  	SawClientPreface bool
   359  }
   360  
   361  func (o *ServeConnOpts) context() context.Context {
   362  	if o != nil && o.Context != nil {
   363  		return o.Context
   364  	}
   365  	return context.Background()
   366  }
   367  
   368  func (o *ServeConnOpts) baseConfig() *http.Server {
   369  	if o != nil && o.BaseConfig != nil {
   370  		return o.BaseConfig
   371  	}
   372  	return new(http.Server)
   373  }
   374  
   375  func (o *ServeConnOpts) handler() http.Handler {
   376  	if o != nil {
   377  		if o.Handler != nil {
   378  			return o.Handler
   379  		}
   380  		if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
   381  			return o.BaseConfig.Handler
   382  		}
   383  	}
   384  	return http.DefaultServeMux
   385  }
   386  
   387  // ServeConn serves HTTP/2 requests on the provided connection and
   388  // blocks until the connection is no longer readable.
   389  //
   390  // ServeConn starts speaking HTTP/2 assuming that c has not had any
   391  // reads or writes. It writes its initial settings frame and expects
   392  // to be able to read the preface and settings frame from the
   393  // client. If c has a ConnectionState method like a *tls.Conn, the
   394  // ConnectionState is used to verify the TLS ciphersuite and to set
   395  // the Request.TLS field in Handlers.
   396  //
   397  // ServeConn does not support h2c by itself. Any h2c support must be
   398  // implemented in terms of providing a suitably-behaving net.Conn.
   399  //
   400  // The opts parameter is optional. If nil, default values are used.
   401  func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
   402  	baseCtx, cancel := serverConnBaseContext(c, opts)
   403  	defer cancel()
   404  
   405  	sc := &serverConn{
   406  		srv:                         s,
   407  		hs:                          opts.baseConfig(),
   408  		conn:                        c,
   409  		baseCtx:                     baseCtx,
   410  		remoteAddrStr:               c.RemoteAddr().String(),
   411  		bw:                          newBufferedWriter(c),
   412  		handler:                     opts.handler(),
   413  		streams:                     make(map[uint32]*stream),
   414  		readFrameCh:                 make(chan readFrameResult),
   415  		wantWriteFrameCh:            make(chan FrameWriteRequest, 8),
   416  		serveMsgCh:                  make(chan interface{}, 8),
   417  		wroteFrameCh:                make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
   418  		bodyReadCh:                  make(chan bodyReadMsg),         // buffering doesn't matter either way
   419  		doneServing:                 make(chan struct{}),
   420  		clientMaxStreams:            math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
   421  		advMaxStreams:               s.maxConcurrentStreams(),
   422  		initialStreamSendWindowSize: initialWindowSize,
   423  		maxFrameSize:                initialMaxFrameSize,
   424  		serveG:                      newGoroutineLock(),
   425  		pushEnabled:                 true,
   426  		sawClientPreface:            opts.SawClientPreface,
   427  	}
   428  
   429  	s.state.registerConn(sc)
   430  	defer s.state.unregisterConn(sc)
   431  
   432  	// The net/http package sets the write deadline from the
   433  	// http.Server.WriteTimeout during the TLS handshake, but then
   434  	// passes the connection off to us with the deadline already set.
   435  	// Write deadlines are set per stream in serverConn.newStream.
   436  	// Disarm the net.Conn write deadline here.
   437  	if sc.hs.WriteTimeout != 0 {
   438  		sc.conn.SetWriteDeadline(time.Time{})
   439  	}
   440  
   441  	if s.NewWriteScheduler != nil {
   442  		sc.writeSched = s.NewWriteScheduler()
   443  	} else {
   444  		sc.writeSched = newRoundRobinWriteScheduler()
   445  	}
   446  
   447  	// These start at the RFC-specified defaults. If there is a higher
   448  	// configured value for inflow, that will be updated when we send a
   449  	// WINDOW_UPDATE shortly after sending SETTINGS.
   450  	sc.flow.add(initialWindowSize)
   451  	sc.inflow.init(initialWindowSize)
   452  	sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
   453  	sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
   454  
   455  	fr := NewFramer(sc.bw, c)
   456  	if s.CountError != nil {
   457  		fr.countError = s.CountError
   458  	}
   459  	fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil)
   460  	fr.MaxHeaderListSize = sc.maxHeaderListSize()
   461  	fr.SetMaxReadFrameSize(s.maxReadFrameSize())
   462  	sc.framer = fr
   463  
   464  	if tc, ok := c.(connectionStater); ok {
   465  		sc.tlsState = new(tls.ConnectionState)
   466  		*sc.tlsState = tc.ConnectionState()
   467  		// 9.2 Use of TLS Features
   468  		// An implementation of HTTP/2 over TLS MUST use TLS
   469  		// 1.2 or higher with the restrictions on feature set
   470  		// and cipher suite described in this section. Due to
   471  		// implementation limitations, it might not be
   472  		// possible to fail TLS negotiation. An endpoint MUST
   473  		// immediately terminate an HTTP/2 connection that
   474  		// does not meet the TLS requirements described in
   475  		// this section with a connection error (Section
   476  		// 5.4.1) of type INADEQUATE_SECURITY.
   477  		if sc.tlsState.Version < tls.VersionTLS12 {
   478  			sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
   479  			return
   480  		}
   481  
   482  		if sc.tlsState.ServerName == "" {
   483  			// Client must use SNI, but we don't enforce that anymore,
   484  			// since it was causing problems when connecting to bare IP
   485  			// addresses during development.
   486  			//
   487  			// TODO: optionally enforce? Or enforce at the time we receive
   488  			// a new request, and verify the ServerName matches the :authority?
   489  			// But that precludes proxy situations, perhaps.
   490  			//
   491  			// So for now, do nothing here.
   492  		}
   493  
   494  		if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
   495  			// "Endpoints MAY choose to generate a connection error
   496  			// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
   497  			// the prohibited cipher suites are negotiated."
   498  			//
   499  			// We choose that. In my opinion, the spec is weak
   500  			// here. It also says both parties must support at least
   501  			// TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
   502  			// excuses here. If we really must, we could allow an
   503  			// "AllowInsecureWeakCiphers" option on the server later.
   504  			// Let's see how it plays out first.
   505  			sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
   506  			return
   507  		}
   508  	}
   509  
   510  	if opts.Settings != nil {
   511  		fr := &SettingsFrame{
   512  			FrameHeader: FrameHeader{valid: true},
   513  			p:           opts.Settings,
   514  		}
   515  		if err := fr.ForeachSetting(sc.processSetting); err != nil {
   516  			sc.rejectConn(ErrCodeProtocol, "invalid settings")
   517  			return
   518  		}
   519  		opts.Settings = nil
   520  	}
   521  
   522  	if hook := testHookGetServerConn; hook != nil {
   523  		hook(sc)
   524  	}
   525  
   526  	if opts.UpgradeRequest != nil {
   527  		sc.upgradeRequest(opts.UpgradeRequest)
   528  		opts.UpgradeRequest = nil
   529  	}
   530  
   531  	sc.serve()
   532  }
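
// exampleServeConn is a hypothetical sketch, not part of the upstream file,
// showing ServeConn driven directly on accepted connections. This is the
// usual shape for plaintext "h2c" with prior knowledge, where the caller
// already knows the peer speaks HTTP/2; the listen address is a placeholder.
func exampleServeConn(h http.Handler) error {
	ln, err := net.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		return err
	}
	h2s := &Server{}
	for {
		c, err := ln.Accept()
		if err != nil {
			return err
		}
		// ServeConn blocks until the connection is no longer readable,
		// so each connection gets its own goroutine.
		go h2s.ServeConn(c, &ServeConnOpts{Handler: h})
	}
}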
   533  
   534  func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
   535  	ctx, cancel = context.WithCancel(opts.context())
   536  	ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
   537  	if hs := opts.baseConfig(); hs != nil {
   538  		ctx = context.WithValue(ctx, http.ServerContextKey, hs)
   539  	}
   540  	return
   541  }
   542  
   543  func (sc *serverConn) rejectConn(err ErrCode, debug string) {
   544  	sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
   545  	// ignoring errors. hanging up anyway.
   546  	sc.framer.WriteGoAway(0, err, []byte(debug))
   547  	sc.bw.Flush()
   548  	sc.conn.Close()
   549  }
   550  
   551  type serverConn struct {
   552  	// Immutable:
   553  	srv              *Server
   554  	hs               *http.Server
   555  	conn             net.Conn
   556  	bw               *bufferedWriter // writing to conn
   557  	handler          http.Handler
   558  	baseCtx          context.Context
   559  	framer           *Framer
   560  	doneServing      chan struct{}          // closed when serverConn.serve ends
   561  	readFrameCh      chan readFrameResult   // written by serverConn.readFrames
   562  	wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
   563  	wroteFrameCh     chan frameWriteResult  // from writeFrameAsync -> serve, tickles more frame writes
   564  	bodyReadCh       chan bodyReadMsg       // from handlers -> serve
   565  	serveMsgCh       chan interface{}       // misc messages & code to send to / run on the serve loop
   566  	flow             outflow                // conn-wide (not stream-specific) outbound flow control
   567  	inflow           inflow                 // conn-wide inbound flow control
   568  	tlsState         *tls.ConnectionState   // shared by all handlers, like net/http
   569  	remoteAddrStr    string
   570  	writeSched       WriteScheduler
   571  
   572  	// Everything following is owned by the serve loop; use serveG.check():
   573  	serveG                      goroutineLock // used to verify funcs are on serve()
   574  	pushEnabled                 bool
   575  	sawClientPreface            bool // preface has already been read, used in h2c upgrade
   576  	sawFirstSettings            bool // got the initial SETTINGS frame after the preface
   577  	needToSendSettingsAck       bool
   578  	unackedSettings             int    // how many SETTINGS have we sent without ACKs?
   579  	queuedControlFrames         int    // control frames in the writeSched queue
   580  	clientMaxStreams            uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
   581  	advMaxStreams               uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
   582  	curClientStreams            uint32 // number of open streams initiated by the client
   583  	curPushedStreams            uint32 // number of open streams initiated by server push
   584  	curHandlers                 uint32 // number of running handler goroutines
   585  	maxClientStreamID           uint32 // max ever seen from client (odd), or 0 if there have been no client requests
   586  	maxPushPromiseID            uint32 // ID of the last push promise (even), or 0 if there have been no pushes
   587  	streams                     map[uint32]*stream
   588  	unstartedHandlers           []unstartedHandler
   589  	initialStreamSendWindowSize int32
   590  	maxFrameSize                int32
   591  	peerMaxHeaderListSize       uint32            // zero means unknown (default)
   592  	canonHeader                 map[string]string // http2-lower-case -> Go-Canonical-Case
   593  	canonHeaderKeysSize         int               // canonHeader keys size in bytes
   594  	writingFrame                bool              // started writing a frame (on serve goroutine or separate)
   595  	writingFrameAsync           bool              // started a frame on its own goroutine but haven't heard back on wroteFrameCh
   596  	needsFrameFlush             bool              // last frame write wasn't a flush
   597  	inGoAway                    bool              // we've started to send or have sent GOAWAY
   598  	inFrameScheduleLoop         bool              // whether we're in the scheduleFrameWrite loop
   599  	needToSendGoAway            bool              // we need to schedule a GOAWAY frame write
   600  	goAwayCode                  ErrCode
   601  	shutdownTimer               *time.Timer // nil until used
   602  	idleTimer                   *time.Timer // nil if unused
   603  
   604  	// Owned by the writeFrameAsync goroutine:
   605  	headerWriteBuf bytes.Buffer
   606  	hpackEncoder   *hpack.Encoder
   607  
   608  	// Used by startGracefulShutdown.
   609  	shutdownOnce sync.Once
   610  }
   611  
   612  func (sc *serverConn) maxHeaderListSize() uint32 {
   613  	n := sc.hs.MaxHeaderBytes
   614  	if n <= 0 {
   615  		n = http.DefaultMaxHeaderBytes
   616  	}
   617  	// http2's count is in a slightly different unit and includes 32 bytes per pair.
   618  	// So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
   619  	const perFieldOverhead = 32 // per http2 spec
   620  	const typicalHeaders = 10   // conservative
   621  	return uint32(n + typicalHeaders*perFieldOverhead)
   622  }
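
// Worked example (illustrative, not part of the upstream file): with the
// net/http default of http.DefaultMaxHeaderBytes = 1 << 20 (1,048,576 bytes),
// maxHeaderListSize advertises 1048576 + 10*32 = 1048896 octets as
// SETTINGS_MAX_HEADER_LIST_SIZE.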
   623  
   624  func (sc *serverConn) curOpenStreams() uint32 {
   625  	sc.serveG.check()
   626  	return sc.curClientStreams + sc.curPushedStreams
   627  }
   628  
   629  // stream represents a stream. This is the minimal metadata needed by
   630  // the serve goroutine. Most of the actual stream state is owned by
   631  // the http.Handler's goroutine in the responseWriter. Because the
   632  // responseWriter's responseWriterState is recycled at the end of a
   633  // handler, this struct intentionally has no pointer to the
   634  // *responseWriter{,State} itself, as the Handler ending nils out the
   635  // responseWriter's state field.
   636  type stream struct {
   637  	// immutable:
   638  	sc        *serverConn
   639  	id        uint32
   640  	body      *pipe       // non-nil if expecting DATA frames
   641  	cw        closeWaiter // closed once the stream transitions to the closed state
   642  	ctx       context.Context
   643  	cancelCtx func()
   644  
   645  	// owned by serverConn's serve loop:
   646  	bodyBytes        int64   // body bytes seen so far
   647  	declBodyBytes    int64   // or -1 if undeclared
   648  	flow             outflow // limits writing from Handler to client
   649  	inflow           inflow  // what the client is allowed to POST/etc to us
   650  	state            streamState
   651  	resetQueued      bool        // RST_STREAM queued for write; set by sc.resetStream
   652  	gotTrailerHeader bool        // HEADER frame for trailers was seen
   653  	wroteHeaders     bool        // whether we wrote headers (not status 100)
   654  	readDeadline     *time.Timer // nil if unused
   655  	writeDeadline    *time.Timer // nil if unused
   656  	closeErr         error       // set before cw is closed
   657  
   658  	trailer    http.Header // accumulated trailers
   659  	reqTrailer http.Header // handler's Request.Trailer
   660  }
   661  
   662  func (sc *serverConn) Framer() *Framer  { return sc.framer }
   663  func (sc *serverConn) CloseConn() error { return sc.conn.Close() }
   664  func (sc *serverConn) Flush() error     { return sc.bw.Flush() }
   665  func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
   666  	return sc.hpackEncoder, &sc.headerWriteBuf
   667  }
   668  
   669  func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
   670  	sc.serveG.check()
   671  	// http://tools.ietf.org/html/rfc7540#section-5.1
   672  	if st, ok := sc.streams[streamID]; ok {
   673  		return st.state, st
   674  	}
   675  	// "The first use of a new stream identifier implicitly closes all
   676  	// streams in the "idle" state that might have been initiated by
   677  	// that peer with a lower-valued stream identifier. For example, if
   678  	// a client sends a HEADERS frame on stream 7 without ever sending a
   679  	// frame on stream 5, then stream 5 transitions to the "closed"
   680  	// state when the first frame for stream 7 is sent or received."
   681  	if streamID%2 == 1 {
   682  		if streamID <= sc.maxClientStreamID {
   683  			return stateClosed, nil
   684  		}
   685  	} else {
   686  		if streamID <= sc.maxPushPromiseID {
   687  			return stateClosed, nil
   688  		}
   689  	}
   690  	return stateIdle, nil
   691  }
   692  
   693  // setConnState calls the net/http ConnState hook for this connection, if configured.
   694  // Note that the net/http package does StateNew and StateClosed for us.
   695  // There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
   696  func (sc *serverConn) setConnState(state http.ConnState) {
   697  	if sc.hs.ConnState != nil {
   698  		sc.hs.ConnState(sc.conn, state)
   699  	}
   700  }
   701  
   702  func (sc *serverConn) vlogf(format string, args ...interface{}) {
   703  	if VerboseLogs {
   704  		sc.logf(format, args...)
   705  	}
   706  }
   707  
   708  func (sc *serverConn) logf(format string, args ...interface{}) {
   709  	if lg := sc.hs.ErrorLog; lg != nil {
   710  		lg.Printf(format, args...)
   711  	} else {
   712  		log.Printf(format, args...)
   713  	}
   714  }
   715  
   716  // errno returns v's underlying uintptr, else 0.
   717  //
   718  // TODO: remove this helper function once http2 can use build
   719  // tags. See comment in isClosedConnError.
   720  func errno(v error) uintptr {
   721  	if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
   722  		return uintptr(rv.Uint())
   723  	}
   724  	return 0
   725  }
   726  
   727  // isClosedConnError reports whether err is an error from use of a closed
   728  // network connection.
   729  func isClosedConnError(err error) bool {
   730  	if err == nil {
   731  		return false
   732  	}
   733  
   734  	// TODO: remove this string search and be more like the Windows
   735  	// case below. That might involve modifying the standard library
   736  	// to return better error types.
   737  	str := err.Error()
   738  	if strings.Contains(str, "use of closed network connection") {
   739  		return true
   740  	}
   741  
   742  	// TODO(bradfitz): x/tools/cmd/bundle doesn't really support
   743  	// build tags, so I can't make an http2_windows.go file with
   744  	// Windows-specific stuff. Fix that and move this, once we
   745  	// have a way to bundle this into std's net/http somehow.
   746  	if runtime.GOOS == "windows" {
   747  		if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
   748  			if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
   749  				const WSAECONNABORTED = 10053
   750  				const WSAECONNRESET = 10054
   751  				if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
   752  					return true
   753  				}
   754  			}
   755  		}
   756  	}
   757  	return false
   758  }
   759  
   760  func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
   761  	if err == nil {
   762  		return
   763  	}
   764  	if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout {
   765  		// Boring, expected errors.
   766  		sc.vlogf(format, args...)
   767  	} else {
   768  		sc.logf(format, args...)
   769  	}
   770  }
   771  
   772  // maxCachedCanonicalHeadersKeysSize is an arbitrarily-chosen limit on the size
   773  // of the entries in the canonHeader cache.
   774  // This should be larger than the size of unique, uncommon header keys likely to
   775  // be sent by the peer, while not so high as to permit unreasonable memory usage
   776  // if the peer sends an unbounded number of unique header keys.
   777  const maxCachedCanonicalHeadersKeysSize = 2048
   778  
   779  func (sc *serverConn) canonicalHeader(v string) string {
   780  	sc.serveG.check()
   781  	buildCommonHeaderMapsOnce()
   782  	cv, ok := commonCanonHeader[v]
   783  	if ok {
   784  		return cv
   785  	}
   786  	cv, ok = sc.canonHeader[v]
   787  	if ok {
   788  		return cv
   789  	}
   790  	if sc.canonHeader == nil {
   791  		sc.canonHeader = make(map[string]string)
   792  	}
   793  	cv = http.CanonicalHeaderKey(v)
   794  	size := 100 + len(v)*2 // 100 bytes of map overhead + key + value
   795  	if sc.canonHeaderKeysSize+size <= maxCachedCanonicalHeadersKeysSize {
   796  		sc.canonHeader[v] = cv
   797  		sc.canonHeaderKeysSize += size
   798  	}
   799  	return cv
   800  }
   801  
   802  type readFrameResult struct {
   803  	f   Frame // valid until readMore is called
   804  	err error
   805  
   806  	// readMore should be called once the consumer no longer needs or
   807  	// retains f. After readMore, f is invalid and more frames can be
   808  	// read.
   809  	readMore func()
   810  }
   811  
   812  // readFrames is the loop that reads incoming frames.
   813  // It takes care to only read one frame at a time, blocking until the
   814  // consumer is done with the frame.
   815  // It's run on its own goroutine.
   816  func (sc *serverConn) readFrames() {
   817  	gate := make(gate)
   818  	gateDone := gate.Done
   819  	for {
   820  		f, err := sc.framer.ReadFrame()
   821  		select {
   822  		case sc.readFrameCh <- readFrameResult{f, err, gateDone}:
   823  		case <-sc.doneServing:
   824  			return
   825  		}
   826  		select {
   827  		case <-gate:
   828  		case <-sc.doneServing:
   829  			return
   830  		}
   831  		if terminalReadFrameError(err) {
   832  			return
   833  		}
   834  	}
   835  }
   836  
   837  // frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
   838  type frameWriteResult struct {
   839  	_   incomparable
   840  	wr  FrameWriteRequest // what was written (or attempted)
   841  	err error             // result of the writeFrame call
   842  }
   843  
   844  // writeFrameAsync runs in its own goroutine and writes a single frame
   845  // and then reports when it's done.
   846  // At most one goroutine can be running writeFrameAsync at a time per
   847  // serverConn.
   848  func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
   849  	var err error
   850  	if wd == nil {
   851  		err = wr.write.writeFrame(sc)
   852  	} else {
   853  		err = sc.framer.endWrite()
   854  	}
   855  	sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err}
   856  }
   857  
   858  func (sc *serverConn) closeAllStreamsOnConnClose() {
   859  	sc.serveG.check()
   860  	for _, st := range sc.streams {
   861  		sc.closeStream(st, errClientDisconnected)
   862  	}
   863  }
   864  
   865  func (sc *serverConn) stopShutdownTimer() {
   866  	sc.serveG.check()
   867  	if t := sc.shutdownTimer; t != nil {
   868  		t.Stop()
   869  	}
   870  }
   871  
   872  func (sc *serverConn) notePanic() {
   873  	// Note: this is for serverConn.serve panicking, not http.Handler code.
   874  	if testHookOnPanicMu != nil {
   875  		testHookOnPanicMu.Lock()
   876  		defer testHookOnPanicMu.Unlock()
   877  	}
   878  	if testHookOnPanic != nil {
   879  		if e := recover(); e != nil {
   880  			if testHookOnPanic(sc, e) {
   881  				panic(e)
   882  			}
   883  		}
   884  	}
   885  }
   886  
   887  func (sc *serverConn) serve() {
   888  	sc.serveG.check()
   889  	defer sc.notePanic()
   890  	defer sc.conn.Close()
   891  	defer sc.closeAllStreamsOnConnClose()
   892  	defer sc.stopShutdownTimer()
   893  	defer close(sc.doneServing) // unblocks handlers trying to send
   894  
   895  	if VerboseLogs {
   896  		sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
   897  	}
   898  
   899  	sc.writeFrame(FrameWriteRequest{
   900  		write: writeSettings{
   901  			{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
   902  			{SettingMaxConcurrentStreams, sc.advMaxStreams},
   903  			{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
   904  			{SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()},
   905  			{SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
   906  		},
   907  	})
   908  	sc.unackedSettings++
   909  
   910  	// Each connection starts with initialWindowSize inflow tokens.
   911  	// If a higher value is configured, we add more tokens.
   912  	if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
   913  		sc.sendWindowUpdate(nil, int(diff))
   914  	}
   915  
   916  	if err := sc.readPreface(); err != nil {
   917  		sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
   918  		return
   919  	}
   920  	// Now that we've got the preface, get us out of the
   921  	// "StateNew" state. We can't go directly to idle, though.
   922  	// Active means we read some data and anticipate a request. We'll
   923  	// do another Active when we get a HEADERS frame.
   924  	sc.setConnState(http.StateActive)
   925  	sc.setConnState(http.StateIdle)
   926  
   927  	if sc.srv.IdleTimeout != 0 {
   928  		sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
   929  		defer sc.idleTimer.Stop()
   930  	}
   931  
   932  	go sc.readFrames() // closed by defer sc.conn.Close above
   933  
   934  	settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
   935  	defer settingsTimer.Stop()
   936  
   937  	loopNum := 0
   938  	for {
   939  		loopNum++
   940  		select {
   941  		case wr := <-sc.wantWriteFrameCh:
   942  			if se, ok := wr.write.(StreamError); ok {
   943  				sc.resetStream(se)
   944  				break
   945  			}
   946  			sc.writeFrame(wr)
   947  		case res := <-sc.wroteFrameCh:
   948  			sc.wroteFrame(res)
   949  		case res := <-sc.readFrameCh:
   950  			// Process any written frames before reading new frames from the client since a
   951  			// written frame could have triggered a new stream to be started.
   952  			if sc.writingFrameAsync {
   953  				select {
   954  				case wroteRes := <-sc.wroteFrameCh:
   955  					sc.wroteFrame(wroteRes)
   956  				default:
   957  				}
   958  			}
   959  			if !sc.processFrameFromReader(res) {
   960  				return
   961  			}
   962  			res.readMore()
   963  			if settingsTimer != nil {
   964  				settingsTimer.Stop()
   965  				settingsTimer = nil
   966  			}
   967  		case m := <-sc.bodyReadCh:
   968  			sc.noteBodyRead(m.st, m.n)
   969  		case msg := <-sc.serveMsgCh:
   970  			switch v := msg.(type) {
   971  			case func(int):
   972  				v(loopNum) // for testing
   973  			case *serverMessage:
   974  				switch v {
   975  				case settingsTimerMsg:
   976  					sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
   977  					return
   978  				case idleTimerMsg:
   979  					sc.vlogf("connection is idle")
   980  					sc.goAway(ErrCodeNo)
   981  				case shutdownTimerMsg:
   982  					sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
   983  					return
   984  				case gracefulShutdownMsg:
   985  					sc.startGracefulShutdownInternal()
   986  				case handlerDoneMsg:
   987  					sc.handlerDone()
   988  				default:
   989  					panic("unknown timer")
   990  				}
   991  			case *startPushRequest:
   992  				sc.startPush(v)
   993  			case func(*serverConn):
   994  				v(sc)
   995  			default:
   996  				panic(fmt.Sprintf("unexpected type %T", v))
   997  			}
   998  		}
   999  
  1000  		// If the peer is causing us to generate a lot of control frames,
  1001  		// but not reading them from us, assume they are trying to make us
  1002  		// run out of memory.
  1003  		if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
  1004  			sc.vlogf("http2: too many control frames in send queue, closing connection")
  1005  			return
  1006  		}
  1007  
  1008  		// Start the shutdown timer after sending a GOAWAY. When sending GOAWAY
  1009  		// with no error code (graceful shutdown), don't start the timer until
  1010  		// all open streams have been completed.
  1011  		sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame
  1012  		gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0
  1013  		if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) {
  1014  			sc.shutDownIn(goAwayTimeout)
  1015  		}
  1016  	}
  1017  }
  1018  
  1019  type serverMessage int
  1020  
  1021  // Message values sent to serveMsgCh.
  1022  var (
  1023  	settingsTimerMsg    = new(serverMessage)
  1024  	idleTimerMsg        = new(serverMessage)
  1025  	shutdownTimerMsg    = new(serverMessage)
  1026  	gracefulShutdownMsg = new(serverMessage)
  1027  	handlerDoneMsg      = new(serverMessage)
  1028  )
  1029  
  1030  func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
  1031  func (sc *serverConn) onIdleTimer()     { sc.sendServeMsg(idleTimerMsg) }
  1032  func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
  1033  
  1034  func (sc *serverConn) sendServeMsg(msg interface{}) {
  1035  	sc.serveG.checkNotOn() // NOT
  1036  	select {
  1037  	case sc.serveMsgCh <- msg:
  1038  	case <-sc.doneServing:
  1039  	}
  1040  }
  1041  
  1042  var errPrefaceTimeout = errors.New("timeout waiting for client preface")
  1043  
  1044  // readPreface reads the ClientPreface greeting from the peer or
  1045  // returns errPrefaceTimeout on timeout, or an error if the greeting
  1046  // is invalid.
  1047  func (sc *serverConn) readPreface() error {
  1048  	if sc.sawClientPreface {
  1049  		return nil
  1050  	}
  1051  	errc := make(chan error, 1)
  1052  	go func() {
  1053  		// Read the client preface
  1054  		buf := make([]byte, len(ClientPreface))
  1055  		if _, err := io.ReadFull(sc.conn, buf); err != nil {
  1056  			errc <- err
  1057  		} else if !bytes.Equal(buf, clientPreface) {
  1058  			errc <- fmt.Errorf("bogus greeting %q", buf)
  1059  		} else {
  1060  			errc <- nil
  1061  		}
  1062  	}()
  1063  	timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
  1064  	defer timer.Stop()
  1065  	select {
  1066  	case <-timer.C:
  1067  		return errPrefaceTimeout
  1068  	case err := <-errc:
  1069  		if err == nil {
  1070  			if VerboseLogs {
  1071  				sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
  1072  			}
  1073  		}
  1074  		return err
  1075  	}
  1076  }
  1077  
  1078  var errChanPool = sync.Pool{
  1079  	New: func() interface{} { return make(chan error, 1) },
  1080  }
  1081  
  1082  var writeDataPool = sync.Pool{
  1083  	New: func() interface{} { return new(writeData) },
  1084  }
  1085  
  1086  // writeDataFromHandler writes DATA response frames from a handler on
  1087  // the given stream.
  1088  func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
  1089  	ch := errChanPool.Get().(chan error)
  1090  	writeArg := writeDataPool.Get().(*writeData)
  1091  	*writeArg = writeData{stream.id, data, endStream}
  1092  	err := sc.writeFrameFromHandler(FrameWriteRequest{
  1093  		write:  writeArg,
  1094  		stream: stream,
  1095  		done:   ch,
  1096  	})
  1097  	if err != nil {
  1098  		return err
  1099  	}
  1100  	var frameWriteDone bool // the frame write is done (successfully or not)
  1101  	select {
  1102  	case err = <-ch:
  1103  		frameWriteDone = true
  1104  	case <-sc.doneServing:
  1105  		return errClientDisconnected
  1106  	case <-stream.cw:
  1107  		// If both ch and stream.cw were ready (as might
  1108  		// happen on the final Write after an http.Handler
  1109  		// ends), prefer the write result. Otherwise this
  1110  		// might just be us successfully closing the stream.
  1111  		// The writeFrameAsync and serve goroutines guarantee
  1112  		// that the ch send will happen before the stream.cw
  1113  		// close.
  1114  		select {
  1115  		case err = <-ch:
  1116  			frameWriteDone = true
  1117  		default:
  1118  			return errStreamClosed
  1119  		}
  1120  	}
  1121  	errChanPool.Put(ch)
  1122  	if frameWriteDone {
  1123  		writeDataPool.Put(writeArg)
  1124  	}
  1125  	return err
  1126  }
  1127  
  1128  // writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts
  1129  // if the connection has gone away.
  1130  //
  1131  // This must not be run from the serve goroutine itself, else it might
  1132  // deadlock writing to sc.wantWriteFrameCh (which is only mildly
  1133  // buffered and is read by serve itself). If you're on the serve
  1134  // goroutine, call writeFrame instead.
  1135  func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error {
  1136  	sc.serveG.checkNotOn() // NOT
  1137  	select {
  1138  	case sc.wantWriteFrameCh <- wr:
  1139  		return nil
  1140  	case <-sc.doneServing:
  1141  		// Serve loop is gone.
  1142  		// Client has closed their connection to the server.
  1143  		return errClientDisconnected
  1144  	}
  1145  }
  1146  
  1147  // writeFrame schedules a frame to write and sends it if there's nothing
  1148  // already being written.
  1149  //
  1150  // There is no pushback here (the serve goroutine never blocks). It's
  1151  // the http.Handlers that block, waiting for their previous frames to
  1152  // make it onto the wire.
  1153  //
  1154  // If you're not on the serve goroutine, use writeFrameFromHandler instead.
  1155  func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
  1156  	sc.serveG.check()
  1157  
  1158  	// If true, wr will not be written and wr.done will not be signaled.
  1159  	var ignoreWrite bool
  1160  
  1161  	// We are not allowed to write frames on closed streams. RFC 7540 Section
  1162  	// 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on
  1163  	// a closed stream." Our server never sends PRIORITY, so that exception
  1164  	// does not apply.
  1165  	//
  1166  	// The serverConn might close an open stream while the stream's handler
  1167  	// is still running. For example, the server might close a stream when it
  1168  	// receives bad data from the client. If this happens, the handler might
  1169  	// attempt to write a frame after the stream has been closed (since the
  1170  	// handler hasn't yet been notified of the close). In this case, we simply
  1171  	// ignore the frame. The handler will notice that the stream is closed when
  1172  	// it waits for the frame to be written.
  1173  	//
  1174  	// As an exception to this rule, we allow sending RST_STREAM after close.
  1175  	// This allows us to immediately reject new streams without tracking any
  1176  	// state for those streams (except for the queued RST_STREAM frame). This
  1177  	// may result in duplicate RST_STREAMs in some cases, but the client should
  1178  	// ignore those.
  1179  	if wr.StreamID() != 0 {
  1180  		_, isReset := wr.write.(StreamError)
  1181  		if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset {
  1182  			ignoreWrite = true
  1183  		}
  1184  	}
  1185  
  1186  	// Don't send a 100-continue response if we've already sent headers.
  1187  	// See golang.org/issue/14030.
  1188  	switch wr.write.(type) {
  1189  	case *writeResHeaders:
  1190  		wr.stream.wroteHeaders = true
  1191  	case write100ContinueHeadersFrame:
  1192  		if wr.stream.wroteHeaders {
  1193  			// We do not need to notify wr.done because this frame is
  1194  			// never written with wr.done != nil.
  1195  			if wr.done != nil {
  1196  				panic("wr.done != nil for write100ContinueHeadersFrame")
  1197  			}
  1198  			ignoreWrite = true
  1199  		}
  1200  	}
  1201  
  1202  	if !ignoreWrite {
  1203  		if wr.isControl() {
  1204  			sc.queuedControlFrames++
  1205  			// For extra safety, detect wraparounds, which should not happen,
  1206  			// and pull the plug.
  1207  			if sc.queuedControlFrames < 0 {
  1208  				sc.conn.Close()
  1209  			}
  1210  		}
  1211  		sc.writeSched.Push(wr)
  1212  	}
  1213  	sc.scheduleFrameWrite()
  1214  }
  1215  
  1216  // startFrameWrite starts writing wr, on a separate goroutine when the
  1217  // write might block on the network, and updates the serve goroutine's
  1218  // state about the world from the info in wr.
  1219  func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
  1220  	sc.serveG.check()
  1221  	if sc.writingFrame {
  1222  		panic("internal error: can only be writing one frame at a time")
  1223  	}
  1224  
  1225  	st := wr.stream
  1226  	if st != nil {
  1227  		switch st.state {
  1228  		case stateHalfClosedLocal:
  1229  			switch wr.write.(type) {
  1230  			case StreamError, handlerPanicRST, writeWindowUpdate:
  1231  				// RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE
  1232  				// in this state. (We never send PRIORITY from the server, so that is not checked.)
  1233  			default:
  1234  				panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr))
  1235  			}
  1236  		case stateClosed:
  1237  			panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr))
  1238  		}
  1239  	}
  1240  	if wpp, ok := wr.write.(*writePushPromise); ok {
  1241  		var err error
  1242  		wpp.promisedID, err = wpp.allocatePromisedID()
  1243  		if err != nil {
  1244  			sc.writingFrameAsync = false
  1245  			wr.replyToWriter(err)
  1246  			return
  1247  		}
  1248  	}
  1249  
  1250  	sc.writingFrame = true
  1251  	sc.needsFrameFlush = true
  1252  	if wr.write.staysWithinBuffer(sc.bw.Available()) {
  1253  		sc.writingFrameAsync = false
  1254  		err := wr.write.writeFrame(sc)
  1255  		sc.wroteFrame(frameWriteResult{wr: wr, err: err})
  1256  	} else if wd, ok := wr.write.(*writeData); ok {
  1257  		// Encode the frame in the serve goroutine, to ensure we don't have
  1258  		// any lingering asynchronous references to data passed to Write.
  1259  		// See https://go.dev/issue/58446.
  1260  		sc.framer.startWriteDataPadded(wd.streamID, wd.endStream, wd.p, nil)
  1261  		sc.writingFrameAsync = true
  1262  		go sc.writeFrameAsync(wr, wd)
  1263  	} else {
  1264  		sc.writingFrameAsync = true
  1265  		go sc.writeFrameAsync(wr, nil)
  1266  	}
  1267  }
  1268  
  1269  // errHandlerPanicked is the error given to any callers blocked in a read from
  1270  // Request.Body when the main goroutine panics. Since most handlers read in the
  1271  // main ServeHTTP goroutine, this will show up rarely.
  1272  var errHandlerPanicked = errors.New("http2: handler panicked")
  1273  
  1274  // wroteFrame is called on the serve goroutine with the result of
  1275  // whatever happened on writeFrameAsync.
  1276  func (sc *serverConn) wroteFrame(res frameWriteResult) {
  1277  	sc.serveG.check()
  1278  	if !sc.writingFrame {
  1279  		panic("internal error: expected to be already writing a frame")
  1280  	}
  1281  	sc.writingFrame = false
  1282  	sc.writingFrameAsync = false
  1283  
  1284  	wr := res.wr
  1285  
  1286  	if writeEndsStream(wr.write) {
  1287  		st := wr.stream
  1288  		if st == nil {
  1289  			panic("internal error: expecting non-nil stream")
  1290  		}
  1291  		switch st.state {
  1292  		case stateOpen:
  1293  			// Here we would go to stateHalfClosedLocal in
  1294  			// theory, but since our handler is done and
  1295  			// the net/http package provides no mechanism
  1296  			// for closing a ResponseWriter while still
  1297  			// reading data (see possible TODO at top of
  1298  			// this file), we go into closed state here
  1299  			// anyway, after telling the peer we're
  1300  			// hanging up on them. We'll transition to
  1301  			// stateClosed after the RST_STREAM frame is
  1302  			// written.
  1303  			st.state = stateHalfClosedLocal
  1304  			// Section 8.1: a server MAY request that the client abort
  1305  			// transmission of a request without error by sending a
  1306  			// RST_STREAM with an error code of NO_ERROR after sending
  1307  			// a complete response.
  1308  			sc.resetStream(streamError(st.id, ErrCodeNo))
  1309  		case stateHalfClosedRemote:
  1310  			sc.closeStream(st, errHandlerComplete)
  1311  		}
  1312  	} else {
  1313  		switch v := wr.write.(type) {
  1314  		case StreamError:
  1315  			// st may be unknown if the RST_STREAM was generated to reject bad input.
  1316  			if st, ok := sc.streams[v.StreamID]; ok {
  1317  				sc.closeStream(st, v)
  1318  			}
  1319  		case handlerPanicRST:
  1320  			sc.closeStream(wr.stream, errHandlerPanicked)
  1321  		}
  1322  	}
  1323  
  1324  	// Reply (if requested) to unblock the ServeHTTP goroutine.
  1325  	wr.replyToWriter(res.err)
  1326  
  1327  	sc.scheduleFrameWrite()
  1328  }
  1329  
  1330  // scheduleFrameWrite tickles the frame writing scheduler.
  1331  //
  1332  // If a frame is already being written, nothing happens. This will be called again
  1333  // when the frame is done being written.
  1334  //
  1335  // If a frame isn't being written and we need to send one, the best frame
  1336  // to send is selected by writeSched.
  1337  //
  1338  // If a frame isn't being written and there's nothing else to send, we
  1339  // flush the write buffer.
  1340  func (sc *serverConn) scheduleFrameWrite() {
  1341  	sc.serveG.check()
  1342  	if sc.writingFrame || sc.inFrameScheduleLoop {
  1343  		return
  1344  	}
  1345  	sc.inFrameScheduleLoop = true
  1346  	for !sc.writingFrameAsync {
  1347  		if sc.needToSendGoAway {
  1348  			sc.needToSendGoAway = false
  1349  			sc.startFrameWrite(FrameWriteRequest{
  1350  				write: &writeGoAway{
  1351  					maxStreamID: sc.maxClientStreamID,
  1352  					code:        sc.goAwayCode,
  1353  				},
  1354  			})
  1355  			continue
  1356  		}
  1357  		if sc.needToSendSettingsAck {
  1358  			sc.needToSendSettingsAck = false
  1359  			sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}})
  1360  			continue
  1361  		}
  1362  		if !sc.inGoAway || sc.goAwayCode == ErrCodeNo {
  1363  			if wr, ok := sc.writeSched.Pop(); ok {
  1364  				if wr.isControl() {
  1365  					sc.queuedControlFrames--
  1366  				}
  1367  				sc.startFrameWrite(wr)
  1368  				continue
  1369  			}
  1370  		}
  1371  		if sc.needsFrameFlush {
  1372  			sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}})
  1373  			sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
  1374  			continue
  1375  		}
  1376  		break
  1377  	}
  1378  	sc.inFrameScheduleLoop = false
  1379  }
  1380  
  1381  // startGracefulShutdown gracefully shuts down a connection. This
  1382  // sends GOAWAY with ErrCodeNo to tell the client we're gracefully
  1383  // shutting down. The connection isn't closed until all current
  1384  // streams are done.
  1385  //
  1386  // startGracefulShutdown returns immediately; it does not wait until
  1387  // the connection has shut down.
  1388  func (sc *serverConn) startGracefulShutdown() {
  1389  	sc.serveG.checkNotOn() // NOT
  1390  	sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) })
  1391  }
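
// Illustrative sketch (not part of the upstream file): user code does not call
// startGracefulShutdown directly. With a server wired up through
// ConfigureServer (or net/http's built-in HTTP/2 support), http.Server.Shutdown
// registers a hook that eventually triggers this graceful GOAWAY path on each
// connection. The 30-second timeout below is an arbitrary example value.
func exampleShutdown(hs *http.Server) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// Stop accepting new streams and wait for in-flight handlers to finish.
	return hs.Shutdown(ctx)
}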
  1392  
  1393  // After sending GOAWAY with an error code (non-graceful shutdown), the
  1394  // connection will close after goAwayTimeout.
  1395  //
  1396  // If we close the connection immediately after sending GOAWAY, there may
  1397  // be unsent data in our kernel receive buffer, which will cause the kernel
  1398  // to send a TCP RST on close() instead of a FIN. This RST will abort the
  1399  // connection immediately, whether or not the client had received the GOAWAY.
  1400  //
  1401  // Ideally we should delay for at least 1 RTT + epsilon so the client has
  1402  // a chance to read the GOAWAY and stop sending messages. Measuring RTT
  1403  // is hard, so we approximate with 1 second. See golang.org/issue/18701.
  1404  //
  1405  // This is a var so it can be shorter in tests, where all requests use the
  1406  // loopback interface, making the expected RTT very small.
  1407  //
  1408  // TODO: configurable?
  1409  var goAwayTimeout = 1 * time.Second
  1410  
  1411  func (sc *serverConn) startGracefulShutdownInternal() {
  1412  	sc.goAway(ErrCodeNo)
  1413  }
  1414  
  1415  func (sc *serverConn) goAway(code ErrCode) {
  1416  	sc.serveG.check()
  1417  	if sc.inGoAway {
  1418  		if sc.goAwayCode == ErrCodeNo {
  1419  			sc.goAwayCode = code
  1420  		}
  1421  		return
  1422  	}
  1423  	sc.inGoAway = true
  1424  	sc.needToSendGoAway = true
  1425  	sc.goAwayCode = code
  1426  	sc.scheduleFrameWrite()
  1427  }
  1428  
  1429  func (sc *serverConn) shutDownIn(d time.Duration) {
  1430  	sc.serveG.check()
  1431  	sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
  1432  }
  1433  
  1434  func (sc *serverConn) resetStream(se StreamError) {
  1435  	sc.serveG.check()
  1436  	sc.writeFrame(FrameWriteRequest{write: se})
  1437  	if st, ok := sc.streams[se.StreamID]; ok {
  1438  		st.resetQueued = true
  1439  	}
  1440  }
  1441  
  1442  // processFrameFromReader processes the serve loop's read from readFrameCh from the
  1443  // frame-reading goroutine.
  1444  // processFrameFromReader returns whether the connection should be kept open.
  1445  func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
  1446  	sc.serveG.check()
  1447  	err := res.err
  1448  	if err != nil {
  1449  		if err == ErrFrameTooLarge {
  1450  			sc.goAway(ErrCodeFrameSize)
  1451  			return true // goAway will close the loop
  1452  		}
  1453  		clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)
  1454  		if clientGone {
  1455  			// TODO: could we also get into this state if
  1456  			// the peer does a half close
  1457  			// (e.g. CloseWrite) because they're done
  1458  			// sending frames but they're still wanting
  1459  			// our open replies?  Investigate.
  1460  			// TODO: add CloseWrite to crypto/tls.Conn first
  1461  			// so we have a way to test this? I suppose
  1462  			// just for testing we could have a non-TLS mode.
  1463  			return false
  1464  		}
  1465  	} else {
  1466  		f := res.f
  1467  		if VerboseLogs {
  1468  			sc.vlogf("http2: server read frame %v", summarizeFrame(f))
  1469  		}
  1470  		err = sc.processFrame(f)
  1471  		if err == nil {
  1472  			return true
  1473  		}
  1474  	}
  1475  
  1476  	switch ev := err.(type) {
  1477  	case StreamError:
  1478  		sc.resetStream(ev)
  1479  		return true
  1480  	case goAwayFlowError:
  1481  		sc.goAway(ErrCodeFlowControl)
  1482  		return true
  1483  	case ConnectionError:
  1484  		sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
  1485  		sc.goAway(ErrCode(ev))
  1486  		return true // goAway will handle shutdown
  1487  	default:
  1488  		if res.err != nil {
  1489  			sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
  1490  		} else {
  1491  			sc.logf("http2: server closing client connection: %v", err)
  1492  		}
  1493  		return false
  1494  	}
  1495  }
  1496  
  1497  func (sc *serverConn) processFrame(f Frame) error {
  1498  	sc.serveG.check()
  1499  
  1500  	// First frame received must be SETTINGS.
  1501  	if !sc.sawFirstSettings {
  1502  		if _, ok := f.(*SettingsFrame); !ok {
  1503  			return sc.countError("first_settings", ConnectionError(ErrCodeProtocol))
  1504  		}
  1505  		sc.sawFirstSettings = true
  1506  	}
  1507  
  1508  	// Discard frames for streams initiated after the identified last
  1509  	// stream sent in a GOAWAY, or all frames after sending an error.
  1510  	// We still need to return connection-level flow control for DATA frames.
  1511  	// RFC 9113 Section 6.8.
  1512  	if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) {
  1513  
  1514  		if f, ok := f.(*DataFrame); ok {
  1515  			if !sc.inflow.take(f.Length) {
  1516  				return sc.countError("data_flow", streamError(f.Header().StreamID, ErrCodeFlowControl))
  1517  			}
  1518  			sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
  1519  		}
  1520  		return nil
  1521  	}
  1522  
  1523  	switch f := f.(type) {
  1524  	case *SettingsFrame:
  1525  		return sc.processSettings(f)
  1526  	case *MetaHeadersFrame:
  1527  		return sc.processHeaders(f)
  1528  	case *WindowUpdateFrame:
  1529  		return sc.processWindowUpdate(f)
  1530  	case *PingFrame:
  1531  		return sc.processPing(f)
  1532  	case *DataFrame:
  1533  		return sc.processData(f)
  1534  	case *RSTStreamFrame:
  1535  		return sc.processResetStream(f)
  1536  	case *PriorityFrame:
  1537  		return sc.processPriority(f)
  1538  	case *GoAwayFrame:
  1539  		return sc.processGoAway(f)
  1540  	case *PushPromiseFrame:
  1541  		// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
  1542  		// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
  1543  		return sc.countError("push_promise", ConnectionError(ErrCodeProtocol))
  1544  	default:
  1545  		sc.vlogf("http2: server ignoring frame: %v", f.Header())
  1546  		return nil
  1547  	}
  1548  }
  1549  
  1550  func (sc *serverConn) processPing(f *PingFrame) error {
  1551  	sc.serveG.check()
  1552  	if f.IsAck() {
  1553  		// 6.7 PING: " An endpoint MUST NOT respond to PING frames
  1554  		// containing this flag."
  1555  		return nil
  1556  	}
  1557  	if f.StreamID != 0 {
  1558  		// "PING frames are not associated with any individual
  1559  		// stream. If a PING frame is received with a stream
  1560  		// identifier field value other than 0x0, the recipient MUST
  1561  		// respond with a connection error (Section 5.4.1) of type
  1562  		// PROTOCOL_ERROR."
  1563  		return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol))
  1564  	}
  1565  	sc.writeFrame(FrameWriteRequest{write: writePingAck{f}})
  1566  	return nil
  1567  }
  1568  
  1569  func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
  1570  	sc.serveG.check()
  1571  	switch {
  1572  	case f.StreamID != 0: // stream-level flow control
  1573  		state, st := sc.state(f.StreamID)
  1574  		if state == stateIdle {
  1575  			// Section 5.1: "Receiving any frame other than HEADERS
  1576  			// or PRIORITY on a stream in this state MUST be
  1577  			// treated as a connection error (Section 5.4.1) of
  1578  			// type PROTOCOL_ERROR."
  1579  			return sc.countError("stream_idle", ConnectionError(ErrCodeProtocol))
  1580  		}
  1581  		if st == nil {
  1582  			// "WINDOW_UPDATE can be sent by a peer that has sent a
  1583  			// frame bearing the END_STREAM flag. This means that a
  1584  			// receiver could receive a WINDOW_UPDATE frame on a "half
  1585  			// closed (remote)" or "closed" stream. A receiver MUST
  1586  			// NOT treat this as an error, see Section 5.1."
  1587  			return nil
  1588  		}
  1589  		if !st.flow.add(int32(f.Increment)) {
  1590  			return sc.countError("bad_flow", streamError(f.StreamID, ErrCodeFlowControl))
  1591  		}
  1592  	default: // connection-level flow control
  1593  		if !sc.flow.add(int32(f.Increment)) {
  1594  			return goAwayFlowError{}
  1595  		}
  1596  	}
  1597  	sc.scheduleFrameWrite()
  1598  	return nil
  1599  }
  1600  
  1601  func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
  1602  	sc.serveG.check()
  1603  
  1604  	state, st := sc.state(f.StreamID)
  1605  	if state == stateIdle {
  1606  		// 6.4 "RST_STREAM frames MUST NOT be sent for a
  1607  		// stream in the "idle" state. If a RST_STREAM frame
  1608  		// identifying an idle stream is received, the
  1609  		// recipient MUST treat this as a connection error
  1610  		// (Section 5.4.1) of type PROTOCOL_ERROR.
  1611  		// (Section 5.4.1) of type PROTOCOL_ERROR."
  1612  	}
  1613  	if st != nil {
  1614  		st.cancelCtx()
  1615  		sc.closeStream(st, streamError(f.StreamID, f.ErrCode))
  1616  	}
  1617  	return nil
  1618  }
  1619  
  1620  func (sc *serverConn) closeStream(st *stream, err error) {
  1621  	sc.serveG.check()
  1622  	if st.state == stateIdle || st.state == stateClosed {
  1623  		panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
  1624  	}
  1625  	st.state = stateClosed
  1626  	if st.readDeadline != nil {
  1627  		st.readDeadline.Stop()
  1628  	}
  1629  	if st.writeDeadline != nil {
  1630  		st.writeDeadline.Stop()
  1631  	}
  1632  	if st.isPushed() {
  1633  		sc.curPushedStreams--
  1634  	} else {
  1635  		sc.curClientStreams--
  1636  	}
  1637  	delete(sc.streams, st.id)
  1638  	if len(sc.streams) == 0 {
  1639  		sc.setConnState(http.StateIdle)
  1640  		if sc.srv.IdleTimeout != 0 {
  1641  			sc.idleTimer.Reset(sc.srv.IdleTimeout)
  1642  		}
  1643  		if h1ServerKeepAlivesDisabled(sc.hs) {
  1644  			sc.startGracefulShutdownInternal()
  1645  		}
  1646  	}
  1647  	if p := st.body; p != nil {
  1648  		// Return any buffered unread bytes worth of conn-level flow control.
  1649  		// See golang.org/issue/16481
  1650  		sc.sendWindowUpdate(nil, p.Len())
  1651  
  1652  		p.CloseWithError(err)
  1653  	}
  1654  	if e, ok := err.(StreamError); ok {
  1655  		if e.Cause != nil {
  1656  			err = e.Cause
  1657  		} else {
  1658  			err = errStreamClosed
  1659  		}
  1660  	}
  1661  	st.closeErr = err
  1662  	st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
  1663  	sc.writeSched.CloseStream(st.id)
  1664  }
  1665  
  1666  func (sc *serverConn) processSettings(f *SettingsFrame) error {
  1667  	sc.serveG.check()
  1668  	if f.IsAck() {
  1669  		sc.unackedSettings--
  1670  		if sc.unackedSettings < 0 {
  1671  			// Why is the peer ACKing settings we never sent?
  1672  			// The spec doesn't mention this case, but
  1673  			// hang up on them anyway.
  1674  			return sc.countError("ack_mystery", ConnectionError(ErrCodeProtocol))
  1675  		}
  1676  		return nil
  1677  	}
  1678  	if f.NumSettings() > 100 || f.HasDuplicates() {
  1679  		// This isn't actually in the spec, but hang up on
  1680  		// suspiciously large settings frames or those with
  1681  		// duplicate entries.
  1682  		return sc.countError("settings_big_or_dups", ConnectionError(ErrCodeProtocol))
  1683  	}
  1684  	if err := f.ForeachSetting(sc.processSetting); err != nil {
  1685  		return err
  1686  	}
  1687  	// TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be
  1688  	// acknowledged individually, even if multiple are received before the ACK.
  1689  	sc.needToSendSettingsAck = true
  1690  	sc.scheduleFrameWrite()
  1691  	return nil
  1692  }
  1693  
  1694  func (sc *serverConn) processSetting(s Setting) error {
  1695  	sc.serveG.check()
  1696  	if err := s.Valid(); err != nil {
  1697  		return err
  1698  	}
  1699  	if VerboseLogs {
  1700  		sc.vlogf("http2: server processing setting %v", s)
  1701  	}
  1702  	switch s.ID {
  1703  	case SettingHeaderTableSize:
  1704  		sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
  1705  	case SettingEnablePush:
  1706  		sc.pushEnabled = s.Val != 0
  1707  	case SettingMaxConcurrentStreams:
  1708  		sc.clientMaxStreams = s.Val
  1709  	case SettingInitialWindowSize:
  1710  		return sc.processSettingInitialWindowSize(s.Val)
  1711  	case SettingMaxFrameSize:
  1712  		sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
  1713  	case SettingMaxHeaderListSize:
  1714  		sc.peerMaxHeaderListSize = s.Val
  1715  	default:
  1716  		// Unknown setting: "An endpoint that receives a SETTINGS
  1717  		// frame with any unknown or unsupported identifier MUST
  1718  		// ignore that setting."
  1719  		if VerboseLogs {
  1720  			sc.vlogf("http2: server ignoring unknown setting %v", s)
  1721  		}
  1722  	}
  1723  	return nil
  1724  }
  1725  
  1726  func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
  1727  	sc.serveG.check()
  1728  	// Note: val already validated to be within range by
  1729  	// processSetting's Valid call.
  1730  
  1731  	// "A SETTINGS frame can alter the initial flow control window
  1732  	// size for all current streams. When the value of
  1733  	// SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
  1734  	// adjust the size of all stream flow control windows that it
  1735  	// maintains by the difference between the new value and the
  1736  	// old value."
  1737  	old := sc.initialStreamSendWindowSize
  1738  	sc.initialStreamSendWindowSize = int32(val)
  1739  	growth := int32(val) - old // may be negative
  1740  	for _, st := range sc.streams {
  1741  		if !st.flow.add(growth) {
  1742  			// 6.9.2 Initial Flow Control Window Size
  1743  			// "An endpoint MUST treat a change to
  1744  			// SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
  1745  			// control window to exceed the maximum size as a
  1746  			// connection error (Section 5.4.1) of type
  1747  			// FLOW_CONTROL_ERROR."
  1748  			return sc.countError("setting_win_size", ConnectionError(ErrCodeFlowControl))
  1749  		}
  1750  	}
  1751  	return nil
  1752  }
  1753  
  1754  func (sc *serverConn) processData(f *DataFrame) error {
  1755  	sc.serveG.check()
  1756  	id := f.Header().StreamID
  1757  
  1758  	data := f.Data()
  1759  	state, st := sc.state(id)
  1760  	if id == 0 || state == stateIdle {
  1761  		// Section 6.1: "DATA frames MUST be associated with a
  1762  		// stream. If a DATA frame is received whose stream
  1763  		// identifier field is 0x0, the recipient MUST respond
  1764  		// with a connection error (Section 5.4.1) of type
  1765  		// PROTOCOL_ERROR."
  1766  		//
  1767  		// Section 5.1: "Receiving any frame other than HEADERS
  1768  		// or PRIORITY on a stream in this state MUST be
  1769  		// treated as a connection error (Section 5.4.1) of
  1770  		// type PROTOCOL_ERROR."
  1771  		return sc.countError("data_on_idle", ConnectionError(ErrCodeProtocol))
  1772  	}
  1773  
  1774  	// "If a DATA frame is received whose stream is not in "open"
  1775  	// or "half closed (local)" state, the recipient MUST respond
  1776  	// with a stream error (Section 5.4.2) of type STREAM_CLOSED."
  1777  	if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued {
  1778  		// This includes sending a RST_STREAM if the stream is
  1779  		// in stateHalfClosedLocal (which currently means that
  1780  		// the http.Handler returned, so it's done reading &
  1781  		// done writing). Try to stop the client from sending
  1782  		// more DATA.
  1783  
  1784  		// But still enforce their connection-level flow control,
  1785  		// and return any flow control bytes since we're not going
  1786  		// to consume them.
  1787  		if !sc.inflow.take(f.Length) {
  1788  			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
  1789  		}
  1790  		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
  1791  
  1792  		if st != nil && st.resetQueued {
  1793  			// Already have a stream error in flight. Don't send another.
  1794  			return nil
  1795  		}
  1796  		return sc.countError("closed", streamError(id, ErrCodeStreamClosed))
  1797  	}
  1798  	if st.body == nil {
  1799  		panic("internal error: should have a body in this state")
  1800  	}
  1801  
  1802  	// Sender sending more than they'd declared?
  1803  	if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
  1804  		if !sc.inflow.take(f.Length) {
  1805  			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
  1806  		}
  1807  		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
  1808  
  1809  		st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
  1810  		// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
  1811  		// value of a content-length header field does not equal the sum of the
  1812  		// DATA frame payload lengths that form the body.
  1813  		return sc.countError("send_too_much", streamError(id, ErrCodeProtocol))
  1814  	}
  1815  	if f.Length > 0 {
  1816  		// Check whether the client has flow control quota.
  1817  		if !takeInflows(&sc.inflow, &st.inflow, f.Length) {
  1818  			return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl))
  1819  		}
  1820  
  1821  		if len(data) > 0 {
  1822  			st.bodyBytes += int64(len(data))
  1823  			wrote, err := st.body.Write(data)
  1824  			if err != nil {
  1825  				// The handler has closed the request body.
  1826  				// Return the connection-level flow control for the discarded data,
  1827  				// but not the stream-level flow control.
  1828  				sc.sendWindowUpdate(nil, int(f.Length)-wrote)
  1829  				return nil
  1830  			}
  1831  			if wrote != len(data) {
  1832  				panic("internal error: bad Writer")
  1833  			}
  1834  		}
  1835  
  1836  		// Return any padded flow control now, since we won't
  1837  		// refund it later on body reads.
  1838  		// Call sendWindowUpdate even if there is no padding,
  1839  		// to return buffered flow control credit if the sent
  1840  		// window has shrunk.
  1841  		pad := int32(f.Length) - int32(len(data))
  1842  		sc.sendWindowUpdate32(nil, pad)
  1843  		sc.sendWindowUpdate32(st, pad)
  1844  	}
  1845  	if f.StreamEnded() {
  1846  		st.endStream()
  1847  	}
  1848  	return nil
  1849  }
  1850  
  1851  func (sc *serverConn) processGoAway(f *GoAwayFrame) error {
  1852  	sc.serveG.check()
  1853  	if f.ErrCode != ErrCodeNo {
  1854  		sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f)
  1855  	} else {
  1856  		sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
  1857  	}
  1858  	sc.startGracefulShutdownInternal()
  1859  	// http://tools.ietf.org/html/rfc7540#section-6.8
  1860  	// We should not create any new streams, which means we should disable push.
  1861  	sc.pushEnabled = false
  1862  	return nil
  1863  }
  1864  
  1865  // isPushed reports whether the stream is server-initiated.
  1866  func (st *stream) isPushed() bool {
  1867  	return st.id%2 == 0
  1868  }
  1869  
  1870  // endStream closes a Request.Body's pipe. It is called when a DATA
  1871  // frame says a request body is over (or after trailers).
  1872  func (st *stream) endStream() {
  1873  	sc := st.sc
  1874  	sc.serveG.check()
  1875  
  1876  	if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
  1877  		st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
  1878  			st.declBodyBytes, st.bodyBytes))
  1879  	} else {
  1880  		st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
  1881  		st.body.CloseWithError(io.EOF)
  1882  	}
  1883  	st.state = stateHalfClosedRemote
  1884  }
  1885  
  1886  // copyTrailersToHandlerRequest is run in the Handler's goroutine in
  1887  // its Request.Body.Read just before it gets io.EOF.
  1888  func (st *stream) copyTrailersToHandlerRequest() {
  1889  	for k, vv := range st.trailer {
  1890  		if _, ok := st.reqTrailer[k]; ok {
  1891  			// Only copy it over if it was pre-declared.
  1892  			st.reqTrailer[k] = vv
  1893  		}
  1894  	}
  1895  }
  1896  
  1897  // onReadTimeout is run on its own goroutine (from time.AfterFunc)
  1898  // when the stream's ReadTimeout has fired.
  1899  func (st *stream) onReadTimeout() {
  1900  	if st.body != nil {
  1901  		// Wrap the ErrDeadlineExceeded to avoid callers depending on us
  1902  		// returning the bare error.
  1903  		st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
  1904  	}
  1905  }
  1906  
  1907  // onWriteTimeout is run on its own goroutine (from time.AfterFunc)
  1908  // when the stream's WriteTimeout has fired.
  1909  func (st *stream) onWriteTimeout() {
  1910  	st.sc.writeFrameFromHandler(FrameWriteRequest{write: StreamError{
  1911  		StreamID: st.id,
  1912  		Code:     ErrCodeInternal,
  1913  		Cause:    os.ErrDeadlineExceeded,
  1914  	}})
  1915  }
  1916  
  1917  func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
  1918  	sc.serveG.check()
  1919  	id := f.StreamID
  1920  	// http://tools.ietf.org/html/rfc7540#section-5.1.1
  1921  	// Streams initiated by a client MUST use odd-numbered stream
  1922  	// identifiers. [...] An endpoint that receives an unexpected
  1923  	// stream identifier MUST respond with a connection error
  1924  	// (Section 5.4.1) of type PROTOCOL_ERROR.
  1925  	if id%2 != 1 {
  1926  		return sc.countError("headers_even", ConnectionError(ErrCodeProtocol))
  1927  	}
  1928  	// A HEADERS frame can be used to create a new stream or
  1929  	// send a trailer for an open one. If we already have a stream
  1930  	// open, let it process its own HEADERS frame (trailers at this
  1931  	// point, if it's valid).
  1932  	if st := sc.streams[f.StreamID]; st != nil {
  1933  		if st.resetQueued {
  1934  			// We're sending RST_STREAM to close the stream, so don't bother
  1935  			// processing this frame.
  1936  			return nil
  1937  		}
  1938  		// RFC 7540, sec 5.1: If an endpoint receives additional frames, other than
  1939  		// WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in
  1940  		// this state, it MUST respond with a stream error (Section 5.4.2) of
  1941  		// type STREAM_CLOSED.
  1942  		if st.state == stateHalfClosedRemote {
  1943  			return sc.countError("headers_half_closed", streamError(id, ErrCodeStreamClosed))
  1944  		}
  1945  		return st.processTrailerHeaders(f)
  1946  	}
  1947  
  1948  	// [...] The identifier of a newly established stream MUST be
  1949  	// numerically greater than all streams that the initiating
  1950  	// endpoint has opened or reserved. [...]  An endpoint that
  1951  	// receives an unexpected stream identifier MUST respond with
  1952  	// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
  1953  	if id <= sc.maxClientStreamID {
  1954  		return sc.countError("stream_went_down", ConnectionError(ErrCodeProtocol))
  1955  	}
  1956  	sc.maxClientStreamID = id
  1957  
  1958  	if sc.idleTimer != nil {
  1959  		sc.idleTimer.Stop()
  1960  	}
  1961  
  1962  	// http://tools.ietf.org/html/rfc7540#section-5.1.2
  1963  	// [...] Endpoints MUST NOT exceed the limit set by their peer. An
  1964  	// endpoint that receives a HEADERS frame that causes their
  1965  	// advertised concurrent stream limit to be exceeded MUST treat
  1966  	// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR
  1967  	// or REFUSED_STREAM.
  1968  	if sc.curClientStreams+1 > sc.advMaxStreams {
  1969  		if sc.unackedSettings == 0 {
  1970  			// They should know better.
  1971  			return sc.countError("over_max_streams", streamError(id, ErrCodeProtocol))
  1972  		}
  1973  		// Assume it's a network race, where they just haven't
  1974  		// received our last SETTINGS update. But actually
  1975  		// this can't happen yet, because we don't yet provide
  1976  		// a way for users to adjust server parameters at
  1977  		// runtime.
  1978  		return sc.countError("over_max_streams_race", streamError(id, ErrCodeRefusedStream))
  1979  	}
  1980  
  1981  	initialState := stateOpen
  1982  	if f.StreamEnded() {
  1983  		initialState = stateHalfClosedRemote
  1984  	}
  1985  	st := sc.newStream(id, 0, initialState)
  1986  
  1987  	if f.HasPriority() {
  1988  		if err := sc.checkPriority(f.StreamID, f.Priority); err != nil {
  1989  			return err
  1990  		}
  1991  		sc.writeSched.AdjustStream(st.id, f.Priority)
  1992  	}
  1993  
  1994  	rw, req, err := sc.newWriterAndRequest(st, f)
  1995  	if err != nil {
  1996  		return err
  1997  	}
  1998  	st.reqTrailer = req.Trailer
  1999  	if st.reqTrailer != nil {
  2000  		st.trailer = make(http.Header)
  2001  	}
  2002  	st.body = req.Body.(*requestBody).pipe // may be nil
  2003  	st.declBodyBytes = req.ContentLength
  2004  
  2005  	handler := sc.handler.ServeHTTP
  2006  	if f.Truncated {
  2007  		// Their header list was too long. Send a 431 error.
  2008  		handler = handleHeaderListTooLong
  2009  	} else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil {
  2010  		handler = new400Handler(err)
  2011  	}
  2012  
  2013  	// The net/http package sets the read deadline from the
  2014  	// http.Server.ReadTimeout during the TLS handshake, but then
  2015  	// passes the connection off to us with the deadline already
  2016  	// set. Disarm it here after the request headers are read,
  2017  	// similar to how the http1 server works. Here it's
  2018  	// technically more like the http1 Server's ReadHeaderTimeout
  2019  	// (in Go 1.8), though. That's a more sane option anyway.
  2020  	if sc.hs.ReadTimeout != 0 {
  2021  		sc.conn.SetReadDeadline(time.Time{})
  2022  		st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
  2023  	}
  2024  
  2025  	return sc.scheduleHandler(id, rw, req, handler)
  2026  }
  2027  
  2028  func (sc *serverConn) upgradeRequest(req *http.Request) {
  2029  	sc.serveG.check()
  2030  	id := uint32(1)
  2031  	sc.maxClientStreamID = id
  2032  	st := sc.newStream(id, 0, stateHalfClosedRemote)
  2033  	st.reqTrailer = req.Trailer
  2034  	if st.reqTrailer != nil {
  2035  		st.trailer = make(http.Header)
  2036  	}
  2037  	rw := sc.newResponseWriter(st, req)
  2038  
  2039  	// Disable any read deadline set by the net/http package
  2040  	// prior to the upgrade.
  2041  	if sc.hs.ReadTimeout != 0 {
  2042  		sc.conn.SetReadDeadline(time.Time{})
  2043  	}
  2044  
  2045  	// This is the first request on the connection,
  2046  	// so start the handler directly rather than going
  2047  	// through scheduleHandler.
  2048  	sc.curHandlers++
  2049  	go sc.runHandler(rw, req, sc.handler.ServeHTTP)
  2050  }
  2051  
  2052  func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
  2053  	sc := st.sc
  2054  	sc.serveG.check()
  2055  	if st.gotTrailerHeader {
  2056  		return sc.countError("dup_trailers", ConnectionError(ErrCodeProtocol))
  2057  	}
  2058  	st.gotTrailerHeader = true
  2059  	if !f.StreamEnded() {
  2060  		return sc.countError("trailers_not_ended", streamError(st.id, ErrCodeProtocol))
  2061  	}
  2062  
  2063  	if len(f.PseudoFields()) > 0 {
  2064  		return sc.countError("trailers_pseudo", streamError(st.id, ErrCodeProtocol))
  2065  	}
  2066  	if st.trailer != nil {
  2067  		for _, hf := range f.RegularFields() {
  2068  			key := sc.canonicalHeader(hf.Name)
  2069  			if !httpguts.ValidTrailerHeader(key) {
  2070  				// TODO: send more details to the peer somehow. But http2 has
  2071  				// no way to send debug data at a stream level. Discuss with
  2072  				// HTTP folk.
  2073  				return sc.countError("trailers_bogus", streamError(st.id, ErrCodeProtocol))
  2074  			}
  2075  			st.trailer[key] = append(st.trailer[key], hf.Value)
  2076  		}
  2077  	}
  2078  	st.endStream()
  2079  	return nil
  2080  }
  2081  
  2082  func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error {
  2083  	if streamID == p.StreamDep {
  2084  		// Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
  2085  		// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
  2086  		// Section 5.3.3 says that a stream can depend on one of its dependencies,
  2087  		// so it's only self-dependencies that are forbidden.
  2088  		return sc.countError("priority", streamError(streamID, ErrCodeProtocol))
  2089  	}
  2090  	return nil
  2091  }
  2092  
  2093  func (sc *serverConn) processPriority(f *PriorityFrame) error {
  2094  	if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil {
  2095  		return err
  2096  	}
  2097  	sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)
  2098  	return nil
  2099  }
  2100  
  2101  func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream {
  2102  	sc.serveG.check()
  2103  	if id == 0 {
  2104  		panic("internal error: cannot create stream with id 0")
  2105  	}
  2106  
  2107  	ctx, cancelCtx := context.WithCancel(sc.baseCtx)
  2108  	st := &stream{
  2109  		sc:        sc,
  2110  		id:        id,
  2111  		state:     state,
  2112  		ctx:       ctx,
  2113  		cancelCtx: cancelCtx,
  2114  	}
  2115  	st.cw.Init()
  2116  	st.flow.conn = &sc.flow // link to conn-level counter
  2117  	st.flow.add(sc.initialStreamSendWindowSize)
  2118  	st.inflow.init(sc.srv.initialStreamRecvWindowSize())
  2119  	if sc.hs.WriteTimeout != 0 {
  2120  		st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
  2121  	}
  2122  
  2123  	sc.streams[id] = st
  2124  	sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})
  2125  	if st.isPushed() {
  2126  		sc.curPushedStreams++
  2127  	} else {
  2128  		sc.curClientStreams++
  2129  	}
  2130  	if sc.curOpenStreams() == 1 {
  2131  		sc.setConnState(http.StateActive)
  2132  	}
  2133  
  2134  	return st
  2135  }
  2136  
  2137  func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
  2138  	sc.serveG.check()
  2139  
  2140  	rp := requestParam{
  2141  		method:    f.PseudoValue("method"),
  2142  		scheme:    f.PseudoValue("scheme"),
  2143  		authority: f.PseudoValue("authority"),
  2144  		path:      f.PseudoValue("path"),
  2145  	}
  2146  
  2147  	isConnect := rp.method == "CONNECT"
  2148  	if isConnect {
  2149  		if rp.path != "" || rp.scheme != "" || rp.authority == "" {
  2150  			return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
  2151  		}
  2152  	} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
  2153  		// See 8.1.2.6 Malformed Requests and Responses:
  2154  		//
  2155  		// Malformed requests or responses that are detected
  2156  		// MUST be treated as a stream error (Section 5.4.2)
  2157  		// of type PROTOCOL_ERROR."
  2158  		//
  2159  		// 8.1.2.3 Request Pseudo-Header Fields
  2160  		// "All HTTP/2 requests MUST include exactly one valid
  2161  		// value for the :method, :scheme, and :path
  2162  		// pseudo-header fields"
  2163  		return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
  2164  	}
  2165  
  2166  	rp.header = make(http.Header)
  2167  	for _, hf := range f.RegularFields() {
  2168  		rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
  2169  	}
  2170  	if rp.authority == "" {
  2171  		rp.authority = rp.header.Get("Host")
  2172  	}
  2173  
  2174  	rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
  2175  	if err != nil {
  2176  		return nil, nil, err
  2177  	}
  2178  	bodyOpen := !f.StreamEnded()
  2179  	if bodyOpen {
  2180  		if vv, ok := rp.header["Content-Length"]; ok {
  2181  			if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
  2182  				req.ContentLength = int64(cl)
  2183  			} else {
  2184  				req.ContentLength = 0
  2185  			}
  2186  		} else {
  2187  			req.ContentLength = -1
  2188  		}
  2189  		req.Body.(*requestBody).pipe = &pipe{
  2190  			b: &dataBuffer{expected: req.ContentLength},
  2191  		}
  2192  	}
  2193  	return rw, req, nil
  2194  }
  2195  
  2196  type requestParam struct {
  2197  	method                  string
  2198  	scheme, authority, path string
  2199  	header                  http.Header
  2200  }
  2201  
  2202  func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {
  2203  	sc.serveG.check()
  2204  
  2205  	var tlsState *tls.ConnectionState // nil if not scheme https
  2206  	if rp.scheme == "https" {
  2207  		tlsState = sc.tlsState
  2208  	}
  2209  
  2210  	needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue")
  2211  	if needsContinue {
  2212  		rp.header.Del("Expect")
  2213  	}
  2214  	// Merge Cookie headers into one "; "-delimited value.
  2215  	if cookies := rp.header["Cookie"]; len(cookies) > 1 {
  2216  		rp.header.Set("Cookie", strings.Join(cookies, "; "))
  2217  	}
  2218  
  2219  	// Setup Trailers
  2220  	var trailer http.Header
  2221  	for _, v := range rp.header["Trailer"] {
  2222  		for _, key := range strings.Split(v, ",") {
  2223  			key = http.CanonicalHeaderKey(textproto.TrimString(key))
  2224  			switch key {
  2225  			case "Transfer-Encoding", "Trailer", "Content-Length":
  2226  				// Bogus. (copy of http1 rules)
  2227  				// Ignore.
  2228  			default:
  2229  				if trailer == nil {
  2230  					trailer = make(http.Header)
  2231  				}
  2232  				trailer[key] = nil
  2233  			}
  2234  		}
  2235  	}
  2236  	delete(rp.header, "Trailer")
  2237  
  2238  	var url_ *url.URL
  2239  	var requestURI string
  2240  	if rp.method == "CONNECT" {
  2241  		url_ = &url.URL{Host: rp.authority}
  2242  		requestURI = rp.authority // mimic HTTP/1 server behavior
  2243  	} else {
  2244  		var err error
  2245  		url_, err = url.ParseRequestURI(rp.path)
  2246  		if err != nil {
  2247  			return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol))
  2248  		}
  2249  		requestURI = rp.path
  2250  	}
  2251  
  2252  	body := &requestBody{
  2253  		conn:          sc,
  2254  		stream:        st,
  2255  		needsContinue: needsContinue,
  2256  	}
  2257  	req := &http.Request{
  2258  		Method:     rp.method,
  2259  		URL:        url_,
  2260  		RemoteAddr: sc.remoteAddrStr,
  2261  		Header:     rp.header,
  2262  		RequestURI: requestURI,
  2263  		Proto:      "HTTP/2.0",
  2264  		ProtoMajor: 2,
  2265  		ProtoMinor: 0,
  2266  		TLS:        tlsState,
  2267  		Host:       rp.authority,
  2268  		Body:       body,
  2269  		Trailer:    trailer,
  2270  	}
  2271  	req = req.WithContext(st.ctx)
  2272  
  2273  	rw := sc.newResponseWriter(st, req)
  2274  	return rw, req, nil
  2275  }
  2276  
  2277  func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *responseWriter {
  2278  	rws := responseWriterStatePool.Get().(*responseWriterState)
  2279  	bwSave := rws.bw
  2280  	*rws = responseWriterState{} // zero all the fields
  2281  	rws.conn = sc
  2282  	rws.bw = bwSave
  2283  	rws.bw.Reset(chunkWriter{rws})
  2284  	rws.stream = st
  2285  	rws.req = req
  2286  	return &responseWriter{rws: rws}
  2287  }
  2288  
  2289  type unstartedHandler struct {
  2290  	streamID uint32
  2291  	rw       *responseWriter
  2292  	req      *http.Request
  2293  	handler  func(http.ResponseWriter, *http.Request)
  2294  }
  2295  
  2296  // scheduleHandler starts a handler goroutine,
  2297  // or schedules one to start as soon as an existing handler finishes.
  2298  func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error {
  2299  	sc.serveG.check()
  2300  	maxHandlers := sc.advMaxStreams
  2301  	if sc.curHandlers < maxHandlers {
  2302  		sc.curHandlers++
  2303  		go sc.runHandler(rw, req, handler)
  2304  		return nil
  2305  	}
  2306  	if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) {
  2307  		return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm))
  2308  	}
  2309  	sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{
  2310  		streamID: streamID,
  2311  		rw:       rw,
  2312  		req:      req,
  2313  		handler:  handler,
  2314  	})
  2315  	return nil
  2316  }
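
// Illustrative sketch (not part of the upstream file): the handler budget used
// by scheduleHandler comes from the advertised SETTINGS_MAX_CONCURRENT_STREAMS,
// which callers can bound via Server.MaxConcurrentStreams when configuring
// HTTP/2 explicitly. The value 250 is an arbitrary example.
func exampleConcurrencyLimit(hs *http.Server) error {
	// Advertise (and enforce) at most 250 concurrent streams per connection.
	return ConfigureServer(hs, &Server{MaxConcurrentStreams: 250})
}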
  2317  
  2318  func (sc *serverConn) handlerDone() {
  2319  	sc.serveG.check()
  2320  	sc.curHandlers--
  2321  	i := 0
  2322  	maxHandlers := sc.advMaxStreams
  2323  	for ; i < len(sc.unstartedHandlers); i++ {
  2324  		u := sc.unstartedHandlers[i]
  2325  		if sc.streams[u.streamID] == nil {
  2326  			// This stream was reset before its goroutine had a chance to start.
  2327  			continue
  2328  		}
  2329  		if sc.curHandlers >= maxHandlers {
  2330  			break
  2331  		}
  2332  		sc.curHandlers++
  2333  		go sc.runHandler(u.rw, u.req, u.handler)
  2334  		sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references
  2335  	}
  2336  	sc.unstartedHandlers = sc.unstartedHandlers[i:]
  2337  	if len(sc.unstartedHandlers) == 0 {
  2338  		sc.unstartedHandlers = nil
  2339  	}
  2340  }
  2341  
  2342  // Run on its own goroutine.
  2343  func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
  2344  	defer sc.sendServeMsg(handlerDoneMsg)
  2345  	didPanic := true
  2346  	defer func() {
  2347  		rw.rws.stream.cancelCtx()
  2348  		if req.MultipartForm != nil {
  2349  			req.MultipartForm.RemoveAll()
  2350  		}
  2351  		if didPanic {
  2352  			e := recover()
  2353  			sc.writeFrameFromHandler(FrameWriteRequest{
  2354  				write:  handlerPanicRST{rw.rws.stream.id},
  2355  				stream: rw.rws.stream,
  2356  			})
  2357  			// Same as net/http:
  2358  			if e != nil && e != http.ErrAbortHandler {
  2359  				const size = 64 << 10
  2360  				buf := make([]byte, size)
  2361  				buf = buf[:runtime.Stack(buf, false)]
  2362  				sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
  2363  			}
  2364  			return
  2365  		}
  2366  		rw.handlerDone()
  2367  	}()
  2368  	handler(rw, req)
  2369  	didPanic = false
  2370  }
  2371  
  2372  func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) {
  2373  	// 10.5.1 Limits on Header Block Size:
  2374  	// .. "A server that receives a larger header block than it is
  2375  	// willing to handle can send an HTTP 431 (Request Header Fields Too
  2376  	// Large) status code"
  2377  	const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
  2378  	w.WriteHeader(statusRequestHeaderFieldsTooLarge)
  2379  	io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>")
  2380  }
  2381  
  2382  // called from handler goroutines.
  2383  // h may be nil.
  2384  func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
  2385  	sc.serveG.checkNotOn() // NOT on
  2386  	var errc chan error
  2387  	if headerData.h != nil {
  2388  		// There's a header map (which we don't own), so we have to block until
  2389  		// this frame is written, so that an http.Flush mid-handler writes out the
  2390  		// correct values of keys before the handler later potentially mutates the
  2391  		// map.
  2392  		errc = errChanPool.Get().(chan error)
  2393  	}
  2394  	if err := sc.writeFrameFromHandler(FrameWriteRequest{
  2395  		write:  headerData,
  2396  		stream: st,
  2397  		done:   errc,
  2398  	}); err != nil {
  2399  		return err
  2400  	}
  2401  	if errc != nil {
  2402  		select {
  2403  		case err := <-errc:
  2404  			errChanPool.Put(errc)
  2405  			return err
  2406  		case <-sc.doneServing:
  2407  			return errClientDisconnected
  2408  		case <-st.cw:
  2409  			return errStreamClosed
  2410  		}
  2411  	}
  2412  	return nil
  2413  }
  2414  
  2415  // called from handler goroutines.
  2416  func (sc *serverConn) write100ContinueHeaders(st *stream) {
  2417  	sc.writeFrameFromHandler(FrameWriteRequest{
  2418  		write:  write100ContinueHeadersFrame{st.id},
  2419  		stream: st,
  2420  	})
  2421  }
  2422  
  2423  // A bodyReadMsg tells the server loop that the http.Handler read n
  2424  // bytes of the DATA from the client on the given stream.
  2425  type bodyReadMsg struct {
  2426  	st *stream
  2427  	n  int
  2428  }
  2429  
  2430  // called from handler goroutines.
  2431  // Notes that the handler for the given stream ID read n bytes of its body
  2432  // and schedules flow control tokens to be sent.
  2433  func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
  2434  	sc.serveG.checkNotOn() // NOT on
  2435  	if n > 0 {
  2436  		select {
  2437  		case sc.bodyReadCh <- bodyReadMsg{st, n}:
  2438  		case <-sc.doneServing:
  2439  		}
  2440  	}
  2441  }
  2442  
  2443  func (sc *serverConn) noteBodyRead(st *stream, n int) {
  2444  	sc.serveG.check()
  2445  	sc.sendWindowUpdate(nil, n) // conn-level
  2446  	if st.state != stateHalfClosedRemote && st.state != stateClosed {
  2447  		// Don't send this WINDOW_UPDATE if the stream is closed
  2448  		// remotely.
  2449  		sc.sendWindowUpdate(st, n)
  2450  	}
  2451  }
  2452  
  2453  // st may be nil for conn-level
  2454  func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
  2455  	sc.sendWindowUpdate(st, int(n))
  2456  }
  2457  
  2458  // st may be nil for conn-level
  2459  func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
  2460  	sc.serveG.check()
  2461  	var streamID uint32
  2462  	var send int32
  2463  	if st == nil {
  2464  		send = sc.inflow.add(n)
  2465  	} else {
  2466  		streamID = st.id
  2467  		send = st.inflow.add(n)
  2468  	}
  2469  	if send == 0 {
  2470  		return
  2471  	}
  2472  	sc.writeFrame(FrameWriteRequest{
  2473  		write:  writeWindowUpdate{streamID: streamID, n: uint32(send)},
  2474  		stream: st,
  2475  	})
  2476  }
  2477  
  2478  // requestBody is the Handler's Request.Body type.
  2479  // Read and Close may be called concurrently.
  2480  type requestBody struct {
  2481  	_             incomparable
  2482  	stream        *stream
  2483  	conn          *serverConn
  2484  	closeOnce     sync.Once // for use by Close only
  2485  	sawEOF        bool      // for use by Read only
  2486  	pipe          *pipe     // non-nil if we have an HTTP entity message body
  2487  	needsContinue bool      // need to send a 100-continue
  2488  }
  2489  
  2490  func (b *requestBody) Close() error {
  2491  	b.closeOnce.Do(func() {
  2492  		if b.pipe != nil {
  2493  			b.pipe.BreakWithError(errClosedBody)
  2494  		}
  2495  	})
  2496  	return nil
  2497  }
  2498  
  2499  func (b *requestBody) Read(p []byte) (n int, err error) {
  2500  	if b.needsContinue {
  2501  		b.needsContinue = false
  2502  		b.conn.write100ContinueHeaders(b.stream)
  2503  	}
  2504  	if b.pipe == nil || b.sawEOF {
  2505  		return 0, io.EOF
  2506  	}
  2507  	n, err = b.pipe.Read(p)
  2508  	if err == io.EOF {
  2509  		b.sawEOF = true
  2510  	}
  2511  	if b.conn == nil && inTests {
  2512  		return
  2513  	}
  2514  	b.conn.noteBodyReadFromHandler(b.stream, n, err)
  2515  	return
  2516  }
  2517  
  2518  // responseWriter is the http.ResponseWriter implementation. It's
  2519  // intentionally small (1 pointer wide) to minimize garbage. The
  2520  // responseWriterState pointer inside is zeroed at the end of a
  2521  // request (in handlerDone) and calls on the responseWriter thereafter
  2522  // simply crash (caller's mistake), but the much larger responseWriterState
  2523  // and buffers are reused between multiple requests.
  2524  type responseWriter struct {
  2525  	rws *responseWriterState
  2526  }
  2527  
  2528  // Optional http.ResponseWriter interfaces implemented.
  2529  var (
  2530  	_ http.CloseNotifier = (*responseWriter)(nil)
  2531  	_ http.Flusher       = (*responseWriter)(nil)
  2532  	_ stringWriter       = (*responseWriter)(nil)
  2533  )
  2534  
  2535  type responseWriterState struct {
  2536  	// immutable within a request:
  2537  	stream *stream
  2538  	req    *http.Request
  2539  	conn   *serverConn
  2540  
  2541  	// TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
  2542  	bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
  2543  
  2544  	// mutated by http.Handler goroutine:
  2545  	handlerHeader http.Header // nil until called
  2546  	snapHeader    http.Header // snapshot of handlerHeader at WriteHeader time
  2547  	trailers      []string    // set in writeChunk
  2548  	status        int         // status code passed to WriteHeader
  2549  	wroteHeader   bool        // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
  2550  	sentHeader    bool        // have we sent the header frame?
  2551  	handlerDone   bool        // handler has finished
  2552  
  2553  	sentContentLen int64 // non-zero if handler set a Content-Length header
  2554  	wroteBytes     int64
  2555  
  2556  	closeNotifierMu sync.Mutex // guards closeNotifierCh
  2557  	closeNotifierCh chan bool  // nil until first used
  2558  }
  2559  
  2560  type chunkWriter struct{ rws *responseWriterState }
  2561  
  2562  func (cw chunkWriter) Write(p []byte) (n int, err error) {
  2563  	n, err = cw.rws.writeChunk(p)
  2564  	if err == errStreamClosed {
  2565  		// If writing failed because the stream has been closed,
  2566  		// return the reason it was closed.
  2567  		err = cw.rws.stream.closeErr
  2568  	}
  2569  	return n, err
  2570  }
  2571  
  2572  func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 }
  2573  
  2574  func (rws *responseWriterState) hasNonemptyTrailers() bool {
  2575  	for _, trailer := range rws.trailers {
  2576  		if _, ok := rws.handlerHeader[trailer]; ok {
  2577  			return true
  2578  		}
  2579  	}
  2580  	return false
  2581  }
  2582  
  2583  // declareTrailer is called for each Trailer header when the
  2584  // response header is written. It notes that a header will need to be
  2585  // written in the trailers at the end of the response.
  2586  func (rws *responseWriterState) declareTrailer(k string) {
  2587  	k = http.CanonicalHeaderKey(k)
  2588  	if !httpguts.ValidTrailerHeader(k) {
  2589  		// Forbidden by RFC 7230, section 4.1.2.
  2590  		rws.conn.logf("ignoring invalid trailer %q", k)
  2591  		return
  2592  	}
  2593  	if !strSliceContains(rws.trailers, k) {
  2594  		rws.trailers = append(rws.trailers, k)
  2595  	}
  2596  }
  2597  
  2598  // writeChunk writes chunks from the bufio.Writer. But because
  2599  // bufio.Writer may bypass its chunking, sometimes p may be
  2600  // arbitrarily large.
  2601  //
  2602  // writeChunk is also responsible (on the first chunk) for sending the
  2603  // HEADER response.
  2604  func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
  2605  	if !rws.wroteHeader {
  2606  		rws.writeHeader(200)
  2607  	}
  2608  
  2609  	if rws.handlerDone {
  2610  		rws.promoteUndeclaredTrailers()
  2611  	}
  2612  
  2613  	isHeadResp := rws.req.Method == "HEAD"
  2614  	if !rws.sentHeader {
  2615  		rws.sentHeader = true
  2616  		var ctype, clen string
  2617  		if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
  2618  			rws.snapHeader.Del("Content-Length")
  2619  			if cl, err := strconv.ParseUint(clen, 10, 63); err == nil {
  2620  				rws.sentContentLen = int64(cl)
  2621  			} else {
  2622  				clen = ""
  2623  			}
  2624  		}
  2625  		_, hasContentLength := rws.snapHeader["Content-Length"]
  2626  		if !hasContentLength && clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
  2627  			clen = strconv.Itoa(len(p))
  2628  		}
  2629  		_, hasContentType := rws.snapHeader["Content-Type"]
  2630  		// If the Content-Encoding is non-blank, we shouldn't
  2631  		// sniff the body. See Issue golang.org/issue/31753.
  2632  		ce := rws.snapHeader.Get("Content-Encoding")
  2633  		hasCE := len(ce) > 0
  2634  		if !hasCE && !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 {
  2635  			ctype = http.DetectContentType(p)
  2636  		}
  2637  		var date string
  2638  		if _, ok := rws.snapHeader["Date"]; !ok {
  2639  			// TODO(bradfitz): be faster here, like net/http? measure.
  2640  			date = time.Now().UTC().Format(http.TimeFormat)
  2641  		}
  2642  
  2643  		for _, v := range rws.snapHeader["Trailer"] {
  2644  			foreachHeaderElement(v, rws.declareTrailer)
  2645  		}
  2646  
  2647  		// "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2),
  2648  		// but respect "Connection" == "close" to mean sending a GOAWAY and tearing
  2649  		// down the TCP connection when idle, like we do for HTTP/1.
  2650  		// TODO: remove more Connection-specific header fields here, in addition
  2651  		// to "Connection".
  2652  		if _, ok := rws.snapHeader["Connection"]; ok {
  2653  			v := rws.snapHeader.Get("Connection")
  2654  			delete(rws.snapHeader, "Connection")
  2655  			if v == "close" {
  2656  				rws.conn.startGracefulShutdown()
  2657  			}
  2658  		}
  2659  
  2660  		endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
  2661  		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
  2662  			streamID:      rws.stream.id,
  2663  			httpResCode:   rws.status,
  2664  			h:             rws.snapHeader,
  2665  			endStream:     endStream,
  2666  			contentType:   ctype,
  2667  			contentLength: clen,
  2668  			date:          date,
  2669  		})
  2670  		if err != nil {
  2671  			return 0, err
  2672  		}
  2673  		if endStream {
  2674  			return 0, nil
  2675  		}
  2676  	}
  2677  	if isHeadResp {
  2678  		return len(p), nil
  2679  	}
  2680  	if len(p) == 0 && !rws.handlerDone {
  2681  		return 0, nil
  2682  	}
  2683  
  2684  	// only send trailers if they have actually been defined by the
  2685  	// server handler.
  2686  	hasNonemptyTrailers := rws.hasNonemptyTrailers()
  2687  	endStream := rws.handlerDone && !hasNonemptyTrailers
  2688  	if len(p) > 0 || endStream {
  2689  		// only send a 0 byte DATA frame if we're ending the stream.
  2690  		if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
  2691  			return 0, err
  2692  		}
  2693  	}
  2694  
  2695  	if rws.handlerDone && hasNonemptyTrailers {
  2696  		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
  2697  			streamID:  rws.stream.id,
  2698  			h:         rws.handlerHeader,
  2699  			trailers:  rws.trailers,
  2700  			endStream: true,
  2701  		})
  2702  		return len(p), err
  2703  	}
  2704  	return len(p), nil
  2705  }
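
// Illustrative sketch (not part of the upstream file): per the "Connection"
// handling in writeChunk above, a handler can request that the connection be
// shut down gracefully (GOAWAY once idle) by setting "Connection: close"; the
// header itself is stripped rather than sent, since HTTP/2 forbids it.
func exampleConnectionCloseHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Connection", "close")
	w.WriteHeader(http.StatusOK)
	io.WriteString(w, "goodbye\n")
}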
  2706  
  2707  // TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
  2708  // that, if present, signals that the map entry is actually for
  2709  // the response trailers, and not the response headers. The prefix
  2710  // is stripped after the ServeHTTP call finishes and the values are
  2711  // sent in the trailers.
  2712  //
  2713  // This mechanism is intended only for trailers that are not known
  2714  // prior to the headers being written. If the set of trailers is fixed
  2715  // or known before the header is written, the normal Go trailers mechanism
  2716  // is preferred:
  2717  //
  2718  //	https://golang.org/pkg/net/http/#ResponseWriter
  2719  //	https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
  2720  const TrailerPrefix = "Trailer:"
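
// Illustrative sketch (not part of the upstream file): a handler that decides
// on a trailer only after part of the body is on the wire, using the
// TrailerPrefix escape hatch above. The "Grpc-Status" name and "0" value are
// arbitrary example data.
func exampleTrailerPrefixHandler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	io.WriteString(w, "streaming body\n")
	w.(http.Flusher).Flush() // response headers are already sent at this point
	// Too late to predeclare a trailer; the prefixed form is promoted to a
	// real trailer by promoteUndeclaredTrailers when the handler returns.
	w.Header().Set(TrailerPrefix+"Grpc-Status", "0")
}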
  2721  
  2722  // promoteUndeclaredTrailers permits http.Handlers to set trailers
  2723  // after the header has already been flushed. Because the Go
  2724  // ResponseWriter interface has no way to set Trailers (only the
  2725  // Header), and because we didn't want to expand the ResponseWriter
  2726  // interface, and because nobody used trailers, and because RFC 7230
  2727  // says you SHOULD (but not must) predeclare any trailers in the
  2728  // header, the official ResponseWriter rules said trailers in Go must
  2729  // be predeclared, and then we reuse the same ResponseWriter.Header()
  2730  // map to mean both Headers and Trailers. When it's time to write the
  2731  // Trailers, we pick out the fields of Headers that were declared as
  2732  // trailers. That worked for a while, until we found the first major
  2733  // user of Trailers in the wild: gRPC (using them only over http2),
  2734  // and gRPC libraries permit setting trailers mid-stream without
  2735  // predeclaring them. So: change of plans. We still permit the old
  2736  // way, but we also permit this hack: if a Header() key begins with
  2737  // "Trailer:", the suffix of that key is a Trailer. Because ':' is an
  2738  // invalid token byte anyway, there is no ambiguity. (And it's already
  2739  // filtered out) It's mildly hacky, but not terrible.
  2740  //
  2741  // This method runs after the Handler is done and promotes any Header
  2742  // fields to be trailers.
  2743  func (rws *responseWriterState) promoteUndeclaredTrailers() {
  2744  	for k, vv := range rws.handlerHeader {
  2745  		if !strings.HasPrefix(k, TrailerPrefix) {
  2746  			continue
  2747  		}
  2748  		trailerKey := strings.TrimPrefix(k, TrailerPrefix)
  2749  		rws.declareTrailer(trailerKey)
  2750  		rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
  2751  	}
  2752  
  2753  	if len(rws.trailers) > 1 {
  2754  		sorter := sorterPool.Get().(*sorter)
  2755  		sorter.SortStrings(rws.trailers)
  2756  		sorterPool.Put(sorter)
  2757  	}
  2758  }
  2759  
  2760  func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
  2761  	st := w.rws.stream
  2762  	if !deadline.IsZero() && deadline.Before(time.Now()) {
  2763  		// If we're setting a deadline in the past, reset the stream immediately
  2764  		// so reads after SetReadDeadline returns will fail.
  2765  		st.onReadTimeout()
  2766  		return nil
  2767  	}
  2768  	w.rws.conn.sendServeMsg(func(sc *serverConn) {
  2769  		if st.readDeadline != nil {
  2770  			if !st.readDeadline.Stop() {
  2771  				// Deadline already exceeded, or stream has been closed.
  2772  				return
  2773  			}
  2774  		}
  2775  		if deadline.IsZero() {
  2776  			st.readDeadline = nil
  2777  		} else if st.readDeadline == nil {
  2778  			st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
  2779  		} else {
  2780  			st.readDeadline.Reset(deadline.Sub(time.Now()))
  2781  		}
  2782  	})
  2783  	return nil
  2784  }
  2785  
  2786  func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
  2787  	st := w.rws.stream
  2788  	if !deadline.IsZero() && deadline.Before(time.Now()) {
  2789  		// If we're setting a deadline in the past, reset the stream immediately
  2790  		// so writes after SetWriteDeadline returns will fail.
  2791  		st.onWriteTimeout()
  2792  		return nil
  2793  	}
  2794  	w.rws.conn.sendServeMsg(func(sc *serverConn) {
  2795  		if st.writeDeadline != nil {
  2796  			if !st.writeDeadline.Stop() {
  2797  				// Deadline already exceeded, or stream has been closed.
  2798  				return
  2799  			}
  2800  		}
  2801  		if deadline.IsZero() {
  2802  			st.writeDeadline = nil
  2803  		} else if st.writeDeadline == nil {
  2804  			st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
  2805  		} else {
  2806  			st.writeDeadline.Reset(deadline.Sub(time.Now()))
  2807  		}
  2808  	})
  2809  	return nil
  2810  }
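
// Illustrative sketch (not part of the upstream file): from Go 1.20 onward a
// handler can reach the per-stream deadlines above through net/http's
// ResponseController, which discovers SetReadDeadline/SetWriteDeadline by
// interface assertion. The 5-second durations are arbitrary example values,
// and errors are ignored for brevity.
func exampleDeadlineHandler(w http.ResponseWriter, r *http.Request) {
	rc := http.NewResponseController(w)
	rc.SetReadDeadline(time.Now().Add(5 * time.Second))  // bound reads of the request body
	rc.SetWriteDeadline(time.Now().Add(5 * time.Second)) // bound writes of the response
	io.Copy(io.Discard, r.Body)
	w.WriteHeader(http.StatusOK)
}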
  2811  
  2812  func (w *responseWriter) Flush() {
  2813  	w.FlushError()
  2814  }
  2815  
  2816  func (w *responseWriter) FlushError() error {
  2817  	rws := w.rws
  2818  	if rws == nil {
  2819  		panic("Flush called after Handler finished")
  2820  	}
  2821  	var err error
  2822  	if rws.bw.Buffered() > 0 {
  2823  		err = rws.bw.Flush()
  2824  	} else {
  2825  		// The bufio.Writer won't call chunkWriter.Write
  2826  		// (writeChunk with zero bytes), so we have to do it
  2827  		// ourselves to force the HTTP response header and/or
  2828  		// final DATA frame (with END_STREAM) to be sent.
  2829  		_, err = chunkWriter{rws}.Write(nil)
  2830  		if err == nil {
  2831  			select {
  2832  			case <-rws.stream.cw:
  2833  				err = rws.stream.closeErr
  2834  			default:
  2835  			}
  2836  		}
  2837  	}
  2838  	return err
  2839  }
  2840  
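        // Illustrative example (not part of this file; the handler name is
        // hypothetical): a streaming handler can flush through net/http's
        // ResponseController, which is expected to prefer FlushError above when the
        // ResponseWriter provides it, so a failed flush is reported rather than
        // silently dropped.
        //
        //	func hypotheticalStreamingHandler(w http.ResponseWriter, r *http.Request) {
        //		rc := http.NewResponseController(w)
        //		io.WriteString(w, "event: ping\n\n")
        //		if err := rc.Flush(); err != nil {
        //			return // peer is likely gone; stop streaming
        //		}
        //	}
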
  2841  func (w *responseWriter) CloseNotify() <-chan bool {
  2842  	rws := w.rws
  2843  	if rws == nil {
  2844  		panic("CloseNotify called after Handler finished")
  2845  	}
  2846  	rws.closeNotifierMu.Lock()
  2847  	ch := rws.closeNotifierCh
  2848  	if ch == nil {
  2849  		ch = make(chan bool, 1)
  2850  		rws.closeNotifierCh = ch
  2851  		cw := rws.stream.cw
  2852  		go func() {
  2853  			cw.Wait() // wait for close
  2854  			ch <- true
  2855  		}()
  2856  	}
  2857  	rws.closeNotifierMu.Unlock()
  2858  	return ch
  2859  }
  2860  
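        // Illustrative example (not part of this file; the handler name and the
        // timeout are arbitrary): CloseNotify lets a handler observe client
        // disconnects, though handlers today usually watch r.Context().Done(),
        // which is cancelled on the same event.
        //
        //	func hypotheticalWaitHandler(w http.ResponseWriter, r *http.Request) {
        //		select {
        //		case <-w.(http.CloseNotifier).CloseNotify():
        //			return // stream reset or connection lost
        //		case <-time.After(30 * time.Second):
        //			io.WriteString(w, "done waiting")
        //		}
        //	}
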
  2861  func (w *responseWriter) Header() http.Header {
  2862  	rws := w.rws
  2863  	if rws == nil {
  2864  		panic("Header called after Handler finished")
  2865  	}
  2866  	if rws.handlerHeader == nil {
  2867  		rws.handlerHeader = make(http.Header)
  2868  	}
  2869  	return rws.handlerHeader
  2870  }
  2871  
  2872  // checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode.
  2873  func checkWriteHeaderCode(code int) {
  2874  	// Issue 22880: require valid WriteHeader status codes.
  2875  	// For now we only enforce that it's three digits.
  2876  	// In the future we might block things over 599 (600 and above aren't defined
  2877  	// at http://httpwg.org/specs/rfc7231.html#status.codes).
  2878  	// But for now any three digits.
  2879  	//
  2880  	// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
  2881  	// no equivalent bogus thing we can realistically send in HTTP/2,
  2882  	// so we'll consistently panic instead and help people find their bugs
  2883  	// early. (We can't return an error from WriteHeader even if we wanted to.)
  2884  	if code < 100 || code > 999 {
  2885  		panic(fmt.Sprintf("invalid WriteHeader code %v", code))
  2886  	}
  2887  }
  2888  
  2889  func (w *responseWriter) WriteHeader(code int) {
  2890  	rws := w.rws
  2891  	if rws == nil {
  2892  		panic("WriteHeader called after Handler finished")
  2893  	}
  2894  	rws.writeHeader(code)
  2895  }
  2896  
  2897  func (rws *responseWriterState) writeHeader(code int) {
  2898  	if rws.wroteHeader {
  2899  		return
  2900  	}
  2901  
  2902  	checkWriteHeaderCode(code)
  2903  
  2904  	// Handle informational headers
  2905  	if code >= 100 && code <= 199 {
  2906  		// Per RFC 8297 we must not clear the current header map
  2907  		h := rws.handlerHeader
  2908  
  2909  		_, cl := h["Content-Length"]
  2910  		_, te := h["Transfer-Encoding"]
  2911  		if cl || te {
  2912  			h = h.Clone()
  2913  			h.Del("Content-Length")
  2914  			h.Del("Transfer-Encoding")
  2915  		}
  2916  
  2917  		rws.conn.writeHeaders(rws.stream, &writeResHeaders{
  2918  			streamID:    rws.stream.id,
  2919  			httpResCode: code,
  2920  			h:           h,
  2921  			endStream:   rws.handlerDone && !rws.hasTrailers(),
  2922  		})
  2923  
  2924  		return
  2925  	}
  2926  
  2927  	rws.wroteHeader = true
  2928  	rws.status = code
  2929  	if len(rws.handlerHeader) > 0 {
  2930  		rws.snapHeader = cloneHeader(rws.handlerHeader)
  2931  	}
  2932  }
  2933  
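        // Illustrative example (not part of this file; the handler name and Link
        // value are placeholders): the informational branch above is what runs when
        // a handler sends 103 Early Hints before its final response. Per RFC 8297
        // the header map is preserved, so the handler keeps using it.
        //
        //	func hypotheticalEarlyHintsHandler(w http.ResponseWriter, r *http.Request) {
        //		w.Header().Add("Link", "</style.css>; rel=preload; as=style")
        //		w.WriteHeader(http.StatusEarlyHints) // 1xx: headers sent, wroteHeader stays false
        //		w.Header().Set("Content-Type", "text/html")
        //		w.WriteHeader(http.StatusOK) // final response
        //		io.WriteString(w, "<html>...</html>")
        //	}
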
  2934  func cloneHeader(h http.Header) http.Header {
  2935  	h2 := make(http.Header, len(h))
  2936  	for k, vv := range h {
  2937  		vv2 := make([]string, len(vv))
  2938  		copy(vv2, vv)
  2939  		h2[k] = vv2
  2940  	}
  2941  	return h2
  2942  }
  2943  
  2944  // The Life Of A Write is like this:
  2945  //
  2946  // * Handler calls w.Write or w.WriteString ->
  2947  // * -> rws.bw (*bufio.Writer) ->
  2948  // * (Handler might call Flush)
  2949  // * -> chunkWriter{rws}
  2950  // * -> responseWriterState.writeChunk(p []byte)
  2951  // *    (most of the magic; see comment there)
  2952  func (w *responseWriter) Write(p []byte) (n int, err error) {
  2953  	return w.write(len(p), p, "")
  2954  }
  2955  
  2956  func (w *responseWriter) WriteString(s string) (n int, err error) {
  2957  	return w.write(len(s), nil, s)
  2958  }
  2959  
  2960  // either dataB or dataS is non-zero.
  2961  func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
  2962  	rws := w.rws
  2963  	if rws == nil {
  2964  		panic("Write called after Handler finished")
  2965  	}
  2966  	if !rws.wroteHeader {
  2967  		w.WriteHeader(200)
  2968  	}
  2969  	if !bodyAllowedForStatus(rws.status) {
  2970  		return 0, http.ErrBodyNotAllowed
  2971  	}
  2972  	rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
  2973  	if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
  2974  		// TODO: send a RST_STREAM
  2975  		return 0, errors.New("http2: handler wrote more than declared Content-Length")
  2976  	}
  2977  
  2978  	if dataB != nil {
  2979  		return rws.bw.Write(dataB)
  2980  	} else {
  2981  		return rws.bw.WriteString(dataS)
  2982  	}
  2983  }
  2984  
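        // Illustrative example (not part of this file; the handler name is
        // hypothetical): the Content-Length check above surfaces over-long
        // responses as a Write error once the declared length has gone out with
        // the response headers.
        //
        //	func hypotheticalOverflowHandler(w http.ResponseWriter, r *http.Request) {
        //		w.Header().Set("Content-Length", "5")
        //		io.WriteString(w, "hello") // exactly the declared five bytes
        //		w.(http.Flusher).Flush()   // headers (and sentContentLen) go out
        //		if _, err := io.WriteString(w, "!"); err != nil {
        //			log.Printf("rejected: %v", err) // wrote more than declared Content-Length
        //		}
        //	}
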
  2985  func (w *responseWriter) handlerDone() {
  2986  	rws := w.rws
  2987  	rws.handlerDone = true
  2988  	w.Flush()
  2989  	w.rws = nil
  2990  	responseWriterStatePool.Put(rws)
  2991  }
  2992  
  2993  // Push errors.
  2994  var (
  2995  	ErrRecursivePush    = errors.New("http2: recursive push not allowed")
  2996  	ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS")
  2997  )
  2998  
  2999  var _ http.Pusher = (*responseWriter)(nil)
  3000  
  3001  func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
  3002  	st := w.rws.stream
  3003  	sc := st.sc
  3004  	sc.serveG.checkNotOn()
  3005  
  3006  	// No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream."
  3007  	// http://tools.ietf.org/html/rfc7540#section-6.6
  3008  	if st.isPushed() {
  3009  		return ErrRecursivePush
  3010  	}
  3011  
  3012  	if opts == nil {
  3013  		opts = new(http.PushOptions)
  3014  	}
  3015  
  3016  	// Default options.
  3017  	if opts.Method == "" {
  3018  		opts.Method = "GET"
  3019  	}
  3020  	if opts.Header == nil {
  3021  		opts.Header = http.Header{}
  3022  	}
  3023  	wantScheme := "http"
  3024  	if w.rws.req.TLS != nil {
  3025  		wantScheme = "https"
  3026  	}
  3027  
  3028  	// Validate the request.
  3029  	u, err := url.Parse(target)
  3030  	if err != nil {
  3031  		return err
  3032  	}
  3033  	if u.Scheme == "" {
  3034  		if !strings.HasPrefix(target, "/") {
  3035  			return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
  3036  		}
  3037  		u.Scheme = wantScheme
  3038  		u.Host = w.rws.req.Host
  3039  	} else {
  3040  		if u.Scheme != wantScheme {
  3041  			return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
  3042  		}
  3043  		if u.Host == "" {
  3044  			return errors.New("URL must have a host")
  3045  		}
  3046  	}
  3047  	for k := range opts.Header {
  3048  		if strings.HasPrefix(k, ":") {
  3049  			return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
  3050  		}
  3051  		// These headers are meaningful only if the request has a body,
  3052  		// but PUSH_PROMISE requests cannot have a body.
  3053  		// http://tools.ietf.org/html/rfc7540#section-8.2
  3054  		// Also disallow Host, since the promised URL must be absolute.
  3055  		if asciiEqualFold(k, "content-length") ||
  3056  			asciiEqualFold(k, "content-encoding") ||
  3057  			asciiEqualFold(k, "trailer") ||
  3058  			asciiEqualFold(k, "te") ||
  3059  			asciiEqualFold(k, "expect") ||
  3060  			asciiEqualFold(k, "host") {
  3061  			return fmt.Errorf("promised request headers cannot include %q", k)
  3062  		}
  3063  	}
  3064  	if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {
  3065  		return err
  3066  	}
  3067  
  3068  	// The RFC effectively limits promised requests to GET and HEAD:
  3069  	// "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
  3070  	// http://tools.ietf.org/html/rfc7540#section-8.2
  3071  	if opts.Method != "GET" && opts.Method != "HEAD" {
  3072  		return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
  3073  	}
  3074  
  3075  	msg := &startPushRequest{
  3076  		parent: st,
  3077  		method: opts.Method,
  3078  		url:    u,
  3079  		header: cloneHeader(opts.Header),
  3080  		done:   errChanPool.Get().(chan error),
  3081  	}
  3082  
  3083  	select {
  3084  	case <-sc.doneServing:
  3085  		return errClientDisconnected
  3086  	case <-st.cw:
  3087  		return errStreamClosed
  3088  	case sc.serveMsgCh <- msg:
  3089  	}
  3090  
  3091  	select {
  3092  	case <-sc.doneServing:
  3093  		return errClientDisconnected
  3094  	case <-st.cw:
  3095  		return errStreamClosed
  3096  	case err := <-msg.done:
  3097  		errChanPool.Put(msg.done)
  3098  		return err
  3099  	}
  3100  }
  3101  
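        // Illustrative example (not part of this file; the handler name and the
        // pushed path are placeholders): a handler reaches the Push method above
        // through net/http's Pusher interface.
        //
        //	func hypotheticalPushHandler(w http.ResponseWriter, r *http.Request) {
        //		if p, ok := w.(http.Pusher); ok {
        //			if err := p.Push("/static/app.css", nil); err != nil {
        //				log.Printf("push skipped: %v", err) // e.g. http.ErrNotSupported, ErrPushLimitReached
        //			}
        //		}
        //		io.WriteString(w, "<html>...</html>")
        //	}
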
  3102  type startPushRequest struct {
  3103  	parent *stream
  3104  	method string
  3105  	url    *url.URL
  3106  	header http.Header
  3107  	done   chan error
  3108  }
  3109  
  3110  func (sc *serverConn) startPush(msg *startPushRequest) {
  3111  	sc.serveG.check()
  3112  
  3113  	// http://tools.ietf.org/html/rfc7540#section-6.6.
  3114  	// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
  3115  	// is in either the "open" or "half-closed (remote)" state.
  3116  	if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
  3117  		// responseWriter.Push checks that the stream is peer-initiated.
  3118  		msg.done <- errStreamClosed
  3119  		return
  3120  	}
  3121  
  3122  	// http://tools.ietf.org/html/rfc7540#section-6.6.
  3123  	if !sc.pushEnabled {
  3124  		msg.done <- http.ErrNotSupported
  3125  		return
  3126  	}
  3127  
  3128  	// PUSH_PROMISE frames must be sent in increasing order by stream ID, so
  3129  	// we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
  3130  	// is written. Once the ID is allocated, we start the request handler.
  3131  	allocatePromisedID := func() (uint32, error) {
  3132  		sc.serveG.check()
  3133  
  3134  		// Check this again, just in case. Technically, we might have received
  3135  		// an updated SETTINGS by the time we got around to writing this frame.
  3136  		if !sc.pushEnabled {
  3137  			return 0, http.ErrNotSupported
  3138  		}
  3139  		// http://tools.ietf.org/html/rfc7540#section-6.5.2.
  3140  		if sc.curPushedStreams+1 > sc.clientMaxStreams {
  3141  			return 0, ErrPushLimitReached
  3142  		}
  3143  
  3144  		// http://tools.ietf.org/html/rfc7540#section-5.1.1.
  3145  		// Streams initiated by the server MUST use even-numbered identifiers.
  3146  		// A server that is unable to establish a new stream identifier can send a GOAWAY
  3147  		// frame so that the client is forced to open a new connection for new streams.
  3148  		if sc.maxPushPromiseID+2 >= 1<<31 {
  3149  			sc.startGracefulShutdownInternal()
  3150  			return 0, ErrPushLimitReached
  3151  		}
  3152  		sc.maxPushPromiseID += 2
  3153  		promisedID := sc.maxPushPromiseID
  3154  
  3155  		// http://tools.ietf.org/html/rfc7540#section-8.2.
  3156  		// Strictly speaking, the new stream should start in "reserved (local)", then
  3157  		// transition to "half closed (remote)" after sending the initial HEADERS, but
  3158  		// we start in "half closed (remote)" for simplicity.
  3159  		// See further comments at the definition of stateHalfClosedRemote.
  3160  		promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
  3161  		rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{
  3162  			method:    msg.method,
  3163  			scheme:    msg.url.Scheme,
  3164  			authority: msg.url.Host,
  3165  			path:      msg.url.RequestURI(),
  3166  			header:    cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
  3167  		})
  3168  		if err != nil {
  3169  			// Should not happen, since we've already validated msg.url.
  3170  			panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
  3171  		}
  3172  
  3173  		sc.curHandlers++
  3174  		go sc.runHandler(rw, req, sc.handler.ServeHTTP)
  3175  		return promisedID, nil
  3176  	}
  3177  
  3178  	sc.writeFrame(FrameWriteRequest{
  3179  		write: &writePushPromise{
  3180  			streamID:           msg.parent.id,
  3181  			method:             msg.method,
  3182  			url:                msg.url,
  3183  			h:                  msg.header,
  3184  			allocatePromisedID: allocatePromisedID,
  3185  		},
  3186  		stream: msg.parent,
  3187  		done:   msg.done,
  3188  	})
  3189  }
  3190  
  3191  // foreachHeaderElement splits v according to the "#rule" construction
  3192  // in RFC 7230 section 7 and calls fn for each non-empty element.
  3193  func foreachHeaderElement(v string, fn func(string)) {
  3194  	v = textproto.TrimString(v)
  3195  	if v == "" {
  3196  		return
  3197  	}
  3198  	if !strings.Contains(v, ",") {
  3199  		fn(v)
  3200  		return
  3201  	}
  3202  	for _, f := range strings.Split(v, ",") {
  3203  		if f = textproto.TrimString(f); f != "" {
  3204  			fn(f)
  3205  		}
  3206  	}
  3207  }
  3208  
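        // Illustrative usage (not part of this file): splitting a comma-separated
        // field value into its non-empty, trimmed elements.
        //
        //	foreachHeaderElement(" gzip, , chunked ", func(e string) {
        //		fmt.Println(e) // prints "gzip", then "chunked"
        //	})
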
  3209  // From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2
  3210  var connHeaders = []string{
  3211  	"Connection",
  3212  	"Keep-Alive",
  3213  	"Proxy-Connection",
  3214  	"Transfer-Encoding",
  3215  	"Upgrade",
  3216  }
  3217  
  3218  // checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request,
  3219  // per RFC 7540 Section 8.1.2.2.
  3220  // The returned error is reported to users.
  3221  func checkValidHTTP2RequestHeaders(h http.Header) error {
  3222  	for _, k := range connHeaders {
  3223  		if _, ok := h[k]; ok {
  3224  			return fmt.Errorf("request header %q is not valid in HTTP/2", k)
  3225  		}
  3226  	}
  3227  	te := h["Te"]
  3228  	if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
  3229  		return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
  3230  	}
  3231  	return nil
  3232  }
  3233  
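        // Illustrative usage (not part of this file): connection-specific headers
        // are rejected, while "TE: trailers" is the only TE value allowed.
        //
        //	_ = checkValidHTTP2RequestHeaders(http.Header{"Te": {"trailers"}})          // nil
        //	_ = checkValidHTTP2RequestHeaders(http.Header{"Keep-Alive": {"timeout=5"}}) // error
        //	_ = checkValidHTTP2RequestHeaders(http.Header{"Te": {"gzip"}})              // error
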
  3234  func new400Handler(err error) http.HandlerFunc {
  3235  	return func(w http.ResponseWriter, r *http.Request) {
  3236  		http.Error(w, err.Error(), http.StatusBadRequest)
  3237  	}
  3238  }
  3239  
  3240  // h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
  3241  // disabled. See comments on h1ServerShutdownChan above for why
  3242  // the code is written this way.
  3243  func h1ServerKeepAlivesDisabled(hs *http.Server) bool {
  3244  	var x interface{} = hs
  3245  	type I interface {
  3246  		doKeepAlives() bool
  3247  	}
  3248  	if hs, ok := x.(I); ok {
  3249  		return !hs.doKeepAlives()
  3250  	}
  3251  	return false
  3252  }
  3253  
  3254  func (sc *serverConn) countError(name string, err error) error {
  3255  	if sc == nil || sc.srv == nil {
  3256  		return err
  3257  	}
  3258  	f := sc.srv.CountError
  3259  	if f == nil {
  3260  		return err
  3261  	}
  3262  	var typ string
  3263  	var code ErrCode
  3264  	switch e := err.(type) {
  3265  	case ConnectionError:
  3266  		typ = "conn"
  3267  		code = ErrCode(e)
  3268  	case StreamError:
  3269  		typ = "stream"
  3270  		code = ErrCode(e.Code)
  3271  	default:
  3272  		return err
  3273  	}
  3274  	codeStr := errCodeName[code]
  3275  	if codeStr == "" {
  3276  		codeStr = strconv.Itoa(int(code))
  3277  	}
  3278  	f(fmt.Sprintf("%s_%s_%s", typ, codeStr, name))
  3279  	return err
  3280  }