github.com/ice-blockchain/go/src@v0.0.0-20240403114104-1564d284e521/net/http/server.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // HTTP server. See RFC 7230 through 7235.
     6  
     7  package http
     8  
     9  import (
    10  	"bufio"
    11  	"bytes"
    12  	"context"
    13  	"crypto/tls"
    14  	"errors"
    15  	"fmt"
    16  	"io"
    17  	"log"
    18  	"math/rand"
    19  	"net"
    20  	"net/http"
    21  	"net/textproto"
    22  	"net/url"
    23  	urlpkg "net/url"
    24  	"path"
    25  	"runtime"
    26  	"slices"
    27  	"strconv"
    28  	"strings"
    29  	"sync"
    30  	"sync/atomic"
    31  	"time"
    32  
    33  	"golang.org/x/net/http/httpguts"
    34  )
    35  
    36  // Errors used by the HTTP server.
    37  var (
    38  	// ErrBodyNotAllowed is returned by ResponseWriter.Write calls
    39  	// when the HTTP method or response code does not permit a
    40  	// body.
    41  	ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body")
    42  
    43  	// ErrHijacked is returned by ResponseWriter.Write calls when
    44  	// the underlying connection has been hijacked using the
    45  	// Hijacker interface. A zero-byte write on a hijacked
    46  	// connection will return ErrHijacked without any other side
    47  	// effects.
    48  	ErrHijacked = errors.New("http: connection has been hijacked")
    49  
    50  	// ErrContentLength is returned by ResponseWriter.Write calls
    51  	// when a Handler set a Content-Length response header with a
    52  	// declared size and then attempted to write more bytes than
    53  	// declared.
    54  	ErrContentLength = errors.New("http: wrote more than the declared Content-Length")
    55  
    56  	// Deprecated: ErrWriteAfterFlush is no longer returned by
    57  	// anything in the net/http package. Callers should not
    58  	// compare errors against this variable.
    59  	ErrWriteAfterFlush = errors.New("unused")
    60  )
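
// A minimal sketch of how ErrBodyNotAllowed can surface in a handler,
// assuming only the standard net/http API: a 204 No Content response does
// not permit a body, so a subsequent Write fails.
//
//	func noContent(w http.ResponseWriter, r *http.Request) {
//		w.WriteHeader(http.StatusNoContent)
//		if _, err := w.Write([]byte("ignored")); err != nil {
//			log.Println(err) // "http: request method or response status code does not allow body"
//		}
//	}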
    61  
    62  // A Handler responds to an HTTP request.
    63  //
    64  // [Handler.ServeHTTP] should write reply headers and data to the [ResponseWriter]
    65  // and then return. Returning signals that the request is finished; it
    66  // is not valid to use the [ResponseWriter] or read from the
    67  // [Request.Body] after or concurrently with the completion of the
    68  // ServeHTTP call.
    69  //
    70  // Depending on the HTTP client software, HTTP protocol version, and
    71  // any intermediaries between the client and the Go server, it may not
    72  // be possible to read from the [Request.Body] after writing to the
    73  // [ResponseWriter]. Cautious handlers should read the [Request.Body]
    74  // first, and then reply.
    75  //
    76  // Except for reading the body, handlers should not modify the
    77  // provided Request.
    78  //
    79  // If ServeHTTP panics, the server (the caller of ServeHTTP) assumes
    80  // that the effect of the panic was isolated to the active request.
    81  // It recovers the panic, logs a stack trace to the server error log,
    82  // and either closes the network connection or sends an HTTP/2
    83  // RST_STREAM, depending on the HTTP protocol. To abort a handler so
    84  // the client sees an interrupted response but the server doesn't log
    85  // an error, panic with the value [ErrAbortHandler].
    86  type Handler = http.Handler
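
// A minimal sketch of a Handler that follows the guidance above, assuming
// only the standard net/http and io packages: it reads Request.Body before
// replying and does not touch the ResponseWriter after ServeHTTP returns.
//
//	func echo(w http.ResponseWriter, r *http.Request) {
//		body, err := io.ReadAll(r.Body) // read the body first
//		if err != nil {
//			http.Error(w, "cannot read body", http.StatusBadRequest)
//			return
//		}
//		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
//		w.Write(body) // then reply
//	}
//
// It can be registered with, for example, http.Handle("/echo", http.HandlerFunc(echo)).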
    87  
    88  // A ResponseWriter interface is used by an HTTP handler to
    89  // construct an HTTP response.
    90  //
    91  // A ResponseWriter may not be used after [Handler.ServeHTTP] has returned.
    92  type ResponseWriter = http.ResponseWriter
    93  
    94  // The Flusher interface is implemented by ResponseWriters that allow
    95  // an HTTP handler to flush buffered data to the client.
    96  //
    97  // The default HTTP/1.x and HTTP/2 [ResponseWriter] implementations
    98  // support [Flusher], but ResponseWriter wrappers may not. Handlers
    99  // should always test for this ability at runtime.
   100  //
   101  // Note that even for ResponseWriters that support Flush,
   102  // if the client is connected through an HTTP proxy,
   103  // the buffered data may not reach the client until the response
   104  // completes.
   105  type Flusher = http.Flusher
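
// A minimal sketch of the runtime test recommended above, assuming only the
// standard library (net/http, fmt, time):
//
//	func stream(w http.ResponseWriter, r *http.Request) {
//		flusher, ok := w.(http.Flusher)
//		if !ok {
//			http.Error(w, "streaming unsupported", http.StatusInternalServerError)
//			return
//		}
//		for i := 0; i < 3; i++ {
//			fmt.Fprintf(w, "tick %d\n", i)
//			flusher.Flush() // push buffered data to the client now
//			time.Sleep(time.Second)
//		}
//	}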
   106  
   107  // The Hijacker interface is implemented by ResponseWriters that allow
   108  // an HTTP handler to take over the connection.
   109  //
   110  // The default [ResponseWriter] for HTTP/1.x connections supports
   111  // Hijacker, but HTTP/2 connections intentionally do not.
   112  // ResponseWriter wrappers may also not support Hijacker. Handlers
   113  // should always test for this ability at runtime.
   114  type Hijacker = http.Hijacker
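
// A minimal sketch of taking over the connection, assuming only the standard
// net/http API; after Hijack the handler owns the net.Conn and must close it:
//
//	func raw(w http.ResponseWriter, r *http.Request) {
//		hj, ok := w.(http.Hijacker)
//		if !ok {
//			http.Error(w, "hijacking unsupported", http.StatusInternalServerError)
//			return
//		}
//		conn, bufrw, err := hj.Hijack()
//		if err != nil {
//			http.Error(w, err.Error(), http.StatusInternalServerError)
//			return
//		}
//		defer conn.Close()
//		bufrw.WriteString("HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nok")
//		bufrw.Flush()
//	}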
   115  
   116  // The CloseNotifier interface is implemented by ResponseWriters which
   117  // allow detecting when the underlying connection has gone away.
   118  //
   119  // This mechanism can be used to cancel long operations on the server
   120  // if the client has disconnected before the response is ready.
   121  //
   122  // Deprecated: the CloseNotifier interface predates Go's context package.
   123  // New code should use [Request.Context] instead.
   124  type CloseNotifier interface {
   125  	// CloseNotify returns a channel that receives at most a
   126  	// single value (true) when the client connection has gone
   127  	// away.
   128  	//
   129  	// CloseNotify may wait to notify until Request.Body has been
   130  	// fully read.
   131  	//
   132  	// After the Handler has returned, there is no guarantee
   133  	// that the channel receives a value.
   134  	//
   135  	// If the protocol is HTTP/1.1 and CloseNotify is called while
   136  	// processing an idempotent request (such as a GET) while
   137  	// HTTP/1.1 pipelining is in use, the arrival of a subsequent
   138  	// pipelined request may cause a value to be sent on the
   139  	// returned channel. In practice HTTP/1.1 pipelining is not
   140  	// enabled in browsers and not seen often in the wild. If this
   141  	// is a problem, use HTTP/2 or only use CloseNotify on methods
   142  	// such as POST.
   143  	CloseNotify() <-chan bool
   144  }
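
// A minimal sketch of the context-based replacement suggested above, assuming
// only the standard library (net/http, io, time):
//
//	func slow(w http.ResponseWriter, r *http.Request) {
//		select {
//		case <-time.After(10 * time.Second):
//			io.WriteString(w, "done\n")
//		case <-r.Context().Done():
//			// The client went away (or the request was canceled);
//			// abandon the long operation.
//			return
//		}
//	}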
   145  
   146  var (
   147  	// ServerContextKey is a context key. It can be used in HTTP
   148  	// handlers with Context.Value to access the server that
   149  	// started the handler. The associated value will be of
   150  	// type *Server.
   151  	ServerContextKey = &contextKey{"http-server"}
   152  
   153  	// LocalAddrContextKey is a context key. It can be used in
   154  	// HTTP handlers with Context.Value to access the local
   155  	// address the connection arrived on.
   156  	// The associated value will be of type net.Addr.
   157  	LocalAddrContextKey = &contextKey{"local-addr"}
   158  )
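
// A minimal sketch of reading these keys from a request's context, assuming
// only the standard library (net/http, net, fmt):
//
//	func info(w http.ResponseWriter, r *http.Request) {
//		srv, _ := r.Context().Value(http.ServerContextKey).(*http.Server)
//		addr, _ := r.Context().Value(http.LocalAddrContextKey).(net.Addr)
//		if srv != nil && addr != nil {
//			fmt.Fprintf(w, "served by %q on %v\n", srv.Addr, addr)
//		}
//	}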
   159  
   160  // A conn represents the server side of an HTTP connection.
   161  type conn struct {
   162  	// server is the server on which the connection arrived.
   163  	// Immutable; never nil.
   164  	server *Server
   165  
   166  	// cancelCtx cancels the connection-level context.
   167  	cancelCtx context.CancelFunc
   168  
   169  	// rwc is the underlying network connection.
   170  	// This is never wrapped by other types and is the value given out
   171  	// to CloseNotifier callers. It is usually of type *net.TCPConn or
   172  	// *tls.Conn.
   173  	rwc net.Conn
   174  
   175  	// remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously
   176  	// inside the Listener's Accept goroutine, as some implementations block.
   177  	// It is populated immediately inside the (*conn).serve goroutine.
   178  	// This is the value of a Handler's (*Request).RemoteAddr.
   179  	remoteAddr string
   180  
   181  	// tlsState is the TLS connection state when using TLS.
   182  	// nil means not TLS.
   183  	tlsState *tls.ConnectionState
   184  
   185  	// werr is set to the first write error to rwc.
   186  	// It is set via checkConnErrorWriter{c}, where bufw writes.
   187  	werr error
   188  
   189  	// r is bufr's read source. It's a wrapper around rwc that provides
   190  	// io.LimitedReader-style limiting (while reading request headers)
   191  	// and functionality to support CloseNotifier. See *connReader docs.
   192  	r *connReader
   193  
   194  	// bufr reads from r.
   195  	bufr *bufio.Reader
   196  
   197  	// bufw writes to checkConnErrorWriter{c}, which populates werr on error.
   198  	bufw *bufio.Writer
   199  
   200  	// lastMethod is the method of the most recent request
   201  	// on this connection, if any.
   202  	lastMethod string
   203  
   204  	curReq atomic.Pointer[response] // (which has a Request in it)
   205  
   206  	curState atomic.Uint64 // packed (unixtime<<8|uint8(ConnState))
   207  
   208  	// mu guards hijackedv
   209  	mu sync.Mutex
   210  
   211  	// hijackedv is whether this connection has been hijacked
   212  	// by a Handler with the Hijacker interface.
   213  	// It is guarded by mu.
   214  	hijackedv bool
   215  }
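
// A sketch of how the packed curState value above decodes, assuming the
// encoding stated in its comment (low byte is the ConnState, the remaining
// bits are a Unix timestamp):
//
//	packed := c.curState.Load()
//	state := ConnState(packed & 0xff)
//	unixSec := int64(packed >> 8)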
   216  
   217  func (c *conn) hijacked() bool {
   218  	c.mu.Lock()
   219  	defer c.mu.Unlock()
   220  	return c.hijackedv
   221  }
   222  
   223  // c.mu must be held.
   224  func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
   225  	if c.hijackedv {
   226  		return nil, nil, ErrHijacked
   227  	}
   228  	c.r.abortPendingRead()
   229  
   230  	c.hijackedv = true
   231  	rwc = c.rwc
   232  	rwc.SetDeadline(time.Time{})
   233  
   234  	buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc))
   235  	if c.r.hasByte {
   236  		if _, err := c.bufr.Peek(c.bufr.Buffered() + 1); err != nil {
   237  			return nil, nil, fmt.Errorf("unexpected Peek failure reading buffered byte: %v", err)
   238  		}
   239  	}
   240  	c.setState(rwc, StateHijacked, runHooks)
   241  	return
   242  }
   243  
   244  // This should be >= 512 bytes for DetectContentType,
   245  // but otherwise it's somewhat arbitrary.
   246  const bufferBeforeChunkingSize = 2048
   247  
   248  // chunkWriter writes to a response's conn buffer, and is the writer
   249  // wrapped by the response.w buffered writer.
   250  //
   251  // chunkWriter also is responsible for finalizing the Header, including
   252  // conditionally setting the Content-Type and setting a Content-Length
   253  // in cases where the handler's final output is smaller than the buffer
   254  // size. It also conditionally adds chunk headers, when in chunking mode.
   255  //
   256  // See the comment above (*response).Write for the entire write flow.
   257  type chunkWriter struct {
   258  	res *response
   259  
   260  	// header is either nil or a deep clone of res.handlerHeader
   261  	// at the time of res.writeHeader, if res.writeHeader is
   262  	// called and extra buffering is being done to calculate
   263  	// Content-Type and/or Content-Length.
   264  	header Header
   265  
   266  	// wroteHeader tells whether the header's been written to "the
   267  	// wire" (or rather: w.conn.bufw). This is unlike
   268  	// (*response).wroteHeader, which tells only whether it was
   269  	// logically written.
   270  	wroteHeader bool
   271  
   272  	// set by the writeHeader method:
   273  	chunking bool // using chunked transfer encoding for reply body
   274  }
   275  
   276  var (
   277  	crlf       = []byte("\r\n")
   278  	colonSpace = []byte(": ")
   279  )
   280  
   281  func (cw *chunkWriter) Write(p []byte) (n int, err error) {
   282  	if !cw.wroteHeader {
   283  		cw.writeHeader(p)
   284  	}
   285  	if cw.res.req.Method == "HEAD" {
   286  		// Eat writes.
   287  		return len(p), nil
   288  	}
   289  	if cw.chunking {
   290  		_, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p))
   291  		if err != nil {
   292  			cw.res.conn.rwc.Close()
   293  			return
   294  		}
   295  	}
   296  	n, err = cw.res.conn.bufw.Write(p)
   297  	if cw.chunking && err == nil {
   298  		_, err = cw.res.conn.bufw.Write(crlf)
   299  	}
   300  	if err != nil {
   301  		cw.res.conn.rwc.Close()
   302  	}
   303  	return
   304  }
   305  
   306  func (cw *chunkWriter) flush() error {
   307  	if !cw.wroteHeader {
   308  		cw.writeHeader(nil)
   309  	}
   310  	return cw.res.conn.bufw.Flush()
   311  }
   312  
   313  func (cw *chunkWriter) close() {
   314  	if !cw.wroteHeader {
   315  		cw.writeHeader(nil)
   316  	}
   317  	if cw.chunking {
   318  		bw := cw.res.conn.bufw // conn's bufio writer
   319  		// zero chunk to mark EOF
   320  		bw.WriteString("0\r\n")
   321  		if trailers := cw.res.finalTrailers(); trailers != nil {
   322  			trailers.Write(bw) // the writer handles noting errors
   323  		}
   324  		// final blank line after the trailers (whether
   325  		// present or not)
   326  		bw.WriteString("\r\n")
   327  	}
   328  }
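
// For a body of "hello" with no trailers, Write and close above produce the
// standard RFC 7230 section 4.1 chunked framing on the wire, roughly:
//
//	5\r\n
//	hello\r\n
//	0\r\n
//	\r\n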
   329  
   330  // A response represents the server side of an HTTP response.
   331  type response struct {
   332  	conn             *conn
   333  	req              *Request // request for this response
   334  	reqBody          io.ReadCloser
   335  	cancelCtx        context.CancelFunc // when ServeHTTP exits
   336  	wroteHeader      bool               // a non-1xx header has been (logically) written
   337  	wroteContinue    bool               // 100 Continue response was written
   338  	wants10KeepAlive bool               // HTTP/1.0 w/ Connection "keep-alive"
   339  	wantsClose       bool               // HTTP request has Connection "close"
   340  
   341  	// canWriteContinue is an atomic boolean that says whether or
   342  	// not a 100 Continue header can be written to the
   343  	// connection.
   344  	// writeContinueMu must be held while writing the header.
   345  	// These two fields together synchronize the body reader (the
   346  	// expectContinueReader, which wants to write 100 Continue)
   347  	// against the main writer.
   348  	canWriteContinue atomic.Bool
   349  	writeContinueMu  sync.Mutex
   350  
   351  	w  *bufio.Writer // buffers output in chunks to chunkWriter
   352  	cw chunkWriter
   353  
   354  	// handlerHeader is the Header that Handlers get access to,
   355  	// which may be retained and mutated even after WriteHeader.
   356  	// handlerHeader is copied into cw.header at WriteHeader
   357  	// time, and privately mutated thereafter.
   358  	handlerHeader Header
   359  	calledHeader  bool // handler accessed handlerHeader via Header
   360  
   361  	written       int64 // number of bytes written in body
   362  	contentLength int64 // explicitly-declared Content-Length; or -1
   363  	status        int   // status code passed to WriteHeader
   364  
   365  	// closeAfterReply indicates whether to close the connection
   366  	// after this reply. It is set on the request and updated after
   367  	// the handler's response if there's a "Connection: keep-alive"
   368  	// response header and a Content-Length.
   369  	closeAfterReply bool
   370  
   371  	// When fullDuplex is false (the default), we consume any remaining
   372  	// request body before starting to write a response.
   373  	fullDuplex bool
   374  
   375  	// requestBodyLimitHit is set by requestTooLarge when
   376  	// maxBytesReader hits its max size. It is checked in
   377  	// WriteHeader, to make sure we don't consume the
   378  	// remaining request body to try to advance to the next HTTP
   379  	// request. Instead, when this is set, we stop reading
   380  	// subsequent requests on this connection and stop reading
   381  	// input from it.
   382  	requestBodyLimitHit bool
   383  
   384  	// trailers are the headers to be sent after the handler
   385  	// finishes writing the body. This field is initialized from
   386  	// the Trailer response header when the response header is
   387  	// written.
   388  	trailers []string
   389  
   390  	handlerDone atomic.Bool // set true when the handler exits
   391  
   392  	// Buffers for Date, Content-Length, and status code
   393  	dateBuf   [len(TimeFormat)]byte
   394  	clenBuf   [10]byte
   395  	statusBuf [3]byte
   396  
   397  	// closeNotifyCh is the channel returned by CloseNotify.
   398  	// TODO(bradfitz): this is currently (for Go 1.8) always
   399  	// non-nil. Make this lazily-created again as it used to be?
   400  	closeNotifyCh  chan bool
   401  	didCloseNotify atomic.Bool // atomic (only false->true winner should send)
   402  }
   403  
   404  func (c *response) SetReadDeadline(deadline time.Time) error {
   405  	return c.conn.rwc.SetReadDeadline(deadline)
   406  }
   407  
   408  func (c *response) SetWriteDeadline(deadline time.Time) error {
   409  	return c.conn.rwc.SetWriteDeadline(deadline)
   410  }
   411  
   412  func (c *response) EnableFullDuplex() error {
   413  	c.fullDuplex = true
   414  	return nil
   415  }
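
// These methods are what [http.ResponseController] looks for on the
// ResponseWriter. A minimal sketch of using them from a handler, assuming the
// standard net/http API (NewResponseController needs Go 1.20+, EnableFullDuplex
// needs Go 1.21+):
//
//	func duplex(w http.ResponseWriter, r *http.Request) {
//		rc := http.NewResponseController(w)
//		rc.SetReadDeadline(time.Now().Add(30 * time.Second))
//		if err := rc.EnableFullDuplex(); err != nil {
//			http.Error(w, "full duplex unsupported", http.StatusInternalServerError)
//			return
//		}
//		// It is now safe to interleave reads from r.Body with writes to w.
//	}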
   416  
   417  // TrailerPrefix is a magic prefix for [ResponseWriter.Header] map keys
   418  // that, if present, signals that the map entry is actually for
   419  // the response trailers, and not the response headers. The prefix
   420  // is stripped after the ServeHTTP call finishes and the values are
   421  // sent in the trailers.
   422  //
   423  // This mechanism is intended only for trailers that are not known
   424  // prior to the headers being written. If the set of trailers is fixed
   425  // or known before the header is written, the normal Go trailers mechanism
   426  // is preferred:
   427  //
   428  //	https://pkg.go.dev/net/http#ResponseWriter
   429  //	https://pkg.go.dev/net/http#example-ResponseWriter-Trailers
   430  const TrailerPrefix = "Trailer:"
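
// A minimal sketch of the prefix mechanism, assuming only the standard
// library (net/http, crypto/sha256, encoding/hex); the trailer value is not
// known until the body has been written:
//
//	func checksummed(w http.ResponseWriter, r *http.Request) {
//		h := sha256.New()
//		body := []byte("a body whose checksum is only known afterwards\n")
//		w.Write(body)
//		h.Write(body)
//		if f, ok := w.(http.Flusher); ok {
//			f.Flush() // keep the response chunked so the trailer can be sent
//		}
//		w.Header().Set(http.TrailerPrefix+"X-Checksum", hex.EncodeToString(h.Sum(nil)))
//	}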
   431  
   432  // finalTrailers is called after the Handler exits and returns a non-nil
   433  // value if the Handler set any trailers.
   434  func (w *response) finalTrailers() Header {
   435  	var t Header
   436  	for k, vv := range w.handlerHeader {
   437  		if kk, found := strings.CutPrefix(k, TrailerPrefix); found {
   438  			if t == nil {
   439  				t = make(Header)
   440  			}
   441  			t[kk] = vv
   442  		}
   443  	}
   444  	for _, k := range w.trailers {
   445  		if t == nil {
   446  			t = make(Header)
   447  		}
   448  		for _, v := range w.handlerHeader[k] {
   449  			t.Add(k, v)
   450  		}
   451  	}
   452  	return t
   453  }
   454  
   455  // declareTrailer is called for each Trailer header when the
   456  // response header is written. It notes that a header will need to be
   457  // written in the trailers at the end of the response.
   458  func (w *response) declareTrailer(k string) {
   459  	k = CanonicalHeaderKey(k)
   460  	if !httpguts.ValidTrailerHeader(k) {
   461  		// Forbidden by RFC 7230, section 4.1.2
   462  		return
   463  	}
   464  	w.trailers = append(w.trailers, k)
   465  }
   466  
   467  // requestTooLarge is called by maxBytesReader when too much input has
   468  // been read from the client.
   469  func (w *response) requestTooLarge() {
   470  	w.closeAfterReply = true
   471  	w.requestBodyLimitHit = true
   472  	if !w.wroteHeader {
   473  		w.Header().Set("Connection", "close")
   474  	}
   475  }
   476  
   477  // writerOnly hides an io.Writer value's optional ReadFrom method
   478  // from io.Copy.
   479  type writerOnly struct {
   480  	io.Writer
   481  }
   482  
   483  // ReadFrom is here to optimize copying from an [*os.File] regular file
   484  // to a [*net.TCPConn] with sendfile, or from a supported src type such
   485  // as a *net.TCPConn on Linux with splice.
   486  func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
   487  	buf := getCopyBuf()
   488  	defer putCopyBuf(buf)
   489  
   490  	// Our underlying w.conn.rwc is usually a *TCPConn (with its
   491  	// own ReadFrom method). If not, just fall back to the normal
   492  	// copy method.
   493  	rf, ok := w.conn.rwc.(io.ReaderFrom)
   494  	if !ok {
   495  		return io.CopyBuffer(writerOnly{w}, src, buf)
   496  	}
   497  
   498  	// Copy the first sniffLen bytes before switching to ReadFrom.
   499  	// This ensures we don't start writing the response before the
   500  	// source is available (see golang.org/issue/5660) and provides
   501  	// enough bytes to perform Content-Type sniffing when required.
   502  	if !w.cw.wroteHeader {
   503  		n0, err := io.CopyBuffer(writerOnly{w}, io.LimitReader(src, sniffLen), buf)
   504  		n += n0
   505  		if err != nil || n0 < sniffLen {
   506  			return n, err
   507  		}
   508  	}
   509  
   510  	w.w.Flush()  // get rid of any previous writes
   511  	w.cw.flush() // make sure Header is written; flush data to rwc
   512  
   513  	// Now that cw has been flushed, its chunking field is guaranteed initialized.
   514  	if !w.cw.chunking && w.bodyAllowed() {
   515  		n0, err := rf.ReadFrom(src)
   516  		n += n0
   517  		w.written += n0
   518  		return n, err
   519  	}
   520  
   521  	n0, err := io.CopyBuffer(writerOnly{w}, src, buf)
   522  	n += n0
   523  	return n, err
   524  }
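
// A minimal sketch of a handler that can benefit from this path, assuming
// only the standard library (net/http, io, os); the file path is illustrative.
// io.Copy finds the ReaderFrom on the ResponseWriter, so a regular file may be
// sent with sendfile where the platform supports it:
//
//	func download(w http.ResponseWriter, r *http.Request) {
//		f, err := os.Open("testdata/large.bin") // illustrative path
//		if err != nil {
//			http.NotFound(w, r)
//			return
//		}
//		defer f.Close()
//		w.Header().Set("Content-Type", "application/octet-stream")
//		io.Copy(w, f)
//	}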
   525  
   526  // debugServerConnections controls whether all server connections are wrapped
   527  // with a verbose logging wrapper.
   528  const debugServerConnections = false
   529  
   530  // Create new connection from rwc.
   531  func (srv *Server) newConn(rwc net.Conn) *conn {
   532  	c := &conn{
   533  		server: srv,
   534  		rwc:    rwc,
   535  	}
   536  	if debugServerConnections {
   537  		c.rwc = newLoggingConn("server", c.rwc)
   538  	}
   539  	return c
   540  }
   541  
   542  type readResult struct {
   543  	_   incomparable
   544  	n   int
   545  	err error
   546  	b   byte // byte read, if n == 1
   547  }
   548  
   549  // connReader is the io.Reader wrapper used by *conn. It combines a
   550  // selectively-activated io.LimitedReader (to bound request header
   551  // read sizes) with support for selectively keeping an io.Reader.Read
   552  // call blocked in a background goroutine to wait for activity and
   553  // trigger a CloseNotifier channel.
   554  type connReader struct {
   555  	conn *conn
   556  
   557  	mu      sync.Mutex // guards following
   558  	hasByte bool
   559  	byteBuf [1]byte
   560  	cond    *sync.Cond
   561  	inRead  bool
   562  	aborted bool  // set true before conn.rwc deadline is set to past
   563  	remain  int64 // bytes remaining
   564  }
   565  
   566  func (cr *connReader) lock() {
   567  	cr.mu.Lock()
   568  	if cr.cond == nil {
   569  		cr.cond = sync.NewCond(&cr.mu)
   570  	}
   571  }
   572  
   573  func (cr *connReader) unlock() { cr.mu.Unlock() }
   574  
   575  func (cr *connReader) startBackgroundRead() {
   576  	cr.lock()
   577  	defer cr.unlock()
   578  	if cr.inRead {
   579  		panic("invalid concurrent Body.Read call")
   580  	}
   581  	if cr.hasByte {
   582  		return
   583  	}
   584  	cr.inRead = true
   585  	cr.conn.rwc.SetReadDeadline(time.Time{})
   586  	go cr.backgroundRead()
   587  }
   588  
   589  func (cr *connReader) backgroundRead() {
   590  	n, err := cr.conn.rwc.Read(cr.byteBuf[:])
   591  	cr.lock()
   592  	if n == 1 {
   593  		cr.hasByte = true
   594  		// We were past the end of the previous request's body already
   595  		// (since we wouldn't be in a background read otherwise), so
   596  		// this is a pipelined HTTP request. Prior to Go 1.11 we used to
   597  		// send on the CloseNotify channel and cancel the context here,
   598  		// but the behavior was documented as only "may", and we only
   599  		// did that because that's how CloseNotify accidentally behaved
   600  		// in very early Go releases prior to context support. Once we
   601  		// added context support, people used a Handler's
   602  		// Request.Context() and passed it along. Having that context
   603  		// cancel on pipelined HTTP requests caused problems.
   604  		// Fortunately, almost nothing uses HTTP/1.x pipelining.
   605  		// Unfortunately, apt-get does, or sometimes does.
   606  		// New Go 1.11 behavior: don't fire CloseNotify or cancel
   607  		// contexts on pipelined requests. Shouldn't affect people, but
   608  		// fixes cases like Issue 23921. This does mean that a client
   609  		// closing their TCP connection after sending a pipelined
   610  		// request won't cancel the context, but we'll catch that on any
   611  		// write failure (in checkConnErrorWriter.Write).
   612  		// If the server never writes, yes, there are still contrived
   613  		// server & client behaviors where this fails to ever cancel the
   614  		// context, but that's kinda why HTTP/1.x pipelining died
   615  		// anyway.
   616  	}
   617  	if ne, ok := err.(net.Error); ok && cr.aborted && ne.Timeout() {
   618  		// Ignore this error. It's the expected error from
   619  		// another goroutine calling abortPendingRead.
   620  	} else if err != nil {
   621  		cr.handleReadError(err)
   622  	}
   623  	cr.aborted = false
   624  	cr.inRead = false
   625  	cr.unlock()
   626  	cr.cond.Broadcast()
   627  }
   628  
   629  func (cr *connReader) abortPendingRead() {
   630  	cr.lock()
   631  	defer cr.unlock()
   632  	if !cr.inRead {
   633  		return
   634  	}
   635  	cr.aborted = true
   636  	cr.conn.rwc.SetReadDeadline(aLongTimeAgo)
   637  	for cr.inRead {
   638  		cr.cond.Wait()
   639  	}
   640  	cr.conn.rwc.SetReadDeadline(time.Time{})
   641  }
   642  
   643  func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain }
   644  func (cr *connReader) setInfiniteReadLimit()     { cr.remain = maxInt64 }
   645  func (cr *connReader) hitReadLimit() bool        { return cr.remain <= 0 }
   646  
   647  // handleReadError is called whenever a Read from the client returns a
   648  // non-nil error.
   649  //
   650  // The provided non-nil err is almost always io.EOF or a "use of
   651  // closed network connection". In any case, the error is not
   652  // particularly interesting, except perhaps for debugging during
   653  // development. Any error means the connection is dead and we should
   654  // tear down its context.
   655  //
   656  // It may be called from multiple goroutines.
   657  func (cr *connReader) handleReadError(_ error) {
   658  	cr.conn.cancelCtx()
   659  	cr.closeNotify()
   660  }
   661  
   662  // may be called from multiple goroutines.
   663  func (cr *connReader) closeNotify() {
   664  	res := cr.conn.curReq.Load()
   665  	if res != nil && !res.didCloseNotify.Swap(true) {
   666  		res.closeNotifyCh <- true
   667  	}
   668  }
   669  
   670  func (cr *connReader) Read(p []byte) (n int, err error) {
   671  	cr.lock()
   672  	if cr.inRead {
   673  		cr.unlock()
   674  		if cr.conn.hijacked() {
   675  			panic("invalid Body.Read call. After hijacked, the original Request must not be used")
   676  		}
   677  		panic("invalid concurrent Body.Read call")
   678  	}
   679  	if cr.hitReadLimit() {
   680  		cr.unlock()
   681  		return 0, io.EOF
   682  	}
   683  	if len(p) == 0 {
   684  		cr.unlock()
   685  		return 0, nil
   686  	}
   687  	if int64(len(p)) > cr.remain {
   688  		p = p[:cr.remain]
   689  	}
   690  	if cr.hasByte {
   691  		p[0] = cr.byteBuf[0]
   692  		cr.hasByte = false
   693  		cr.unlock()
   694  		return 1, nil
   695  	}
   696  	cr.inRead = true
   697  	cr.unlock()
   698  	n, err = cr.conn.rwc.Read(p)
   699  
   700  	cr.lock()
   701  	cr.inRead = false
   702  	if err != nil {
   703  		cr.handleReadError(err)
   704  	}
   705  	cr.remain -= int64(n)
   706  	cr.unlock()
   707  
   708  	cr.cond.Broadcast()
   709  	return n, err
   710  }
   711  
   712  var (
   713  	bufioReaderPool   sync.Pool
   714  	bufioWriter2kPool sync.Pool
   715  	bufioWriter4kPool sync.Pool
   716  )
   717  
   718  const copyBufPoolSize = 32 * 1024
   719  
   720  var copyBufPool = sync.Pool{New: func() any { return new([copyBufPoolSize]byte) }}
   721  
   722  func getCopyBuf() []byte {
   723  	return copyBufPool.Get().(*[copyBufPoolSize]byte)[:]
   724  }
   725  func putCopyBuf(b []byte) {
   726  	if len(b) != copyBufPoolSize {
   727  		panic("trying to put back buffer of the wrong size in the copyBufPool")
   728  	}
   729  	copyBufPool.Put((*[copyBufPoolSize]byte)(b))
   730  }
   731  
   732  func bufioWriterPool(size int) *sync.Pool {
   733  	switch size {
   734  	case 2 << 10:
   735  		return &bufioWriter2kPool
   736  	case 4 << 10:
   737  		return &bufioWriter4kPool
   738  	}
   739  	return nil
   740  }
   741  
   742  func newBufioReader(r io.Reader) *bufio.Reader {
   743  	if v := bufioReaderPool.Get(); v != nil {
   744  		br := v.(*bufio.Reader)
   745  		br.Reset(r)
   746  		return br
   747  	}
   748  	// Note: if this reader size is ever changed, update
   749  	// TestHandlerBodyClose's assumptions.
   750  	return bufio.NewReader(r)
   751  }
   752  
   753  func putBufioReader(br *bufio.Reader) {
   754  	br.Reset(nil)
   755  	bufioReaderPool.Put(br)
   756  }
   757  
   758  func newBufioWriterSize(w io.Writer, size int) *bufio.Writer {
   759  	pool := bufioWriterPool(size)
   760  	if pool != nil {
   761  		if v := pool.Get(); v != nil {
   762  			bw := v.(*bufio.Writer)
   763  			bw.Reset(w)
   764  			return bw
   765  		}
   766  	}
   767  	return bufio.NewWriterSize(w, size)
   768  }
   769  
   770  func putBufioWriter(bw *bufio.Writer) {
   771  	bw.Reset(nil)
   772  	if pool := bufioWriterPool(bw.Available()); pool != nil {
   773  		pool.Put(bw)
   774  	}
   775  }
   776  
   777  // DefaultMaxHeaderBytes is the maximum permitted size of the headers
   778  // in an HTTP request.
   779  // This can be overridden by setting [Server.MaxHeaderBytes].
   780  const DefaultMaxHeaderBytes = 1 << 20 // 1 MB
   781  
   782  func (srv *Server) maxHeaderBytes() int {
   783  	if srv.MaxHeaderBytes > 0 {
   784  		return srv.MaxHeaderBytes
   785  	}
   786  	return DefaultMaxHeaderBytes
   787  }
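
// A minimal sketch of overriding the default, assuming only the standard
// net/http API; the 64 KB figure is an arbitrary example, not a recommendation:
//
//	srv := &http.Server{
//		Addr:           ":8080",
//		MaxHeaderBytes: 64 << 10, // instead of DefaultMaxHeaderBytes (1 MB)
//	}
//	log.Fatal(srv.ListenAndServe())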
   788  
   789  func (srv *Server) initialReadLimitSize() int64 {
   790  	return int64(srv.maxHeaderBytes()) + 4096 // bufio slop
   791  }
   792  
   793  // tlsHandshakeTimeout returns the time limit permitted for the TLS
   794  // handshake, or zero for unlimited.
   795  //
   796  // It returns the minimum of any positive ReadHeaderTimeout,
   797  // ReadTimeout, or WriteTimeout.
   798  func (srv *Server) tlsHandshakeTimeout() time.Duration {
   799  	var ret time.Duration
   800  	for _, v := range [...]time.Duration{
   801  		srv.ReadHeaderTimeout,
   802  		srv.ReadTimeout,
   803  		srv.WriteTimeout,
   804  	} {
   805  		if v <= 0 {
   806  			continue
   807  		}
   808  		if ret == 0 || v < ret {
   809  			ret = v
   810  		}
   811  	}
   812  	return ret
   813  }
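
// A minimal sketch of a Server whose timeouts feed this calculation, assuming
// only the standard net/http API; with these values the TLS handshake limit
// is the smallest positive one, here ReadHeaderTimeout (2s):
//
//	srv := &http.Server{
//		ReadHeaderTimeout: 2 * time.Second,
//		ReadTimeout:       5 * time.Second,
//		WriteTimeout:      10 * time.Second,
//	}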
   814  
   815  // wrapper around io.ReadCloser which on first read, sends an
   816  // expectContinueReader wraps an io.ReadCloser and, on the first read,
   817  // sends an HTTP/1.1 100 Continue header.
   818  	resp       *response
   819  	readCloser io.ReadCloser
   820  	closed     atomic.Bool
   821  	sawEOF     atomic.Bool
   822  }
   823  
   824  func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
   825  	if ecr.closed.Load() {
   826  		return 0, ErrBodyReadAfterClose
   827  	}
   828  	w := ecr.resp
   829  	if !w.wroteContinue && w.canWriteContinue.Load() && !w.conn.hijacked() {
   830  		w.wroteContinue = true
   831  		w.writeContinueMu.Lock()
   832  		if w.canWriteContinue.Load() {
   833  			w.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
   834  			w.conn.bufw.Flush()
   835  			w.canWriteContinue.Store(false)
   836  		}
   837  		w.writeContinueMu.Unlock()
   838  	}
   839  	n, err = ecr.readCloser.Read(p)
   840  	if err == io.EOF {
   841  		ecr.sawEOF.Store(true)
   842  	}
   843  	return
   844  }
   845  
   846  func (ecr *expectContinueReader) Close() error {
   847  	ecr.closed.Store(true)
   848  	return ecr.readCloser.Close()
   849  }
   850  
   851  // TimeFormat is the time format to use when generating times in HTTP
   852  // headers. It is like [time.RFC1123] but hard-codes GMT as the time
   853  // zone. The time being formatted must be in UTC for Format to
   854  // generate the correct format.
   855  //
   856  // For parsing this time format, see [ParseTime].
   857  const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
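
// A minimal sketch of round-tripping a timestamp in this format, assuming only
// the standard library (net/http, time); note the UTC conversion required above:
//
//	stamp := time.Now().UTC().Format(http.TimeFormat)
//	t, err := http.ParseTime(stamp) // same instant, truncated to seconds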
   858  
   859  // appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat))
   860  func appendTime(b []byte, t time.Time) []byte {
   861  	const days = "SunMonTueWedThuFriSat"
   862  	const months = "JanFebMarAprMayJunJulAugSepOctNovDec"
   863  
   864  	t = t.UTC()
   865  	yy, mm, dd := t.Date()
   866  	hh, mn, ss := t.Clock()
   867  	day := days[3*t.Weekday():]
   868  	mon := months[3*(mm-1):]
   869  
   870  	return append(b,
   871  		day[0], day[1], day[2], ',', ' ',
   872  		byte('0'+dd/10), byte('0'+dd%10), ' ',
   873  		mon[0], mon[1], mon[2], ' ',
   874  		byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ',
   875  		byte('0'+hh/10), byte('0'+hh%10), ':',
   876  		byte('0'+mn/10), byte('0'+mn%10), ':',
   877  		byte('0'+ss/10), byte('0'+ss%10), ' ',
   878  		'G', 'M', 'T')
   879  }
   880  
   881  var errTooLarge = errors.New("http: request too large")
   882  
   883  // Read next request from connection.
   884  func (c *conn) readRequest(ctx context.Context) (w *response, err error) {
   885  	if c.hijacked() {
   886  		return nil, ErrHijacked
   887  	}
   888  
   889  	var (
   890  		wholeReqDeadline time.Time // or zero if none
   891  		hdrDeadline      time.Time // or zero if none
   892  	)
   893  	t0 := time.Now()
   894  	if d := c.server.readHeaderTimeout(); d > 0 {
   895  		hdrDeadline = t0.Add(d)
   896  	}
   897  	if d := c.server.ReadTimeout; d > 0 {
   898  		wholeReqDeadline = t0.Add(d)
   899  	}
   900  	c.rwc.SetReadDeadline(hdrDeadline)
   901  	if d := c.server.WriteTimeout; d > 0 {
   902  		defer func() {
   903  			c.rwc.SetWriteDeadline(time.Now().Add(d))
   904  		}()
   905  	}
   906  
   907  	c.r.setReadLimit(c.server.initialReadLimitSize())
   908  	if c.lastMethod == "POST" {
   909  		// RFC 7230 section 3 tolerance for old buggy clients.
   910  		peek, _ := c.bufr.Peek(4) // ReadRequest will get err below
   911  		c.bufr.Discard(numLeadingCRorLF(peek))
   912  	}
   913  	req, err := readRequest(c.bufr)
   914  	if err != nil {
   915  		if c.r.hitReadLimit() {
   916  			return nil, errTooLarge
   917  		}
   918  		return nil, err
   919  	}
   920  
   921  	if !http1ServerSupportsRequest(req) {
   922  		return nil, statusError{StatusHTTPVersionNotSupported, "unsupported protocol version"}
   923  	}
   924  
   925  	c.lastMethod = req.Method
   926  	c.r.setInfiniteReadLimit()
   927  
   928  	hosts, haveHost := req.Header["Host"]
   929  	isH2Upgrade := req.isH2Upgrade()
   930  	if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade && req.Method != "CONNECT" {
   931  		return nil, badRequestError("missing required Host header")
   932  	}
   933  	if len(hosts) == 1 && !httpguts.ValidHostHeader(hosts[0]) {
   934  		return nil, badRequestError("malformed Host header")
   935  	}
   936  	for k, vv := range req.Header {
   937  		if !httpguts.ValidHeaderFieldName(k) {
   938  			return nil, badRequestError("invalid header name")
   939  		}
   940  		for _, v := range vv {
   941  			if !httpguts.ValidHeaderFieldValue(v) {
   942  				return nil, badRequestError("invalid header value")
   943  			}
   944  		}
   945  	}
   946  	delete(req.Header, "Host")
   947  
   948  	ctx, cancelCtx := context.WithCancel(ctx)
   949  	req.ctx = ctx
   950  	req.RemoteAddr = c.remoteAddr
   951  	req.TLS = c.tlsState
   952  	if body, ok := req.Body.(*body); ok {
   953  		body.doEarlyClose = true
   954  	}
   955  
   956  	// Adjust the read deadline if necessary.
   957  	if !hdrDeadline.Equal(wholeReqDeadline) {
   958  		c.rwc.SetReadDeadline(wholeReqDeadline)
   959  	}
   960  
   961  	w = &response{
   962  		conn:          c,
   963  		cancelCtx:     cancelCtx,
   964  		req:           req,
   965  		reqBody:       req.Body,
   966  		handlerHeader: make(Header),
   967  		contentLength: -1,
   968  		closeNotifyCh: make(chan bool, 1),
   969  
   970  		// We populate these ahead of time so we're not
   971  		// reading from req.Header after their Handler starts
   972  		// and maybe mutates it (Issue 14940)
   973  		wants10KeepAlive: req.wantsHttp10KeepAlive(),
   974  		wantsClose:       req.wantsClose(),
   975  	}
   976  	if isH2Upgrade {
   977  		w.closeAfterReply = true
   978  	}
   979  	w.cw.res = w
   980  	w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize)
   981  	return w, nil
   982  }
   983  
   984  // http1ServerSupportsRequest reports whether Go's HTTP/1.x server
   985  // supports the given request.
   986  func http1ServerSupportsRequest(req *Request) bool {
   987  	if req.ProtoMajor == 1 {
   988  		return true
   989  	}
   990  	// Accept "PRI * HTTP/2.0" upgrade requests, so Handlers can
   991  	// wire up their own HTTP/2 upgrades.
   992  	if req.ProtoMajor == 2 && req.ProtoMinor == 0 &&
   993  		req.Method == "PRI" && req.RequestURI == "*" {
   994  		return true
   995  	}
   996  	// Reject HTTP/0.x, and all other HTTP/2+ requests (which
   997  	// aren't encoded in ASCII anyway).
   998  	return false
   999  }
  1000  
  1001  func (w *response) Header() http.Header {
  1002  	if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader {
  1003  		// Accessing the header between logically writing it
  1004  		// and physically writing it means we need to allocate
  1005  		// a clone to snapshot the logically written state.
  1006  		w.cw.header = w.handlerHeader.Clone()
  1007  	}
  1008  	w.calledHeader = true
  1009  	return w.handlerHeader.toHttp()
  1010  }
  1011  
  1012  // maxPostHandlerReadBytes is the max number of Request.Body bytes not
  1013  // consumed by a handler that the server will read from the client
  1014  // in order to keep a connection alive. If there are more bytes than
  1015  // this, then the server, to be paranoid, instead sends a "Connection:
  1016  // close" response.
  1017  //
  1018  // This number is approximately what a typical machine's TCP buffer
  1019  // size is anyway. (If we have the bytes on the machine, we might as
  1020  // well read them.)
  1021  const maxPostHandlerReadBytes = 256 << 10
  1022  
  1023  func checkWriteHeaderCode(code int) {
  1024  	// Issue 22880: require valid WriteHeader status codes.
  1025  	// For now we only enforce that it's three digits.
  1026  	// In the future we might block things over 599 (600 and above aren't defined
  1027  	// at https://httpwg.org/specs/rfc7231.html#status.codes).
  1028  	// But for now any three digits.
  1029  	//
  1030  	// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
  1031  	// no equivalent bogus thing we can realistically send in HTTP/2,
  1032  	// so we'll consistently panic instead and help people find their bugs
  1033  	// early. (We can't return an error from WriteHeader even if we wanted to.)
  1034  	if code < 100 || code > 999 {
  1035  		panic(fmt.Sprintf("invalid WriteHeader code %v", code))
  1036  	}
  1037  }
  1038  
  1039  // relevantCaller searches the call stack for the first function outside of net/http.
  1040  // The purpose of this function is to provide more helpful error messages.
  1041  func relevantCaller() runtime.Frame {
  1042  	pc := make([]uintptr, 16)
  1043  	n := runtime.Callers(1, pc)
  1044  	frames := runtime.CallersFrames(pc[:n])
  1045  	var frame runtime.Frame
  1046  	for {
  1047  		frame, more := frames.Next()
  1048  		if !strings.HasPrefix(frame.Function, "net/http.") {
  1049  			return frame
  1050  		}
  1051  		if !more {
  1052  			break
  1053  		}
  1054  	}
  1055  	return frame
  1056  }
  1057  
  1058  func (w *response) WriteHeader(code int) {
  1059  	if w.conn.hijacked() {
  1060  		caller := relevantCaller()
  1061  		w.conn.server.logf("http: response.WriteHeader on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
  1062  		return
  1063  	}
  1064  	if w.wroteHeader {
  1065  		caller := relevantCaller()
  1066  		w.conn.server.logf("http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
  1067  		return
  1068  	}
  1069  	checkWriteHeaderCode(code)
  1070  
  1071  	// Handle informational headers.
  1072  	//
  1073  	// We shouldn't send any further headers after 101 Switching Protocols,
  1074  	// so it takes the non-informational path.
  1075  	if code >= 100 && code <= 199 && code != StatusSwitchingProtocols {
  1076  		// Prevent a potential race with an automatically-sent 100 Continue triggered by Request.Body.Read()
  1077  		if code == 100 && w.canWriteContinue.Load() {
  1078  			w.writeContinueMu.Lock()
  1079  			w.canWriteContinue.Store(false)
  1080  			w.writeContinueMu.Unlock()
  1081  		}
  1082  
  1083  		writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:])
  1084  
  1085  		// Per RFC 8297 we must not clear the current header map
  1086  		w.handlerHeader.WriteSubset(w.conn.bufw, excludedHeadersNoBody)
  1087  		w.conn.bufw.Write(crlf)
  1088  		w.conn.bufw.Flush()
  1089  
  1090  		return
  1091  	}
  1092  
  1093  	w.wroteHeader = true
  1094  	w.status = code
  1095  
  1096  	if w.calledHeader && w.cw.header == nil {
  1097  		w.cw.header = w.handlerHeader.Clone()
  1098  	}
  1099  
  1100  	if cl := w.handlerHeader.get("Content-Length"); cl != "" {
  1101  		v, err := strconv.ParseInt(cl, 10, 64)
  1102  		if err == nil && v >= 0 {
  1103  			w.contentLength = v
  1104  		} else {
  1105  			w.conn.server.logf("http: invalid Content-Length of %q", cl)
  1106  			w.handlerHeader.Del("Content-Length")
  1107  		}
  1108  	}
  1109  }
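
// A minimal sketch of sending an informational response through the 1xx path
// above, assuming only the standard net/http API (103 Early Hints):
//
//	func hints(w http.ResponseWriter, r *http.Request) {
//		w.Header().Add("Link", "</style.css>; rel=preload; as=style")
//		w.WriteHeader(http.StatusEarlyHints) // informational; the header map is not cleared
//		w.Header().Set("Content-Type", "text/html; charset=utf-8")
//		w.WriteHeader(http.StatusOK)
//		io.WriteString(w, "<html><body>hello</body></html>")
//	}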
  1110  
  1111  // extraHeader is the set of headers sometimes added by chunkWriter.writeHeader.
  1112  // This type is used to avoid extra allocations from cloning and/or populating
  1113  // the response Header map and all its 1-element slices.
  1114  type extraHeader struct {
  1115  	contentType      string
  1116  	connection       string
  1117  	transferEncoding string
  1118  	date             []byte // written if not nil
  1119  	contentLength    []byte // written if not nil
  1120  }
  1121  
  1122  // Sorted the same as extraHeader.Write's loop.
  1123  var extraHeaderKeys = [][]byte{
  1124  	[]byte("Content-Type"),
  1125  	[]byte("Connection"),
  1126  	[]byte("Transfer-Encoding"),
  1127  }
  1128  
  1129  var (
  1130  	headerContentLength = []byte("Content-Length: ")
  1131  	headerDate          = []byte("Date: ")
  1132  )
  1133  
  1134  // Write writes the headers described in h to w.
  1135  //
  1136  // This method has a value receiver, despite the somewhat large size
  1137  // of h, because it prevents an allocation. The escape analysis isn't
  1138  // smart enough to realize this function doesn't mutate h.
  1139  func (h extraHeader) Write(w *bufio.Writer) {
  1140  	if h.date != nil {
  1141  		w.Write(headerDate)
  1142  		w.Write(h.date)
  1143  		w.Write(crlf)
  1144  	}
  1145  	if h.contentLength != nil {
  1146  		w.Write(headerContentLength)
  1147  		w.Write(h.contentLength)
  1148  		w.Write(crlf)
  1149  	}
  1150  	for i, v := range []string{h.contentType, h.connection, h.transferEncoding} {
  1151  		if v != "" {
  1152  			w.Write(extraHeaderKeys[i])
  1153  			w.Write(colonSpace)
  1154  			w.WriteString(v)
  1155  			w.Write(crlf)
  1156  		}
  1157  	}
  1158  }
  1159  
  1160  // writeHeader finalizes the header sent to the client and writes it
  1161  // to cw.res.conn.bufw.
  1162  //
  1163  // p is not written by writeHeader, but is the first chunk of the body
  1164  // that will be written. It is sniffed for a Content-Type if none is
  1165  // set explicitly. It's also used to set the Content-Length, if the
  1166  // total body size was small and the handler has already finished
  1167  // running.
  1168  func (cw *chunkWriter) writeHeader(p []byte) {
  1169  	if cw.wroteHeader {
  1170  		return
  1171  	}
  1172  	cw.wroteHeader = true
  1173  
  1174  	w := cw.res
  1175  	keepAlivesEnabled := w.conn.server.doKeepAlives()
  1176  	isHEAD := w.req.Method == "HEAD"
  1177  
  1178  	// header is written out to w.conn.bufw below. Depending on the
  1179  	// state of the handler, we either own the map or not. If we
  1180  	// don't own it, the exclude map is created lazily for
  1181  	// WriteSubset to remove headers. The setHeader struct holds
  1182  	// headers we need to add.
  1183  	header := cw.header
  1184  	owned := header != nil
  1185  	if !owned {
  1186  		header = w.handlerHeader
  1187  	}
  1188  	var excludeHeader map[string]bool
  1189  	delHeader := func(key string) {
  1190  		if owned {
  1191  			header.Del(key)
  1192  			return
  1193  		}
  1194  		if _, ok := header[key]; !ok {
  1195  			return
  1196  		}
  1197  		if excludeHeader == nil {
  1198  			excludeHeader = make(map[string]bool)
  1199  		}
  1200  		excludeHeader[key] = true
  1201  	}
  1202  	var setHeader extraHeader
  1203  
  1204  	// Don't write out the fake "Trailer:foo" keys. See TrailerPrefix.
  1205  	trailers := false
  1206  	for k := range cw.header {
  1207  		if strings.HasPrefix(k, TrailerPrefix) {
  1208  			if excludeHeader == nil {
  1209  				excludeHeader = make(map[string]bool)
  1210  			}
  1211  			excludeHeader[k] = true
  1212  			trailers = true
  1213  		}
  1214  	}
  1215  	for _, v := range cw.header["Trailer"] {
  1216  		trailers = true
  1217  		foreachHeaderElement(v, cw.res.declareTrailer)
  1218  	}
  1219  
  1220  	te := header.get("Transfer-Encoding")
  1221  	hasTE := te != ""
  1222  
  1223  	// If the handler is done but never sent a Content-Length
  1224  	// response header and this is our first (and last) write, set
  1225  	// it, even to zero. This helps HTTP/1.0 clients keep their
  1226  	// "keep-alive" connections alive.
  1227  	// Exceptions: 304/204/1xx responses never get Content-Length, and if
  1228  	// it was a HEAD request, we don't know the difference between
  1229  	// 0 actual bytes and 0 bytes because the handler noticed it
  1230  	// was a HEAD request and chose not to write anything. So for
  1231  	// HEAD, the handler should either write the Content-Length or
  1232  	// write non-zero bytes. If it's actually 0 bytes and the
  1233  	// handler never looked at the Request.Method, we just don't
  1234  	// send a Content-Length header.
  1235  	// Further, we don't send an automatic Content-Length if they
  1236  	// set a Transfer-Encoding, because they're generally incompatible.
  1237  	if w.handlerDone.Load() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && !header.has("Content-Length") && (!isHEAD || len(p) > 0) {
  1238  		w.contentLength = int64(len(p))
  1239  		setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
  1240  	}
  1241  
  1242  	// If this was an HTTP/1.0 request with keep-alive and we sent a
  1243  	// Content-Length back, we can make this a keep-alive response ...
  1244  	if w.wants10KeepAlive && keepAlivesEnabled {
  1245  		sentLength := header.get("Content-Length") != ""
  1246  		if sentLength && header.get("Connection") == "keep-alive" {
  1247  			w.closeAfterReply = false
  1248  		}
  1249  	}
  1250  
  1251  	// Check for an explicit (and valid) Content-Length header.
  1252  	hasCL := w.contentLength != -1
  1253  
  1254  	if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) {
  1255  		_, connectionHeaderSet := header["Connection"]
  1256  		if !connectionHeaderSet {
  1257  			setHeader.connection = "keep-alive"
  1258  		}
  1259  	} else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose {
  1260  		w.closeAfterReply = true
  1261  	}
  1262  
  1263  	if header.get("Connection") == "close" || !keepAlivesEnabled {
  1264  		w.closeAfterReply = true
  1265  	}
  1266  
  1267  	// If the client wanted a 100-continue but we never sent it to
  1268  	// them (or, more strictly: we never finished reading their
  1269  	// request body), don't reuse this connection because it's now
  1270  	// in an unknown state: we might be sending this response at
  1271  	// the same time the client is now sending its request body
  1272  	// after a timeout.  (Some HTTP clients send Expect:
  1273  	// 100-continue but knowing that some servers don't support
  1274  	// it, the clients set a timer and send the body later anyway)
  1275  	// If we haven't seen EOF, we can't skip over the unread body
  1276  	// because we don't know if the next bytes on the wire will be
  1277  	// the body-following-the-timer or the subsequent request.
  1278  	// See Issue 11549.
  1279  	if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF.Load() {
  1280  		w.closeAfterReply = true
  1281  	}
  1282  
  1283  	// We do this by default because there are a number of clients that
  1284  	// send a full request before starting to read the response, and they
  1285  	// can deadlock if we start writing the response with unconsumed body
  1286  	// remaining. See Issue 15527 for some history.
  1287  	//
  1288  	// If full duplex mode has been enabled with ResponseController.EnableFullDuplex,
  1289  	// then leave the request body alone.
  1290  	if w.req.ContentLength != 0 && !w.closeAfterReply && !w.fullDuplex {
  1291  		var discard, tooBig bool
  1292  
  1293  		switch bdy := w.req.Body.(type) {
  1294  		case *expectContinueReader:
  1295  			if bdy.resp.wroteContinue {
  1296  				discard = true
  1297  			}
  1298  		case *body:
  1299  			bdy.mu.Lock()
  1300  			switch {
  1301  			case bdy.closed:
  1302  				if !bdy.sawEOF {
  1303  					// Body was closed in handler with non-EOF error.
  1304  					w.closeAfterReply = true
  1305  				}
  1306  			case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes:
  1307  				tooBig = true
  1308  			default:
  1309  				discard = true
  1310  			}
  1311  			bdy.mu.Unlock()
  1312  		default:
  1313  			discard = true
  1314  		}
  1315  
  1316  		if discard {
  1317  			_, err := io.CopyN(io.Discard, w.reqBody, maxPostHandlerReadBytes+1)
  1318  			switch err {
  1319  			case nil:
  1320  				// There must be even more data left over.
  1321  				tooBig = true
  1322  			case ErrBodyReadAfterClose:
  1323  				// Body was already consumed and closed.
  1324  			case io.EOF:
  1325  				// The remaining body was just consumed, close it.
  1326  				err = w.reqBody.Close()
  1327  				if err != nil {
  1328  					w.closeAfterReply = true
  1329  				}
  1330  			default:
  1331  				// Some other kind of error occurred, like a read timeout, or
  1332  				// corrupt chunked encoding. In any case, whatever remains
  1333  				// on the wire must not be parsed as another HTTP request.
  1334  				w.closeAfterReply = true
  1335  			}
  1336  		}
  1337  
  1338  		if tooBig {
  1339  			w.requestTooLarge()
  1340  			delHeader("Connection")
  1341  			setHeader.connection = "close"
  1342  		}
  1343  	}
  1344  
  1345  	code := w.status
  1346  	if bodyAllowedForStatus(code) {
  1347  		// If no content type, apply sniffing algorithm to body.
  1348  		_, haveType := header["Content-Type"]
  1349  
  1350  		// If the Content-Encoding was set and is non-blank,
  1351  		// we shouldn't sniff the body. See Issue 31753.
  1352  		ce := header.Get("Content-Encoding")
  1353  		hasCE := len(ce) > 0
  1354  		if !hasCE && !haveType && !hasTE && len(p) > 0 {
  1355  			setHeader.contentType = DetectContentType(p)
  1356  		}
  1357  	} else {
  1358  		for _, k := range suppressedHeaders(code) {
  1359  			delHeader(k)
  1360  		}
  1361  	}
  1362  
  1363  	if !header.has("Date") {
  1364  		setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
  1365  	}
  1366  
  1367  	if hasCL && hasTE && te != "identity" {
  1368  		// TODO: return an error if WriteHeader gets a return parameter
  1369  		// For now just ignore the Content-Length.
  1370  		w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
  1371  			te, w.contentLength)
  1372  		delHeader("Content-Length")
  1373  		hasCL = false
  1374  	}
  1375  
  1376  	if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) || code == StatusNoContent {
  1377  		// Response has no body.
  1378  		delHeader("Transfer-Encoding")
  1379  	} else if hasCL {
  1380  		// Content-Length has been provided, so no chunking is to be done.
  1381  		delHeader("Transfer-Encoding")
  1382  	} else if w.req.ProtoAtLeast(1, 1) {
  1383  		// HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no
  1384  		// content-length has been provided. The connection must be closed after the
  1385  		// reply is written, and no chunking is to be done. This is the setup
  1386  		// recommended in the Server-Sent Events candidate recommendation 11,
  1387  		// section 8.
  1388  		if hasTE && te == "identity" {
  1389  			cw.chunking = false
  1390  			w.closeAfterReply = true
  1391  			delHeader("Transfer-Encoding")
  1392  		} else {
  1393  			// HTTP/1.1 or greater: use chunked transfer encoding
  1394  			// to avoid closing the connection at EOF.
  1395  			cw.chunking = true
  1396  			setHeader.transferEncoding = "chunked"
  1397  			if hasTE && te == "chunked" {
  1398  				// We will send the chunked Transfer-Encoding header later.
  1399  				delHeader("Transfer-Encoding")
  1400  			}
  1401  		}
  1402  	} else {
  1403  		// HTTP version < 1.1: cannot do chunked transfer
  1404  		// encoding and we don't know the Content-Length so
  1405  		// signal EOF by closing connection.
  1406  		w.closeAfterReply = true
  1407  		delHeader("Transfer-Encoding") // in case already set
  1408  	}
  1409  
  1410  	// Cannot use Content-Length with non-identity Transfer-Encoding.
  1411  	if cw.chunking {
  1412  		delHeader("Content-Length")
  1413  	}
  1414  	if !w.req.ProtoAtLeast(1, 0) {
  1415  		return
  1416  	}
  1417  
  1418  	// Only override the Connection header if it is not a successful
  1419  	// protocol switch response and if KeepAlives are not enabled.
  1420  	// See https://golang.org/issue/36381.
  1421  	delConnectionHeader := w.closeAfterReply &&
  1422  		(!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) &&
  1423  		!isProtocolSwitchResponse(w.status, header)
  1424  	if delConnectionHeader {
  1425  		delHeader("Connection")
  1426  		if w.req.ProtoAtLeast(1, 1) {
  1427  			setHeader.connection = "close"
  1428  		}
  1429  	}
  1430  
  1431  	writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:])
  1432  	cw.header.WriteSubset(w.conn.bufw, excludeHeader)
  1433  	setHeader.Write(w.conn.bufw)
  1434  	w.conn.bufw.Write(crlf)
  1435  }
  1436  
  1437  // foreachHeaderElement splits v according to the "#rule" construction
  1438  // in RFC 7230 section 7 and calls fn for each non-empty element.
  1439  func foreachHeaderElement(v string, fn func(string)) {
  1440  	v = textproto.TrimString(v)
  1441  	if v == "" {
  1442  		return
  1443  	}
  1444  	if !strings.Contains(v, ",") {
  1445  		fn(v)
  1446  		return
  1447  	}
  1448  	for _, f := range strings.Split(v, ",") {
  1449  		if f = textproto.TrimString(f); f != "" {
  1450  			fn(f)
  1451  		}
  1452  	}
  1453  }
  1454  
  1455  // writeStatusLine writes an HTTP/1.x Status-Line (RFC 7230 Section 3.1.2)
  1456  // to bw. is11 is whether the HTTP request is HTTP/1.1. false means HTTP/1.0.
  1457  // code is the response status code.
  1458  // scratch is an optional scratch buffer. If it has at least capacity 3, it's used.
  1459  func writeStatusLine(bw *bufio.Writer, is11 bool, code int, scratch []byte) {
  1460  	if is11 {
  1461  		bw.WriteString("HTTP/1.1 ")
  1462  	} else {
  1463  		bw.WriteString("HTTP/1.0 ")
  1464  	}
  1465  	if text := StatusText(code); text != "" {
  1466  		bw.Write(strconv.AppendInt(scratch[:0], int64(code), 10))
  1467  		bw.WriteByte(' ')
  1468  		bw.WriteString(text)
  1469  		bw.WriteString("\r\n")
  1470  	} else {
  1471  		// don't worry about performance
  1472  		fmt.Fprintf(bw, "%03d status code %d\r\n", code, code)
  1473  	}
  1474  }
  1475  
  1476  // bodyAllowed reports whether a Write is allowed for this response type.
  1477  // It's illegal to call this before the header has been flushed.
  1478  func (w *response) bodyAllowed() bool {
  1479  	if !w.wroteHeader {
  1480  		panic("")
  1481  	}
  1482  	return bodyAllowedForStatus(w.status)
  1483  }
  1484  
  1485  // The Life Of A Write is like this:
  1486  //
  1487  // Handler starts. No header has been sent. The handler can either
  1488  // write a header, or just start writing. Writing before sending a header
  1489  // sends an implicitly empty 200 OK header.
  1490  //
  1491  // If the handler didn't declare a Content-Length up front, we either
  1492  // go into chunking mode or, if the handler finishes running before
  1493  // the chunking buffer size, we compute a Content-Length and send that
  1494  // in the header instead.
  1495  //
  1496  // Likewise, if the handler didn't set a Content-Type, we sniff that
  1497  // from the initial chunk of output.
  1498  //
  1499  // The Writers are wired together like:
  1500  //
  1501  //  1. *response (the ResponseWriter) ->
  1502  //  2. (*response).w, a [*bufio.Writer] of bufferBeforeChunkingSize bytes ->
  1503  //  3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type)
  1504  //     and which writes the chunk headers, if needed ->
  1505  //  4. conn.bufw, a *bufio.Writer of default (4kB) bytes, writing to ->
  1506  //  5. checkConnErrorWriter{c}, which notes any non-nil error on Write
  1507  //     and populates c.werr with it if so, but otherwise writes to ->
  1508  //  6. the rwc, the [net.Conn].
  1509  //
  1510  // TODO(bradfitz): short-circuit some of the buffering when the
  1511  // initial header contains both a Content-Type and Content-Length.
  1512  // Also short-circuit in (1) when the header's been sent and not in
  1513  // chunking mode, writing directly to (4) instead, if (2) has no
  1514  // buffered data. More generally, we could short-circuit from (1) to
  1515  // (3) even in chunking mode if the write size from (1) is over some
  1516  // threshold and nothing is in (2).  The answer might be mostly making
  1517  // bufferBeforeChunkingSize smaller and having bufio's fast-paths deal
  1518  // with this instead.
  1519  func (w *response) Write(data []byte) (n int, err error) {
  1520  	return w.write(len(data), data, "")
  1521  }
  1522  
  1523  func (w *response) WriteString(data string) (n int, err error) {
  1524  	return w.write(len(data), nil, data)
  1525  }
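
// Illustrative sketch (not part of the original source) of the write pipeline
// described above: a small response that finishes before filling the chunking
// buffer gets a computed Content-Length, while flushing early (or writing a
// large body) switches an HTTP/1.1 response to chunked encoding. The handlers
// are assumed examples; Flusher is the standard net/http flushing interface.
//
//	func small(w ResponseWriter, r *http.Request) {
//		io.WriteString(w, "hi") // buffered; Content-Length: 2 computed when the handler returns
//	}
//
//	func streamed(w ResponseWriter, r *http.Request) {
//		io.WriteString(w, "first part\n")
//		if f, ok := w.(Flusher); ok {
//			f.Flush() // headers go out now; remaining writes are chunked
//		}
//		io.WriteString(w, "second part\n")
//	}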
  1526  
  1527  // Only one of dataB and dataS is used; the other is left as its zero value.
  1528  func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) {
  1529  	if w.conn.hijacked() {
  1530  		if lenData > 0 {
  1531  			caller := relevantCaller()
  1532  			w.conn.server.logf("http: response.Write on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
  1533  		}
  1534  		return 0, ErrHijacked
  1535  	}
  1536  
  1537  	if w.canWriteContinue.Load() {
  1538  		// Body reader wants to write 100 Continue but hasn't yet.
  1539  		// Tell it not to. The store must be done while holding the lock
  1540  		// because the lock makes sure that there is not an active write
  1541  		// this very moment.
  1542  		w.writeContinueMu.Lock()
  1543  		w.canWriteContinue.Store(false)
  1544  		w.writeContinueMu.Unlock()
  1545  	}
  1546  
  1547  	if !w.wroteHeader {
  1548  		w.WriteHeader(StatusOK)
  1549  	}
  1550  	if lenData == 0 {
  1551  		return 0, nil
  1552  	}
  1553  	if !w.bodyAllowed() {
  1554  		return 0, ErrBodyNotAllowed
  1555  	}
  1556  
  1557  	w.written += int64(lenData) // ignoring errors, for errorKludge
  1558  	if w.contentLength != -1 && w.written > w.contentLength {
  1559  		return 0, ErrContentLength
  1560  	}
  1561  	if dataB != nil {
  1562  		return w.w.Write(dataB)
  1563  	} else {
  1564  		return w.w.WriteString(dataS)
  1565  	}
  1566  }
  1567  
  1568  func (w *response) finishRequest() {
  1569  	w.handlerDone.Store(true)
  1570  
  1571  	if !w.wroteHeader {
  1572  		w.WriteHeader(StatusOK)
  1573  	}
  1574  
  1575  	w.w.Flush()
  1576  	putBufioWriter(w.w)
  1577  	w.cw.close()
  1578  	w.conn.bufw.Flush()
  1579  
  1580  	w.conn.r.abortPendingRead()
  1581  
  1582  	// Close the body (regardless of w.closeAfterReply) so we can
  1583  	// re-use its bufio.Reader later safely.
  1584  	w.reqBody.Close()
  1585  
  1586  	if w.req.MultipartForm != nil {
  1587  		w.req.MultipartForm.RemoveAll()
  1588  	}
  1589  }
  1590  
  1591  // shouldReuseConnection reports whether the underlying TCP connection can be reused.
  1592  // It must only be called after the handler is done executing.
  1593  func (w *response) shouldReuseConnection() bool {
  1594  	if w.closeAfterReply {
  1595  		// The request or something set while executing the
  1596  		// handler indicated we shouldn't reuse this
  1597  		// connection.
  1598  		return false
  1599  	}
  1600  
  1601  	if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written {
  1602  		// Did not write enough. Avoid getting out of sync.
  1603  		return false
  1604  	}
  1605  
  1606  	// There was some error writing to the underlying connection
  1607  	// during the request, so don't re-use this conn.
  1608  	if w.conn.werr != nil {
  1609  		return false
  1610  	}
  1611  
  1612  	if w.closedRequestBodyEarly() {
  1613  		return false
  1614  	}
  1615  
  1616  	return true
  1617  }
  1618  
  1619  func (w *response) closedRequestBodyEarly() bool {
  1620  	body, ok := w.req.Body.(*body)
  1621  	return ok && body.didEarlyClose()
  1622  }
  1623  
  1624  func (w *response) Flush() {
  1625  	w.FlushError()
  1626  }
  1627  
  1628  func (w *response) FlushError() error {
  1629  	if !w.wroteHeader {
  1630  		w.WriteHeader(StatusOK)
  1631  	}
  1632  	err := w.w.Flush()
  1633  	e2 := w.cw.flush()
  1634  	if err == nil {
  1635  		err = e2
  1636  	}
  1637  	return err
  1638  }
  1639  
  1640  func (c *conn) finalFlush() {
  1641  	if c.bufr != nil {
  1642  		// Steal the bufio.Reader (~4KB worth of memory) and its associated
  1643  		// reader for a future connection.
  1644  		putBufioReader(c.bufr)
  1645  		c.bufr = nil
  1646  	}
  1647  
  1648  	if c.bufw != nil {
  1649  		c.bufw.Flush()
  1650  		// Steal the bufio.Writer (~4KB worth of memory) and its associated
  1651  		// writer for a future connection.
  1652  		putBufioWriter(c.bufw)
  1653  		c.bufw = nil
  1654  	}
  1655  }
  1656  
  1657  // Close the connection.
  1658  func (c *conn) close() {
  1659  	c.finalFlush()
  1660  	c.rwc.Close()
  1661  }
  1662  
  1663  // rstAvoidanceDelay is the amount of time we sleep after closing the
  1664  // write side of a TCP connection before closing the entire socket.
  1665  // By sleeping, we increase the chances that the client sees our FIN
  1666  // and processes its final data before they process the subsequent RST
  1667  // from closing a connection with known unread data.
  1668  // This RST seems to occur mostly on BSD systems. (And Windows?)
  1669  // This timeout is somewhat arbitrary (~latency around the planet),
  1670  // and may be modified by tests.
  1671  //
  1672  // TODO(bcmills): This should arguably be a server configuration parameter,
  1673  // not a hard-coded value.
  1674  var rstAvoidanceDelay = 500 * time.Millisecond
  1675  
  1676  type closeWriter interface {
  1677  	CloseWrite() error
  1678  }
  1679  
  1680  var _ closeWriter = (*net.TCPConn)(nil)
  1681  
  1682  // closeWriteAndWait flushes any outstanding data and sends a FIN packet (if
  1683  // client is connected via TCP), signaling that we're done. We then
  1684  // pause for a bit, hoping the client processes it before any
  1685  // subsequent RST.
  1686  //
  1687  // See https://golang.org/issue/3595
  1688  func (c *conn) closeWriteAndWait() {
  1689  	c.finalFlush()
  1690  	if tcp, ok := c.rwc.(closeWriter); ok {
  1691  		tcp.CloseWrite()
  1692  	}
  1693  
  1694  	// When we return from closeWriteAndWait, the caller will fully close the
  1695  	// connection. If client is still writing to the connection, this will cause
  1696  	// the write to fail with ECONNRESET or similar. Unfortunately, many TCP
  1697  	// implementations will also drop unread packets from the client's read buffer
  1698  	// when a write fails, causing our final response to be truncated away too.
  1699  	//
  1700  	// As a result, https://www.rfc-editor.org/rfc/rfc7230#section-6.6 recommends
  1701  	// that “[t]he server … continues to read from the connection until it
  1702  	// receives a corresponding close by the client, or until the server is
  1703  	// reasonably certain that its own TCP stack has received the client's
  1704  	// acknowledgement of the packet(s) containing the server's last response.”
  1705  	//
  1706  	// Unfortunately, we have no straightforward way to be “reasonably certain”
  1707  	// that we have received the client's ACK, and at any rate we don't want to
  1708  	// allow a misbehaving client to soak up server connections indefinitely by
  1709  	// withholding an ACK, nor do we want to go through the complexity or overhead
  1710  	// of using low-level APIs to figure out when a TCP round-trip has completed.
  1711  	//
  1712  	// Instead, we declare that we are “reasonably certain” that we received the
  1713  	// ACK if rstAvoidanceDelay has elapsed.
  1714  	time.Sleep(rstAvoidanceDelay)
  1715  }
  1716  
  1717  // validNextProto reports whether the proto is a valid ALPN protocol name.
  1718  // Everything is valid except the empty string and built-in protocol types,
  1719  // so that those can't be overridden with alternate implementations.
  1720  func validNextProto(proto string) bool {
  1721  	switch proto {
  1722  	case "", "http/1.1", "http/1.0":
  1723  		return false
  1724  	}
  1725  	return true
  1726  }
  1727  
  1728  const (
  1729  	runHooks  = true
  1730  	skipHooks = false
  1731  )
  1732  
  1733  func (c *conn) setState(nc net.Conn, state ConnState, runHook bool) {
  1734  	srv := c.server
  1735  	switch state {
  1736  	case StateNew:
  1737  		srv.trackConn(c, true)
  1738  	case StateHijacked, StateClosed:
  1739  		srv.trackConn(c, false)
  1740  	}
  1741  	if state > 0xff || state < 0 {
  1742  		panic("internal error")
  1743  	}
  1744  	packedState := uint64(time.Now().Unix()<<8) | uint64(state)
  1745  	c.curState.Store(packedState)
  1746  	if !runHook {
  1747  		return
  1748  	}
  1749  	if hook := srv.ConnState; hook != nil {
  1750  		hook(nc, state)
  1751  	}
  1752  }
  1753  
  1754  func (c *conn) getState() (state ConnState, unixSec int64) {
  1755  	packedState := c.curState.Load()
  1756  	return ConnState(packedState & 0xff), int64(packedState >> 8)
  1757  }
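
// Illustrative sketch (not part of the original source): curState packs the
// time of the last state change and the ConnState into a single uint64, with
// the state in the low 8 bits and the unix time in the remaining bits.
//
//	packed := uint64(time.Now().Unix()<<8) | uint64(StateIdle)
//	state := ConnState(packed & 0xff) // StateIdle
//	unixSec := int64(packed >> 8)     // when the transition happened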
  1758  
  1759  // badRequestError is a literal string (used by the server in HTML,
  1760  // unescaped) to tell the user why their request was bad. It should
  1761  // be plain text without user info or other embedded errors.
  1762  func badRequestError(e string) error { return statusError{StatusBadRequest, e} }
  1763  
  1764  // statusError is an error used to respond to a request with an HTTP status.
  1765  // The text should be plain text without user info or other embedded errors.
  1766  type statusError struct {
  1767  	code int
  1768  	text string
  1769  }
  1770  
  1771  func (e statusError) Error() string { return StatusText(e.code) + ": " + e.text }
  1772  
  1773  // ErrAbortHandler is a sentinel panic value to abort a handler.
  1774  // While any panic from ServeHTTP aborts the response to the client,
  1775  // panicking with ErrAbortHandler also suppresses logging of a stack
  1776  // trace to the server's error log.
  1777  var ErrAbortHandler = errors.New("net/http: abort Handler")
  1778  
  1779  // isCommonNetReadError reports whether err is a common error
  1780  // encountered during reading a request off the network when the
  1781  // client has gone away or had its read fail somehow. This is used to
  1782  // determine which logs are interesting enough to log about.
  1783  func isCommonNetReadError(err error) bool {
  1784  	if err == io.EOF {
  1785  		return true
  1786  	}
  1787  	if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
  1788  		return true
  1789  	}
  1790  	if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
  1791  		return true
  1792  	}
  1793  	return false
  1794  }
  1795  
  1796  // Serve a new connection.
  1797  func (c *conn) serve(ctx context.Context) {
  1798  	if ra := c.rwc.RemoteAddr(); ra != nil {
  1799  		c.remoteAddr = ra.String()
  1800  	}
  1801  	ctx = context.WithValue(ctx, LocalAddrContextKey, c.rwc.LocalAddr())
  1802  	var inFlightResponse *response
  1803  	defer func() {
  1804  		if err := recover(); err != nil && err != ErrAbortHandler {
  1805  			const size = 64 << 10
  1806  			buf := make([]byte, size)
  1807  			buf = buf[:runtime.Stack(buf, false)]
  1808  			c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf)
  1809  		}
  1810  		if inFlightResponse != nil {
  1811  			inFlightResponse.cancelCtx()
  1812  		}
  1813  		if !c.hijacked() {
  1814  			if inFlightResponse != nil {
  1815  				inFlightResponse.conn.r.abortPendingRead()
  1816  				inFlightResponse.reqBody.Close()
  1817  			}
  1818  			c.close()
  1819  			c.setState(c.rwc, StateClosed, runHooks)
  1820  		}
  1821  	}()
  1822  
  1823  	if tlsConn, ok := c.rwc.(*tls.Conn); ok {
  1824  		tlsTO := c.server.tlsHandshakeTimeout()
  1825  		if tlsTO > 0 {
  1826  			dl := time.Now().Add(tlsTO)
  1827  			c.rwc.SetReadDeadline(dl)
  1828  			c.rwc.SetWriteDeadline(dl)
  1829  		}
  1830  		if err := tlsConn.HandshakeContext(ctx); err != nil {
  1831  			// If the handshake failed due to the client not speaking
  1832  			// TLS, assume they're speaking plaintext HTTP and write a
  1833  			// 400 response on the TLS conn's underlying net.Conn.
  1834  			if re, ok := err.(tls.RecordHeaderError); ok && re.Conn != nil && tlsRecordHeaderLooksLikeHTTP(re.RecordHeader) {
  1835  				io.WriteString(re.Conn, "HTTP/1.0 400 Bad Request\r\n\r\nClient sent an HTTP request to an HTTPS server.\n")
  1836  				re.Conn.Close()
  1837  				return
  1838  			}
  1839  			c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err)
  1840  			return
  1841  		}
  1842  		// Restore Conn-level deadlines.
  1843  		if tlsTO > 0 {
  1844  			c.rwc.SetReadDeadline(time.Time{})
  1845  			c.rwc.SetWriteDeadline(time.Time{})
  1846  		}
  1847  		c.tlsState = new(tls.ConnectionState)
  1848  		*c.tlsState = tlsConn.ConnectionState()
  1849  		if proto := c.tlsState.NegotiatedProtocol; validNextProto(proto) {
  1850  			if fn := c.server.TLSNextProto[proto]; fn != nil {
  1851  				h := initALPNRequest{ctx, tlsConn, serverHandler{c.server}}
  1852  				// Mark freshly created HTTP/2 as active and prevent any server state hooks
  1853  				// from being run on these connections. This prevents closeIdleConns from
  1854  				// closing such connections. See issue https://golang.org/issue/39776.
  1855  				c.setState(c.rwc, StateActive, skipHooks)
  1856  				fn(c.server, tlsConn, h)
  1857  			}
  1858  			return
  1859  		}
  1860  	}
  1861  
  1862  	// HTTP/1.x from here on.
  1863  
  1864  	ctx, cancelCtx := context.WithCancel(ctx)
  1865  	c.cancelCtx = cancelCtx
  1866  	defer cancelCtx()
  1867  
  1868  	c.r = &connReader{conn: c}
  1869  	c.bufr = newBufioReader(c.r)
  1870  	c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10)
  1871  
  1872  	for {
  1873  		w, err := c.readRequest(ctx)
  1874  		if c.r.remain != c.server.initialReadLimitSize() {
  1875  			// If we read any bytes off the wire, we're active.
  1876  			c.setState(c.rwc, StateActive, runHooks)
  1877  		}
  1878  		if err != nil {
  1879  			const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n"
  1880  
  1881  			switch {
  1882  			case err == errTooLarge:
  1883  				// Their HTTP client may or may not be
  1884  				// able to read this if we're
  1885  				// responding to them and hanging up
  1886  				// while they're still writing their
  1887  				// request. Undefined behavior.
  1888  				const publicErr = "431 Request Header Fields Too Large"
  1889  				fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
  1890  				c.closeWriteAndWait()
  1891  				return
  1892  
  1893  			case isUnsupportedTEError(err):
  1894  				// Respond as per RFC 7230 Section 3.3.1 which says,
  1895  				//      A server that receives a request message with a
  1896  				//      transfer coding it does not understand SHOULD
  1897  				//      respond with 501 (Not Implemented).
  1898  				code := StatusNotImplemented
  1899  
  1900  				// We purposefully aren't echoing back the transfer-encoding's value,
  1901  				// so as to mitigate the risk of cross-site scripting by an attacker.
  1902  				fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s%sUnsupported transfer encoding", code, StatusText(code), errorHeaders)
  1903  				return
  1904  
  1905  			case isCommonNetReadError(err):
  1906  				return // don't reply
  1907  
  1908  			default:
  1909  				if v, ok := err.(statusError); ok {
  1910  					fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s: %s%s%d %s: %s", v.code, StatusText(v.code), v.text, errorHeaders, v.code, StatusText(v.code), v.text)
  1911  					return
  1912  				}
  1913  				const publicErr = "400 Bad Request"
  1914  				fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
  1915  				return
  1916  			}
  1917  		}
  1918  
  1919  		// Expect 100 Continue support
  1920  		req := w.req
  1921  		if req.expectsContinue() {
  1922  			if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 {
  1923  				// Wrap the Body reader with one that replies on the connection
  1924  				req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
  1925  				w.canWriteContinue.Store(true)
  1926  			}
  1927  		} else if req.Header.get("Expect") != "" {
  1928  			w.sendExpectationFailed()
  1929  			return
  1930  		}
  1931  
  1932  		c.curReq.Store(w)
  1933  
  1934  		if requestBodyRemains(req.Body) {
  1935  			registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead)
  1936  		} else {
  1937  			w.conn.r.startBackgroundRead()
  1938  		}
  1939  
  1940  		// HTTP cannot have multiple simultaneous active requests.[*]
  1941  		// Until the server replies to this request, it can't read another,
  1942  		// so we might as well run the handler in this goroutine.
  1943  		// [*] Not strictly true: HTTP pipelining. We could let them all process
  1944  		// in parallel even if their responses need to be serialized.
  1945  		// But we're not going to implement HTTP pipelining because it
  1946  		// was never deployed in the wild and the answer is HTTP/2.
  1947  		inFlightResponse = w
  1948  		serverHandler{c.server}.ServeHTTP(w, w.req.toHttp())
  1949  		inFlightResponse = nil
  1950  		w.cancelCtx()
  1951  		if c.hijacked() {
  1952  			return
  1953  		}
  1954  		w.finishRequest()
  1955  		c.rwc.SetWriteDeadline(time.Time{})
  1956  		if !w.shouldReuseConnection() {
  1957  			if w.requestBodyLimitHit || w.closedRequestBodyEarly() {
  1958  				c.closeWriteAndWait()
  1959  			}
  1960  			return
  1961  		}
  1962  		c.setState(c.rwc, StateIdle, runHooks)
  1963  		c.curReq.Store(nil)
  1964  
  1965  		if !w.conn.server.doKeepAlives() {
  1966  			// We're in shutdown mode. We might've replied
  1967  			// to the user without "Connection: close" and
  1968  			// they might think they can send another
  1969  			// request, but such is life with HTTP/1.1.
  1970  			return
  1971  		}
  1972  
  1973  		if d := c.server.idleTimeout(); d > 0 {
  1974  			c.rwc.SetReadDeadline(time.Now().Add(d))
  1975  		} else {
  1976  			c.rwc.SetReadDeadline(time.Time{})
  1977  		}
  1978  
  1979  		// Wait for the connection to become readable again before trying to
  1980  		// read the next request. This prevents a ReadHeaderTimeout or
  1981  		// ReadTimeout from starting until the first bytes of the next request
  1982  		// have been received.
  1983  		if _, err := c.bufr.Peek(4); err != nil {
  1984  			return
  1985  		}
  1986  
  1987  		c.rwc.SetReadDeadline(time.Time{})
  1988  	}
  1989  }
  1990  
  1991  func (w *response) sendExpectationFailed() {
  1992  	// TODO(bradfitz): let ServeHTTP handlers handle
  1993  	// requests with non-standard expectation[s]? Seems
  1994  	// theoretical at best, and doesn't fit into the
  1995  	// current ServeHTTP model anyway. We'd need to
  1996  	// make the ResponseWriter an optional
  1997  	// "ExpectReplier" interface or something.
  1998  	//
  1999  	// For now we'll just obey RFC 7231 5.1.1 which says
  2000  	// "A server that receives an Expect field-value other
  2001  	// than 100-continue MAY respond with a 417 (Expectation
  2002  	// Failed) status code to indicate that the unexpected
  2003  	// expectation cannot be met."
  2004  	w.Header().Set("Connection", "close")
  2005  	w.WriteHeader(StatusExpectationFailed)
  2006  	w.finishRequest()
  2007  }
  2008  
  2009  // Hijack implements the [Hijacker.Hijack] method. Our response is both a [ResponseWriter]
  2010  // and a [Hijacker].
  2011  func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
  2012  	if w.handlerDone.Load() {
  2013  		panic("net/http: Hijack called after ServeHTTP finished")
  2014  	}
  2015  	if w.wroteHeader {
  2016  		w.cw.flush()
  2017  	}
  2018  
  2019  	c := w.conn
  2020  	c.mu.Lock()
  2021  	defer c.mu.Unlock()
  2022  
  2023  	// Release the bufioWriter that writes to the chunk writer; it is not
  2024  	// used after a connection has been hijacked.
  2025  	rwc, buf, err = c.hijackLocked()
  2026  	if err == nil {
  2027  		putBufioWriter(w.w)
  2028  		w.w = nil
  2029  	}
  2030  	return rwc, buf, err
  2031  }
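
// Illustrative sketch (not part of the original source): a handler takes over
// the connection by asserting its ResponseWriter to Hijacker; afterwards the
// server no longer manages the connection. The 101 response and the minimal
// error handling are assumed examples.
//
//	hj, ok := w.(Hijacker)
//	if !ok {
//		Error(w, "hijacking not supported", StatusInternalServerError)
//		return
//	}
//	conn, bufrw, err := hj.Hijack()
//	if err != nil {
//		return
//	}
//	defer conn.Close()
//	bufrw.WriteString("HTTP/1.1 101 Switching Protocols\r\n\r\n")
//	bufrw.Flush()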
  2032  
  2033  func (w *response) CloseNotify() <-chan bool {
  2034  	if w.handlerDone.Load() {
  2035  		panic("net/http: CloseNotify called after ServeHTTP finished")
  2036  	}
  2037  	return w.closeNotifyCh
  2038  }
  2039  
  2040  func registerOnHitEOF(rc io.ReadCloser, fn func()) {
  2041  	switch v := rc.(type) {
  2042  	case *expectContinueReader:
  2043  		registerOnHitEOF(v.readCloser, fn)
  2044  	case *body:
  2045  		v.registerOnHitEOF(fn)
  2046  	default:
  2047  		panic("unexpected type " + fmt.Sprintf("%T", rc))
  2048  	}
  2049  }
  2050  
  2051  // requestBodyRemains reports whether future calls to Read
  2052  // on rc might yield more data.
  2053  func requestBodyRemains(rc io.ReadCloser) bool {
  2054  	if rc == NoBody {
  2055  		return false
  2056  	}
  2057  	switch v := rc.(type) {
  2058  	case *expectContinueReader:
  2059  		return requestBodyRemains(v.readCloser)
  2060  	case *body:
  2061  		return v.bodyRemains()
  2062  	default:
  2063  		panic("unexpected type " + fmt.Sprintf("%T", rc))
  2064  	}
  2065  }
  2066  
  2067  // The HandlerFunc type is an adapter to allow the use of
  2068  // ordinary functions as HTTP handlers. If f is a function
  2069  // with the appropriate signature, HandlerFunc(f) is a
  2070  // [Handler] that calls f.
  2071  type HandlerFunc http.HandlerFunc
  2072  
  2073  // ServeHTTP calls f(w, r).
  2074  func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *http.Request) {
  2075  	f(w, r)
  2076  }
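
// Illustrative sketch (not part of the original source): an ordinary function
// becomes a [Handler] by conversion, as [NotFoundHandler] does with NotFound.
//
//	greet := func(w ResponseWriter, r *http.Request) {
//		fmt.Fprintln(w, "hello")
//	}
//	var h Handler = HandlerFunc(greet)
//	_ = h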
  2077  
  2078  // Helper handlers
  2079  
  2080  // Error replies to the request with the specified error message and HTTP code.
  2081  // It does not otherwise end the request; the caller should ensure no further
  2082  // writes are done to w.
  2083  // The error message should be plain text.
  2084  func Error(w ResponseWriter, error string, code int) {
  2085  	w.Header().Del("Content-Length")
  2086  	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
  2087  	w.Header().Set("X-Content-Type-Options", "nosniff")
  2088  	w.WriteHeader(code)
  2089  	fmt.Fprintln(w, error)
  2090  }
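
// Illustrative sketch (not part of the original source): a handler typically
// calls Error on its failure path and returns immediately. The query
// parameter "id" is an assumed example.
//
//	func getUser(w ResponseWriter, r *http.Request) {
//		id := r.URL.Query().Get("id")
//		if id == "" {
//			Error(w, "missing id", StatusBadRequest)
//			return
//		}
//		fmt.Fprintln(w, "user", id)
//	}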
  2091  
  2092  // NotFound replies to the request with an HTTP 404 not found error.
  2093  func NotFound(w ResponseWriter, r *http.Request) { Error(w, "404 page not found", StatusNotFound) }
  2094  
  2095  // NotFoundHandler returns a simple request handler
  2096  // that replies to each request with a “404 page not found” reply.
  2097  func NotFoundHandler() Handler { return HandlerFunc(NotFound) }
  2098  
  2099  // StripPrefix returns a handler that serves HTTP requests by removing the
  2100  // given prefix from the request URL's Path (and RawPath if set) and invoking
  2101  // the handler h. StripPrefix handles a request for a path that doesn't begin
  2102  // with prefix by replying with an HTTP 404 not found error. The prefix must
  2103  // match exactly: if the prefix in the request contains escaped characters
  2104  // the reply is also an HTTP 404 not found error.
  2105  func StripPrefix(prefix string, h Handler) Handler {
  2106  	if prefix == "" {
  2107  		return h
  2108  	}
  2109  	return HandlerFunc(func(w ResponseWriter, r *http.Request) {
  2110  		p := strings.TrimPrefix(r.URL.Path, prefix)
  2111  		rp := strings.TrimPrefix(r.URL.RawPath, prefix)
  2112  		if len(p) < len(r.URL.Path) && (r.URL.RawPath == "" || len(rp) < len(r.URL.RawPath)) {
  2113  			r2 := new(http.Request)
  2114  			*r2 = *r
  2115  			r2.URL = new(url.URL)
  2116  			*r2.URL = *r.URL
  2117  			r2.URL.Path = p
  2118  			r2.URL.RawPath = rp
  2119  			h.ServeHTTP(w, r2)
  2120  		} else {
  2121  			NotFound(w, r)
  2122  		}
  2123  	})
  2124  }
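
// Illustrative sketch (not part of the original source): serving a handler
// under the assumed prefix "/static/" while letting it see paths relative to
// that prefix.
//
//	assets := HandlerFunc(func(w ResponseWriter, r *http.Request) {
//		// A request for "/static/css/site.css" arrives here with
//		// r.URL.Path == "/css/site.css".
//		fmt.Fprintln(w, "asset:", r.URL.Path)
//	})
//	Handle("/static/", StripPrefix("/static", assets))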
  2125  
  2126  // Redirect replies to the request with a redirect to url,
  2127  // which may be a path relative to the request path.
  2128  //
  2129  // The provided code should be in the 3xx range and is usually
  2130  // [StatusMovedPermanently], [StatusFound] or [StatusSeeOther].
  2131  //
  2132  // If the Content-Type header has not been set, [Redirect] sets it
  2133  // to "text/html; charset=utf-8" and writes a small HTML body.
  2134  // Setting the Content-Type header to any value, including nil,
  2135  // disables that behavior.
  2136  func Redirect(w ResponseWriter, r *http.Request, url string, code int) {
  2137  	if u, err := urlpkg.Parse(url); err == nil {
  2138  		// If url was relative, make its path absolute by
  2139  		// combining with request path.
  2140  		// The client would probably do this for us,
  2141  		// but doing it ourselves is more reliable.
  2142  		// See RFC 7231, section 7.1.2
  2143  		if u.Scheme == "" && u.Host == "" {
  2144  			oldpath := r.URL.Path
  2145  			if oldpath == "" { // should not happen, but avoid a crash if it does
  2146  				oldpath = "/"
  2147  			}
  2148  
  2149  			// no leading http://server
  2150  			if url == "" || url[0] != '/' {
  2151  				// make relative path absolute
  2152  				olddir, _ := path.Split(oldpath)
  2153  				url = olddir + url
  2154  			}
  2155  
  2156  			var query string
  2157  			if i := strings.Index(url, "?"); i != -1 {
  2158  				url, query = url[:i], url[i:]
  2159  			}
  2160  
  2161  			// clean up but preserve trailing slash
  2162  			trailing := strings.HasSuffix(url, "/")
  2163  			url = path.Clean(url)
  2164  			if trailing && !strings.HasSuffix(url, "/") {
  2165  				url += "/"
  2166  			}
  2167  			url += query
  2168  		}
  2169  	}
  2170  
  2171  	h := w.Header()
  2172  
  2173  	// RFC 7231 notes that a short HTML body is usually included in
  2174  	// the response because older user agents may not understand 301/307.
  2175  	// Do it only if the request didn't already have a Content-Type header.
  2176  	_, hadCT := h["Content-Type"]
  2177  
  2178  	h.Set("Location", hexEscapeNonASCII(url))
  2179  	if !hadCT && (r.Method == "GET" || r.Method == "HEAD") {
  2180  		h.Set("Content-Type", "text/html; charset=utf-8")
  2181  	}
  2182  	w.WriteHeader(code)
  2183  
  2184  	// Shouldn't send the body for POST or HEAD; that leaves GET.
  2185  	if !hadCT && r.Method == "GET" {
  2186  		body := "<a href=\"" + htmlEscape(url) + "\">" + StatusText(code) + "</a>.\n"
  2187  		fmt.Fprintln(w, body)
  2188  	}
  2189  }
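
// Illustrative sketch (not part of the original source): redirecting an old
// path to its assumed replacement with a permanent redirect.
//
//	func oldDocs(w ResponseWriter, r *http.Request) {
//		Redirect(w, r, "/docs/", StatusMovedPermanently)
//	}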
  2190  
  2191  var htmlReplacer = strings.NewReplacer(
  2192  	"&", "&amp;",
  2193  	"<", "&lt;",
  2194  	">", "&gt;",
  2195  	// "&#34;" is shorter than "&quot;".
  2196  	`"`, "&#34;",
  2197  	// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
  2198  	"'", "&#39;",
  2199  )
  2200  
  2201  func htmlEscape(s string) string {
  2202  	return htmlReplacer.Replace(s)
  2203  }
  2204  
  2205  // Redirect to a fixed URL
  2206  type redirectHandler struct {
  2207  	url  string
  2208  	code int
  2209  }
  2210  
  2211  func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *http.Request) {
  2212  	Redirect(w, r, rh.url, rh.code)
  2213  }
  2214  
  2215  // RedirectHandler returns a request handler that redirects
  2216  // each request it receives to the given url using the given
  2217  // status code.
  2218  //
  2219  // The provided code should be in the 3xx range and is usually
  2220  // [StatusMovedPermanently], [StatusFound] or [StatusSeeOther].
  2221  func RedirectHandler(url string, code int) Handler {
  2222  	return &redirectHandler{url, code}
  2223  }
  2224  
  2225  // ServeMux is an HTTP request multiplexer.
  2226  // It matches the URL of each incoming request against a list of registered
  2227  // patterns and calls the handler for the pattern that
  2228  // most closely matches the URL.
  2229  //
  2230  // # Patterns
  2231  //
  2232  // Patterns can match the method, host and path of a request.
  2233  // Some examples:
  2234  //
  2235  //   - "/index.html" matches the path "/index.html" for any host and method.
  2236  //   - "GET /static/" matches a GET request whose path begins with "/static/".
  2237  //   - "example.com/" matches any request to the host "example.com".
  2238  //   - "example.com/{$}" matches requests with host "example.com" and path "/".
  2239  //   - "/b/{bucket}/o/{objectname...}" matches paths whose first segment is "b"
  2240  //     and whose third segment is "o". The name "bucket" denotes the second
  2241  //     segment and "objectname" denotes the remainder of the path.
  2242  //
  2243  // In general, a pattern looks like
  2244  //
  2245  //	[METHOD ][HOST]/[PATH]
  2246  //
  2247  // All three parts are optional; "/" is a valid pattern.
  2248  // If METHOD is present, it must be followed by at least one space or tab.
  2249  //
  2250  // Literal (that is, non-wildcard) parts of a pattern match
  2251  // the corresponding parts of a request case-sensitively.
  2252  //
  2253  // A pattern with no method matches every method. A pattern
  2254  // with the method GET matches both GET and HEAD requests.
  2255  // Otherwise, the method must match exactly.
  2256  //
  2257  // A pattern with no host matches every host.
  2258  // A pattern with a host matches URLs on that host only.
  2259  //
  2260  // A path can include wildcard segments of the form {NAME} or {NAME...}.
  2261  // For example, "/b/{bucket}/o/{objectname...}".
  2262  // The wildcard name must be a valid Go identifier.
  2263  // Wildcards must be full path segments: they must be preceded by a slash and followed by
  2264  // either a slash or the end of the string.
  2265  // For example, "/b_{bucket}" is not a valid pattern.
  2266  //
  2267  // Normally a wildcard matches only a single path segment,
  2268  // ending at the next literal slash (not %2F) in the request URL.
  2269  // But if the "..." is present, then the wildcard matches the remainder of the URL path, including slashes.
  2270  // (Therefore it is invalid for a "..." wildcard to appear anywhere but at the end of a pattern.)
  2271  // The match for a wildcard can be obtained by calling [Request.PathValue] with the wildcard's name.
  2272  // A trailing slash in a path acts as an anonymous "..." wildcard.
  2273  //
  2274  // The special wildcard {$} matches only the end of the URL.
  2275  // For example, the pattern "/{$}" matches only the path "/",
  2276  // whereas the pattern "/" matches every path.
  2277  //
  2278  // For matching, both pattern paths and incoming request paths are unescaped segment by segment.
  2279  // So, for example, the path "/a%2Fb/100%25" is treated as having two segments, "a/b" and "100%".
  2280  // The pattern "/a%2fb/" matches it, but the pattern "/a/b/" does not.
  2281  //
  2282  // # Precedence
  2283  //
  2284  // If two or more patterns match a request, then the most specific pattern takes precedence.
  2285  // A pattern P1 is more specific than P2 if P1 matches a strict subset of P2’s requests;
  2286  // that is, if P2 matches all the requests of P1 and more.
  2287  // If neither is more specific, then the patterns conflict.
  2288  // There is one exception to this rule, for backwards compatibility:
  2289  // if two patterns would otherwise conflict and one has a host while the other does not,
  2290  // then the pattern with the host takes precedence.
  2291  // If a pattern passed to [ServeMux.Handle] or [ServeMux.HandleFunc] conflicts with
  2292  // another pattern that is already registered, those functions panic.
  2293  //
  2294  // As an example of the general rule, "/images/thumbnails/" is more specific than "/images/",
  2295  // so both can be registered.
  2296  // The former matches paths beginning with "/images/thumbnails/"
  2297  // and the latter will match any other path in the "/images/" subtree.
  2298  //
  2299  // As another example, consider the patterns "GET /" and "/index.html":
  2300  // both match a GET request for "/index.html", but the former pattern
  2301  // matches all other GET and HEAD requests, while the latter matches any
  2302  // request for "/index.html" that uses a different method.
  2303  // The patterns conflict.
  2304  //
  2305  // # Trailing-slash redirection
  2306  //
  2307  // Consider a [ServeMux] with a handler for a subtree, registered using a trailing slash or "..." wildcard.
  2308  // If the ServeMux receives a request for the subtree root without a trailing slash,
  2309  // it redirects the request by adding the trailing slash.
  2310  // This behavior can be overridden with a separate registration for the path without
  2311  // the trailing slash or "..." wildcard. For example, registering "/images/" causes ServeMux
  2312  // to redirect a request for "/images" to "/images/", unless "/images" has
  2313  // been registered separately.
  2314  //
  2315  // # Request sanitizing
  2316  //
  2317  // ServeMux also takes care of sanitizing the URL request path and the Host
  2318  // header, stripping the port number and redirecting any request containing . or
  2319  // .. segments or repeated slashes to an equivalent, cleaner URL.
  2320  //
  2321  // # Compatibility
  2322  //
  2323  // The pattern syntax and matching behavior of ServeMux changed significantly
  2324  // in Go 1.22. To restore the old behavior, set the GODEBUG environment variable
  2325  // to "httpmuxgo121=1". This setting is read once, at program startup; changes
  2326  // during execution will be ignored.
  2327  //
  2328  // The backwards-incompatible changes include:
  2329  //   - Wildcards are just ordinary literal path segments in 1.21.
  2330  //     For example, the pattern "/{x}" will match only that path in 1.21,
  2331  //     but will match any one-segment path in 1.22.
  2332  //   - In 1.21, no pattern was rejected, unless it was empty or conflicted with an existing pattern.
  2333  //     In 1.22, syntactically invalid patterns will cause [ServeMux.Handle] and [ServeMux.HandleFunc] to panic.
  2334  //     For example, in 1.21, the patterns "/{"  and "/a{x}" match themselves,
  2335  //     but in 1.22 they are invalid and will cause a panic when registered.
  2336  //   - In 1.22, each segment of a pattern is unescaped; this was not done in 1.21.
  2337  //     For example, in 1.22 the pattern "/%61" matches the path "/a" ("%61" being the URL escape sequence for "a"),
  2338  //     but in 1.21 it would match only the path "/%2561" (where "%25" is the escape for the percent sign).
  2339  //   - When matching patterns to paths, in 1.22 each segment of the path is unescaped; in 1.21, the entire path is unescaped.
  2340  //     This change mostly affects how paths with %2F escapes adjacent to slashes are treated.
  2341  //     See https://go.dev/issue/21955 for details.
  2342  type ServeMux struct {
  2343  	mu       sync.RWMutex
  2344  	tree     routingNode
  2345  	index    routingIndex
  2346  	patterns []*pattern // TODO(jba): remove if possible
  2347  	//mux121   serveMux121 // used only when GODEBUG=httpmuxgo121=1
  2348  }
  2349  
  2350  // NewServeMux allocates and returns a new [ServeMux].
  2351  func NewServeMux() *ServeMux {
  2352  	return &ServeMux{}
  2353  }
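
// Illustrative sketch (not part of the original source): registering handlers
// using the pattern syntax documented on [ServeMux]. The bucket/object route
// is an assumed example, and reading wildcard values assumes [Request.PathValue]
// is wired up as in upstream net/http.
//
//	mux := NewServeMux()
//	mux.HandleFunc("GET /b/{bucket}/o/{objectname...}", func(w ResponseWriter, r *http.Request) {
//		fmt.Fprintf(w, "bucket=%s object=%s\n", r.PathValue("bucket"), r.PathValue("objectname"))
//	})
//	mux.Handle("example.com/{$}", NotFoundHandler()) // matches only the root path on that host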
  2354  
  2355  // DefaultServeMux is the default [ServeMux] used by [Serve].
  2356  var DefaultServeMux = &defaultServeMux
  2357  
  2358  var defaultServeMux ServeMux
  2359  
  2360  // cleanPath returns the canonical path for p, eliminating . and .. elements.
  2361  func cleanPath(p string) string {
  2362  	if p == "" {
  2363  		return "/"
  2364  	}
  2365  	if p[0] != '/' {
  2366  		p = "/" + p
  2367  	}
  2368  	np := path.Clean(p)
  2369  	// path.Clean removes trailing slash except for root;
  2370  	// put the trailing slash back if necessary.
  2371  	if p[len(p)-1] == '/' && np != "/" {
  2372  		// Fast path for common case of p being the string we want:
  2373  		if len(p) == len(np)+1 && strings.HasPrefix(p, np) {
  2374  			np = p
  2375  		} else {
  2376  			np += "/"
  2377  		}
  2378  	}
  2379  	return np
  2380  }
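
// Illustrative sketch (not part of the original source): cleanPath collapses
// "." and ".." segments and repeated slashes, adds a leading slash, and keeps
// a trailing slash. The inputs are assumed examples.
//
//	cleanPath("a/../b//c/") // "/b/c/"
//	cleanPath("")           // "/"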
  2381  
  2382  // stripHostPort returns h without any trailing ":<port>".
  2383  func stripHostPort(h string) string {
  2384  	// If no port on host, return unchanged
  2385  	if !strings.Contains(h, ":") {
  2386  		return h
  2387  	}
  2388  	host, _, err := net.SplitHostPort(h)
  2389  	if err != nil {
  2390  		return h // on error, return unchanged
  2391  	}
  2392  	return host
  2393  }
  2394  
  2395  // Handler returns the handler to use for the given request,
  2396  // consulting r.Method, r.Host, and r.URL.Path. It always returns
  2397  // a non-nil handler. If the path is not in its canonical form, the
  2398  // handler will be an internally-generated handler that redirects
  2399  // to the canonical path. If the host contains a port, it is ignored
  2400  // when matching handlers.
  2401  //
  2402  // The path and host are used unchanged for CONNECT requests.
  2403  //
  2404  // Handler also returns the registered pattern that matches the
  2405  // request or, in the case of internally-generated redirects,
  2406  // the path that will match after following the redirect.
  2407  //
  2408  // If there is no registered handler that applies to the request,
  2409  // Handler returns a “page not found” handler and an empty pattern.
  2410  func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) {
  2411  	//if use121 {
  2412  	//	return mux.mux121.findHandler(r)
  2413  	//}
  2414  	h, p, _, _ := mux.findHandler(r)
  2415  	return h, p
  2416  }
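
// Illustrative sketch (not part of the original source): Handler reports which
// handler and pattern would serve a request without actually serving it, which
// can be useful for logging routing decisions. mux and req are assumed to be
// an existing *ServeMux and *Request.
//
//	h, pattern := mux.Handler(req)
//	log.Printf("request for %s routed to pattern %q", req.URL.Path, pattern)
//	_ = h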
  2417  
  2418  // findHandler finds a handler for a request.
  2419  // If there is a matching handler, it returns it and the pattern that matched.
  2420  // Otherwise it returns a Redirect or NotFound handler with the path that would match
  2421  // after the redirect.
  2422  func (mux *ServeMux) findHandler(r *Request) (h Handler, patStr string, _ *pattern, matches []string) {
  2423  	var n *routingNode
  2424  	host := r.URL.Host
  2425  	escapedPath := r.URL.EscapedPath()
  2426  	path := escapedPath
  2427  	// CONNECT requests are not canonicalized.
  2428  	if r.Method == "CONNECT" {
  2429  		// If r.URL.Path is /tree and its handler is not registered,
  2430  		// the /tree -> /tree/ redirect applies to CONNECT requests
  2431  		// but the path canonicalization does not.
  2432  		_, _, u := mux.matchOrRedirect(host, r.Method, path, r.URL)
  2433  		if u != nil {
  2434  			return RedirectHandler(u.String(), StatusMovedPermanently), u.Path, nil, nil
  2435  		}
  2436  		// Redo the match, this time with r.Host instead of r.URL.Host.
  2437  		// Pass a nil URL to skip the trailing-slash redirect logic.
  2438  		n, matches, _ = mux.matchOrRedirect(r.Host, r.Method, path, nil)
  2439  	} else {
  2440  		// All other requests have any port stripped and path cleaned
  2441  		// before passing to mux.handler.
  2442  		host = stripHostPort(r.Host)
  2443  		path = cleanPath(path)
  2444  
  2445  		// If the given path is /tree and its handler is not registered,
  2446  		// redirect for /tree/.
  2447  		var u *url.URL
  2448  		n, matches, u = mux.matchOrRedirect(host, r.Method, path, r.URL)
  2449  		if u != nil {
  2450  			return RedirectHandler(u.String(), StatusMovedPermanently), u.Path, nil, nil
  2451  		}
  2452  		if path != escapedPath {
  2453  			// Redirect to cleaned path.
  2454  			patStr := ""
  2455  			if n != nil {
  2456  				patStr = n.pattern.String()
  2457  			}
  2458  			u := &url.URL{Path: path, RawQuery: r.URL.RawQuery}
  2459  			return RedirectHandler(u.String(), StatusMovedPermanently), patStr, nil, nil
  2460  		}
  2461  	}
  2462  	if n == nil {
  2463  		// We didn't find a match with the request method. To distinguish between
  2464  		// Not Found and Method Not Allowed, see if there is another pattern that
  2465  		// matches except for the method.
  2466  		allowedMethods := mux.matchingMethods(host, path)
  2467  		if len(allowedMethods) > 0 {
  2468  			return HandlerFunc(func(w ResponseWriter, r *http.Request) {
  2469  				w.Header().Set("Allow", strings.Join(allowedMethods, ", "))
  2470  				Error(w, StatusText(StatusMethodNotAllowed), StatusMethodNotAllowed)
  2471  			}), "", nil, nil
  2472  		}
  2473  		return NotFoundHandler(), "", nil, nil
  2474  	}
  2475  	return n.handler, n.pattern.String(), n.pattern, matches
  2476  }
  2477  
  2478  // matchOrRedirect looks up a node in the tree that matches the host, method and path.
  2479  //
  2480  // If the url argument is non-nil, handler also deals with trailing-slash
  2481  // redirection: when a path doesn't match exactly, the match is tried again
  2482  // after appending "/" to the path. If that second match succeeds, the last
  2483  // return value is the URL to redirect to.
  2484  func (mux *ServeMux) matchOrRedirect(host, method, path string, u *url.URL) (_ *routingNode, matches []string, redirectTo *url.URL) {
  2485  	mux.mu.RLock()
  2486  	defer mux.mu.RUnlock()
  2487  
  2488  	n, matches := mux.tree.match(host, method, path)
  2489  	// If we have an exact match, or we were asked not to try trailing-slash redirection,
  2490  	// or the URL already has a trailing slash, then we're done.
  2491  	if !exactMatch(n, path) && u != nil && !strings.HasSuffix(path, "/") {
  2492  		// If there is an exact match with a trailing slash, then redirect.
  2493  		path += "/"
  2494  		n2, _ := mux.tree.match(host, method, path)
  2495  		if exactMatch(n2, path) {
  2496  			return nil, nil, &url.URL{Path: cleanPath(u.Path) + "/", RawQuery: u.RawQuery}
  2497  		}
  2498  	}
  2499  	return n, matches, nil
  2500  }
  2501  
  2502  // exactMatch reports whether the node's pattern exactly matches the path.
  2503  // As a special case, if the node is nil, exactMatch returns false.
  2504  //
  2505  // Before wildcards were introduced, it was clear that an exact match meant
  2506  // that the pattern and path were the same string. The only other possibility
  2507  // was that a trailing-slash pattern, like "/", matched a path longer than
  2508  // it, like "/a".
  2509  //
  2510  // With wildcards, we define an inexact match as any one where a multi wildcard
  2511  // matches a non-empty string. All other matches are exact.
  2512  // For example, these are all exact matches:
  2513  //
  2514  //	pattern   path
  2515  //	/a        /a
  2516  //	/{x}      /a
  2517  //	/a/{$}    /a/
  2518  //	/a/       /a/
  2519  //
  2520  // The last case has a multi wildcard (implicitly), but the match is exact because
  2521  // the wildcard matches the empty string.
  2522  //
  2523  // Examples of matches that are not exact:
  2524  //
  2525  //	pattern   path
  2526  //	/         /a
  2527  //	/a/{x...} /a/b
  2528  func exactMatch(n *routingNode, path string) bool {
  2529  	if n == nil {
  2530  		return false
  2531  	}
  2532  	// We can't directly implement the definition (empty match for multi
  2533  	// wildcard) because we don't record a match for anonymous multis.
  2534  
  2535  	// If there is no multi, the match is exact.
  2536  	if !n.pattern.lastSegment().multi {
  2537  		return true
  2538  	}
  2539  
  2540  	// If the path doesn't end in a trailing slash, then the multi match
  2541  	// is non-empty.
  2542  	if len(path) > 0 && path[len(path)-1] != '/' {
  2543  		return false
  2544  	}
  2545  	// Only patterns ending in {$} or a multi wildcard can
  2546  	// match a path with a trailing slash.
  2547  	// For the match to be exact, the number of pattern
  2548  	// segments should be the same as the number of slashes in the path.
  2549  	// E.g. "/a/b/{$}" and "/a/b/{...}" exactly match "/a/b/", but "/a/" does not.
  2550  	return len(n.pattern.segments) == strings.Count(path, "/")
  2551  }
  2552  
  2553  // matchingMethods returns a sorted list of all methods that would match the given host and path.
  2554  func (mux *ServeMux) matchingMethods(host, path string) []string {
  2555  	// Hold the read lock for the entire method so that the two matches are done
  2556  	// on the same set of registered patterns.
  2557  	mux.mu.RLock()
  2558  	defer mux.mu.RUnlock()
  2559  	ms := map[string]bool{}
  2560  	mux.tree.matchingMethods(host, path, ms)
  2561  	// matchOrRedirect will try appending a trailing slash if there is no match.
  2562  	mux.tree.matchingMethods(host, path+"/", ms)
  2563  	methods := mapKeys(ms)
  2564  	slices.Sort(methods)
  2565  	return methods
  2566  }
  2567  
  2568  // TODO(jba): replace with maps.Keys when it is defined.
  2569  func mapKeys[K comparable, V any](m map[K]V) []K {
  2570  	var ks []K
  2571  	for k := range m {
  2572  		ks = append(ks, k)
  2573  	}
  2574  	return ks
  2575  }
  2576  
  2577  // ServeHTTP dispatches the request to the handler whose
  2578  // pattern most closely matches the request URL.
  2579  func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) {
  2580  	if r.RequestURI == "*" {
  2581  		if r.ProtoAtLeast(1, 1) {
  2582  			w.Header().Set("Connection", "close")
  2583  		}
  2584  		w.WriteHeader(StatusBadRequest)
  2585  		return
  2586  	}
  2587  	var h Handler
  2588  	//if use121 {
  2589  	//	h, _ = mux.mux121.findHandler(r)
  2590  	//} else {
  2591  	h, _, r.pat, r.matches = mux.findHandler(r)
  2592  	//}
  2593  	h.ServeHTTP(w, r.toHttp())
  2594  }
  2595  
  2596  // The four functions below all call ServeMux.register so that the recorded
  2597  // caller location always refers to user code.
  2598  
  2599  // Handle registers the handler for the given pattern.
  2600  // If the given pattern conflicts with one that is already registered, Handle
  2601  // panics.
  2602  func (mux *ServeMux) Handle(pattern string, handler Handler) {
  2603  	//if use121 {
  2604  	//	mux.mux121.handle(pattern, handler)
  2605  	//} else {
  2606  	mux.register(pattern, handler)
  2607  	//}
  2608  }
  2609  
  2610  // HandleFunc registers the handler function for the given pattern.
  2611  // If the given pattern conflicts with one that is already registered, HandleFunc
  2612  // panics.
  2613  func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *http.Request)) {
  2614  	//if use121 {
  2615  	//	mux.mux121.handleFunc(pattern, handler)
  2616  	//} else {
  2617  	mux.register(pattern, HandlerFunc(handler))
  2618  	//}
  2619  }
  2620  
  2621  // Handle registers the handler for the given pattern in [DefaultServeMux].
  2622  // The documentation for [ServeMux] explains how patterns are matched.
  2623  func Handle(pattern string, handler Handler) {
  2624  	//if use121 {
  2625  	//	DefaultServeMux.mux121.handle(pattern, handler)
  2626  	//} else {
  2627  	DefaultServeMux.register(pattern, handler)
  2628  	//}
  2629  }
  2630  
  2631  // HandleFunc registers the handler function for the given pattern in [DefaultServeMux].
  2632  // The documentation for [ServeMux] explains how patterns are matched.
  2633  func HandleFunc(pattern string, handler func(ResponseWriter, *http.Request)) {
  2634  	//if use121 {
  2635  	//	DefaultServeMux.mux121.handleFunc(pattern, handler)
  2636  	//} else {
  2637  	DefaultServeMux.register(pattern, HandlerFunc(handler))
  2638  	//}
  2639  }
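
// Illustrative sketch (not part of the original source): registering a route
// on [DefaultServeMux] with the package-level helper. The path is an assumed
// example.
//
//	HandleFunc("GET /healthz", func(w ResponseWriter, r *http.Request) {
//		io.WriteString(w, "ok\n")
//	})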
  2640  
  2641  func (mux *ServeMux) register(pattern string, handler Handler) {
  2642  	if err := mux.registerErr(pattern, handler); err != nil {
  2643  		panic(err)
  2644  	}
  2645  }
  2646  
  2647  func (mux *ServeMux) registerErr(patstr string, handler Handler) error {
  2648  	if patstr == "" {
  2649  		return errors.New("http: invalid pattern")
  2650  	}
  2651  	if handler == nil {
  2652  		return errors.New("http: nil handler")
  2653  	}
  2654  	if f, ok := handler.(HandlerFunc); ok && f == nil {
  2655  		return errors.New("http: nil handler")
  2656  	}
  2657  
  2658  	pat, err := parsePattern(patstr)
  2659  	if err != nil {
  2660  		return fmt.Errorf("parsing %q: %w", patstr, err)
  2661  	}
  2662  
  2663  	// Get the caller's location, for better conflict error messages.
  2664  	// Skip register and whatever calls it.
  2665  	_, file, line, ok := runtime.Caller(3)
  2666  	if !ok {
  2667  		pat.loc = "unknown location"
  2668  	} else {
  2669  		pat.loc = fmt.Sprintf("%s:%d", file, line)
  2670  	}
  2671  
  2672  	mux.mu.Lock()
  2673  	defer mux.mu.Unlock()
  2674  	// Check for conflict.
  2675  	if err := mux.index.possiblyConflictingPatterns(pat, func(pat2 *pattern) error {
  2676  		if pat.conflictsWith(pat2) {
  2677  			d := describeConflict(pat, pat2)
  2678  			return fmt.Errorf("pattern %q (registered at %s) conflicts with pattern %q (registered at %s):\n%s",
  2679  				pat, pat.loc, pat2, pat2.loc, d)
  2680  		}
  2681  		return nil
  2682  	}); err != nil {
  2683  		return err
  2684  	}
  2685  	mux.tree.addPattern(pat, handler)
  2686  	mux.index.addPattern(pat)
  2687  	mux.patterns = append(mux.patterns, pat)
  2688  	return nil
  2689  }
  2690  
  2691  // Serve accepts incoming HTTP connections on the listener l,
  2692  // creating a new service goroutine for each. The service goroutines
  2693  // read requests and then call handler to reply to them.
  2694  //
  2695  // The handler is typically nil, in which case [DefaultServeMux] is used.
  2696  //
  2697  // HTTP/2 support is only enabled if the Listener returns [*tls.Conn]
  2698  // connections and they were configured with "h2" in the TLS
  2699  // Config.NextProtos.
  2700  //
  2701  // Serve always returns a non-nil error.
  2702  func Serve(l net.Listener, handler Handler) error {
  2703  	srv := &Server{Handler: handler}
  2704  	return srv.Serve(l)
  2705  }
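
// Illustrative sketch (not part of the original source): Serve with a nil
// handler dispatches to [DefaultServeMux]. The listen address is an assumed
// example.
//
//	ln, err := net.Listen("tcp", ":8080")
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Fatal(Serve(ln, nil))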
  2706  
  2707  // ServeTLS accepts incoming HTTPS connections on the listener l,
  2708  // creating a new service goroutine for each. The service goroutines
  2709  // read requests and then call handler to reply to them.
  2710  //
  2711  // The handler is typically nil, in which case [DefaultServeMux] is used.
  2712  //
  2713  // Additionally, files containing a certificate and matching private key
  2714  // for the server must be provided. If the certificate is signed by a
  2715  // certificate authority, the certFile should be the concatenation
  2716  // of the server's certificate, any intermediates, and the CA's certificate.
  2717  //
  2718  // ServeTLS always returns a non-nil error.
  2719  func ServeTLS(l net.Listener, handler Handler, certFile, keyFile string) error {
  2720  	srv := &Server{Handler: handler}
  2721  	return srv.ServeTLS(l, certFile, keyFile)
  2722  }
  2723  
  2724  // A Server defines parameters for running an HTTP server.
  2725  // The zero value for Server is a valid configuration.
  2726  type Server struct {
  2727  	// Addr optionally specifies the TCP address for the server to listen on,
  2728  	// in the form "host:port". If empty, ":http" (port 80) is used.
  2729  	// The service names are defined in RFC 6335 and assigned by IANA.
  2730  	// See net.Dial for details of the address format.
  2731  	Addr string
  2732  	// https://datatracker.ietf.org/doc/draft-ietf-webtrans-http2/08/
  2733  	WebTransportMaxSessions uint32
  2734  
  2735  	Handler Handler // handler to invoke, http.DefaultServeMux if nil
  2736  
  2737  	// DisableGeneralOptionsHandler, if true, passes "OPTIONS *" requests to the Handler,
  2738  	// otherwise responds with 200 OK and Content-Length: 0.
  2739  	DisableGeneralOptionsHandler bool
  2740  
  2741  	// TLSConfig optionally provides a TLS configuration for use
  2742  	// by ServeTLS and ListenAndServeTLS. Note that this value is
  2743  	// cloned by ServeTLS and ListenAndServeTLS, so it's not
  2744  	// possible to modify the configuration with methods like
  2745  	// tls.Config.SetSessionTicketKeys. To use
  2746  	// SetSessionTicketKeys, use Server.Serve with a TLS Listener
  2747  	// instead.
  2748  	TLSConfig *tls.Config
  2749  
  2750  	// ReadTimeout is the maximum duration for reading the entire
  2751  	// request, including the body. A zero or negative value means
  2752  	// there will be no timeout.
  2753  	//
  2754  	// Because ReadTimeout does not let Handlers make per-request
  2755  	// decisions on each request body's acceptable deadline or
  2756  	// upload rate, most users will prefer to use
  2757  	// ReadHeaderTimeout. It is valid to use them both.
  2758  	ReadTimeout time.Duration
  2759  
  2760  	// ReadHeaderTimeout is the amount of time allowed to read
  2761  	// request headers. The connection's read deadline is reset
  2762  	// after reading the headers and the Handler can decide what
  2763  	// is considered too slow for the body. If zero, the value of
  2764  	// ReadTimeout is used. If negative, or if zero and ReadTimeout
  2765  	// is zero or negative, there is no timeout.
  2766  	ReadHeaderTimeout time.Duration
  2767  
  2768  	// WriteTimeout is the maximum duration before timing out
  2769  	// writes of the response. It is reset whenever a new
  2770  	// request's header is read. Like ReadTimeout, it does not
  2771  	// let Handlers make decisions on a per-request basis.
  2772  	// A zero or negative value means there will be no timeout.
  2773  	WriteTimeout time.Duration
  2774  
  2775  	// IdleTimeout is the maximum amount of time to wait for the
  2776  	// next request when keep-alives are enabled. If zero, the value
  2777  	// of ReadTimeout is used. If negative, or if zero and ReadTimeout
  2778  	// is zero or negative, there is no timeout.
  2779  	IdleTimeout time.Duration
  2780  
  2781  	// MaxHeaderBytes controls the maximum number of bytes the
  2782  	// server will read parsing the request header's keys and
  2783  	// values, including the request line. It does not limit the
  2784  	// size of the request body.
  2785  	// If zero, DefaultMaxHeaderBytes is used.
  2786  	MaxHeaderBytes int
  2787  
  2788  	// TLSNextProto optionally specifies a function to take over
  2789  	// ownership of the provided TLS connection when an ALPN
  2790  	// protocol upgrade has occurred. The map key is the protocol
  2791  	// name negotiated. The Handler argument should be used to
  2792  	// handle HTTP requests and will initialize the Request's TLS
  2793  	// and RemoteAddr if not already set. The connection is
  2794  	// automatically closed when the function returns.
  2795  	// If TLSNextProto is not nil, HTTP/2 support is not enabled
  2796  	// automatically.
  2797  	TLSNextProto map[string]func(*Server, *tls.Conn, Handler)
  2798  
  2799  	// ConnState specifies an optional callback function that is
  2800  	// called when a client connection changes state. See the
  2801  	// ConnState type and associated constants for details.
  2802  	ConnState func(net.Conn, ConnState)
  2803  
  2804  	// ErrorLog specifies an optional logger for errors accepting
  2805  	// connections, unexpected behavior from handlers, and
  2806  	// underlying FileSystem errors.
  2807  	// If nil, logging is done via the log package's standard logger.
  2808  	ErrorLog *log.Logger
  2809  
  2810  	// BaseContext optionally specifies a function that returns
  2811  	// the base context for incoming requests on this server.
  2812  	// The provided Listener is the specific Listener that's
  2813  	// about to start accepting requests.
  2814  	// If BaseContext is nil, the default is context.Background().
  2815  	// If non-nil, it must return a non-nil context.
  2816  	BaseContext func(net.Listener) context.Context
  2817  
  2818  	// ConnContext optionally specifies a function that modifies
  2819  	// the context used for a new connection c. The provided ctx
  2820  	// is derived from the base context and has a ServerContextKey
  2821  	// value.
  2822  	ConnContext func(ctx context.Context, c net.Conn) context.Context
  2823  
  2824  	inShutdown atomic.Bool // true when server is in shutdown
  2825  
  2826  	disableKeepAlives atomic.Bool
  2827  	nextProtoOnce     sync.Once // guards setupHTTP2_* init
  2828  	nextProtoErr      error     // result of http2.ConfigureServer if used
  2829  
  2830  	mu         sync.Mutex
  2831  	listeners  map[*net.Listener]struct{}
  2832  	activeConn map[*conn]struct{}
  2833  	onShutdown []func()
  2834  
  2835  	listenerGroup sync.WaitGroup
  2836  }
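
// Example (minimal sketch, from a consuming package): populating the timeout
// and context fields documented above. The mux variable and the durations
// shown are illustrative assumptions.
//
//	srv := &http.Server{
//		Addr:              ":8080",
//		Handler:           mux,
//		ReadHeaderTimeout: 5 * time.Second,  // bound header reads
//		ReadTimeout:       30 * time.Second, // bound the whole request, body included
//		WriteTimeout:      30 * time.Second, // bound response writes
//		IdleTimeout:       2 * time.Minute,  // keep-alive wait for the next request
//		MaxHeaderBytes:    1 << 20,
//		BaseContext: func(net.Listener) context.Context {
//			return context.Background()
//		},
//	}
//	log.Fatal(srv.ListenAndServe())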
  2837  
  2838  // Close immediately closes all active net.Listeners and any
  2839  // connections in state [StateNew], [StateActive], or [StateIdle]. For a
  2840  // graceful shutdown, use [Server.Shutdown].
  2841  //
  2842  // Close does not attempt to close (and does not even know about)
  2843  // any hijacked connections, such as WebSockets.
  2844  //
  2845  // Close returns any error returned from closing the [Server]'s
  2846  // underlying Listener(s).
  2847  func (srv *Server) Close() error {
  2848  	srv.inShutdown.Store(true)
  2849  	srv.mu.Lock()
  2850  	defer srv.mu.Unlock()
  2851  	err := srv.closeListenersLocked()
  2852  
  2853  	// Unlock srv.mu while waiting for listenerGroup.
  2854  	// The group Add and Done calls are made with srv.mu held,
  2855  	// to avoid adding a new listener in the window between
  2856  	// us setting inShutdown above and waiting here.
  2857  	srv.mu.Unlock()
  2858  	srv.listenerGroup.Wait()
  2859  	srv.mu.Lock()
  2860  
  2861  	for c := range srv.activeConn {
  2862  		c.rwc.Close()
  2863  		delete(srv.activeConn, c)
  2864  	}
  2865  	return err
  2866  }
  2867  
  2868  // shutdownPollIntervalMax is the max polling interval when checking
  2869  // quiescence during Server.Shutdown. Polling starts with a small
  2870  // interval and backs off to the max.
  2871  // Ideally we could find a solution that doesn't involve polling,
  2872  // but which also doesn't have a high runtime cost (and doesn't
  2873  // involve any contentious mutexes), but that is left as an
  2874  // exercise for the reader.
  2875  const shutdownPollIntervalMax = 500 * time.Millisecond
  2876  
  2877  // Shutdown gracefully shuts down the server without interrupting any
  2878  // active connections. Shutdown works by first closing all open
  2879  // listeners, then closing all idle connections, and then waiting
  2880  // indefinitely for connections to return to idle and then shut down.
  2881  // If the provided context expires before the shutdown is complete,
  2882  // Shutdown returns the context's error, otherwise it returns any
  2883  // error returned from closing the [Server]'s underlying Listener(s).
  2884  //
  2885  // When Shutdown is called, [Serve], [ListenAndServe], and
  2886  // [ListenAndServeTLS] immediately return [ErrServerClosed]. Make sure the
  2887  // program doesn't exit and waits instead for Shutdown to return.
  2888  //
  2889  // Shutdown does not attempt to close nor wait for hijacked
  2890  // connections such as WebSockets. The caller of Shutdown should
  2891  // separately notify such long-lived connections of shutdown and wait
  2892  // for them to close, if desired. See [Server.RegisterOnShutdown] for a way to
  2893  // register shutdown notification functions.
  2894  //
  2895  // Once Shutdown has been called on a server, it may not be reused;
  2896  // future calls to methods such as Serve will return ErrServerClosed.
  2897  func (srv *Server) Shutdown(ctx context.Context) error {
  2898  	srv.inShutdown.Store(true)
  2899  
  2900  	srv.mu.Lock()
  2901  	lnerr := srv.closeListenersLocked()
  2902  	for _, f := range srv.onShutdown {
  2903  		go f()
  2904  	}
  2905  	srv.mu.Unlock()
  2906  	srv.listenerGroup.Wait()
  2907  
  2908  	pollIntervalBase := time.Millisecond
  2909  	nextPollInterval := func() time.Duration {
  2910  		// Add 10% jitter.
  2911  		interval := pollIntervalBase + time.Duration(rand.Intn(int(pollIntervalBase/10)))
  2912  		// Double and clamp for next time.
  2913  		pollIntervalBase *= 2
  2914  		if pollIntervalBase > shutdownPollIntervalMax {
  2915  			pollIntervalBase = shutdownPollIntervalMax
  2916  		}
  2917  		return interval
  2918  	}
  2919  
  2920  	timer := time.NewTimer(nextPollInterval())
  2921  	defer timer.Stop()
  2922  	for {
  2923  		if srv.closeIdleConns() {
  2924  			return lnerr
  2925  		}
  2926  		select {
  2927  		case <-ctx.Done():
  2928  			return ctx.Err()
  2929  		case <-timer.C:
  2930  			timer.Reset(nextPollInterval())
  2931  		}
  2932  	}
  2933  }
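
// Example (minimal sketch, from a consuming package): the usual pairing of
// Shutdown with a signal handler and a deadline, checking ListenAndServe's
// ErrServerClosed return. The srv variable and the 10-second deadline are
// illustrative assumptions.
//
//	idleConnsClosed := make(chan struct{})
//	go func() {
//		sigint := make(chan os.Signal, 1)
//		signal.Notify(sigint, os.Interrupt)
//		<-sigint
//
//		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//		defer cancel()
//		if err := srv.Shutdown(ctx); err != nil {
//			log.Printf("HTTP server Shutdown: %v", err)
//		}
//		close(idleConnsClosed)
//	}()
//
//	if err := srv.ListenAndServe(); err != http.ErrServerClosed {
//		log.Fatalf("HTTP server ListenAndServe: %v", err)
//	}
//	<-idleConnsClosed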
  2934  
  2935  // RegisterOnShutdown registers a function to call on [Server.Shutdown].
  2936  // This can be used to gracefully shut down connections that have
  2937  // undergone ALPN protocol upgrade or that have been hijacked.
  2938  // This function should start protocol-specific graceful shutdown,
  2939  // but should not wait for shutdown to complete.
  2940  func (srv *Server) RegisterOnShutdown(f func()) {
  2941  	srv.mu.Lock()
  2942  	srv.onShutdown = append(srv.onShutdown, f)
  2943  	srv.mu.Unlock()
  2944  }
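
// Example (minimal sketch): starting, but not waiting for, the close of
// long-lived hijacked connections when Shutdown is called. The wsConns
// registry and its CloseAll method are illustrative assumptions.
//
//	srv.RegisterOnShutdown(func() {
//		// Begin closing tracked WebSocket connections; Shutdown itself
//		// does not know about hijacked connections.
//		wsConns.CloseAll()
//	})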
  2945  
  2946  // closeIdleConns closes all idle connections and reports whether the
  2947  // server is quiescent.
  2948  func (s *Server) closeIdleConns() bool {
  2949  	s.mu.Lock()
  2950  	defer s.mu.Unlock()
  2951  	quiescent := true
  2952  	for c := range s.activeConn {
  2953  		st, unixSec := c.getState()
  2954  		// Issue 22682: treat StateNew connections as if
  2955  		// they're idle if we haven't read the first request's
  2956  		// header in over 5 seconds.
  2957  		if st == StateNew && unixSec < time.Now().Unix()-5 {
  2958  			st = StateIdle
  2959  		}
  2960  		if st != StateIdle || unixSec == 0 {
  2961  			// Assume unixSec == 0 means it's a very new
  2962  			// connection, without state set yet.
  2963  			quiescent = false
  2964  			continue
  2965  		}
  2966  		c.rwc.Close()
  2967  		delete(s.activeConn, c)
  2968  	}
  2969  	return quiescent
  2970  }
  2971  
  2972  func (s *Server) closeListenersLocked() error {
  2973  	var err error
  2974  	for ln := range s.listeners {
  2975  		if cerr := (*ln).Close(); cerr != nil && err == nil {
  2976  			err = cerr
  2977  		}
  2978  	}
  2979  	return err
  2980  }
  2981  
  2982  // A ConnState represents the state of a client connection to a server.
  2983  // It's used by the optional [Server.ConnState] hook.
  2984  type ConnState int
  2985  
  2986  const (
  2987  	// StateNew represents a new connection that is expected to
  2988  	// send a request immediately. Connections begin at this
  2989  	// state and then transition to either StateActive or
  2990  	// StateClosed.
  2991  	StateNew ConnState = iota
  2992  
  2993  	// StateActive represents a connection that has read 1 or more
  2994  	// bytes of a request. The Server.ConnState hook for
  2995  	// StateActive fires before the request has entered a handler
  2996  	// and doesn't fire again until the request has been
  2997  	// handled. After the request is handled, the state
  2998  	// transitions to StateClosed, StateHijacked, or StateIdle.
  2999  	// For HTTP/2, StateActive fires on the transition from zero
  3000  	// to one active request, and only transitions away once all
  3001  	// active requests are complete. That means that ConnState
  3002  	// cannot be used to do per-request work; ConnState only notes
  3003  	// the overall state of the connection.
  3004  	StateActive
  3005  
  3006  	// StateIdle represents a connection that has finished
  3007  	// handling a request and is in the keep-alive state, waiting
  3008  	// for a new request. Connections transition from StateIdle
  3009  	// to either StateActive or StateClosed.
  3010  	StateIdle
  3011  
  3012  	// StateHijacked represents a hijacked connection.
  3013  	// This is a terminal state. It does not transition to StateClosed.
  3014  	StateHijacked
  3015  
  3016  	// StateClosed represents a closed connection.
  3017  	// This is a terminal state. Hijacked connections do not
  3018  	// transition to StateClosed.
  3019  	StateClosed
  3020  )
  3021  
  3022  var stateName = map[ConnState]string{
  3023  	StateNew:      "new",
  3024  	StateActive:   "active",
  3025  	StateIdle:     "idle",
  3026  	StateHijacked: "hijacked",
  3027  	StateClosed:   "closed",
  3028  }
  3029  
  3030  func (c ConnState) String() string {
  3031  	return stateName[c]
  3032  }
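
// Example (minimal sketch, from a consuming package): using the Server.ConnState
// hook with these states to keep a rough count of open connections. The
// openConns counter is an illustrative assumption.
//
//	var openConns atomic.Int64
//	srv.ConnState = func(c net.Conn, state http.ConnState) {
//		switch state {
//		case http.StateNew:
//			openConns.Add(1)
//		case http.StateHijacked, http.StateClosed:
//			// Both are terminal states, so each connection is
//			// decremented exactly once.
//			openConns.Add(-1)
//		}
//	}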
  3033  
  3034  // serverHandler delegates to either the server's Handler or
  3035  // DefaultServeMux and also handles "OPTIONS *" requests.
  3036  type serverHandler struct {
  3037  	srv *Server
  3038  }
  3039  
  3040  func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *http.Request) {
  3041  	handler := sh.srv.Handler
  3042  	if handler == nil {
  3043  		handler = http.DefaultServeMux
  3044  	}
  3045  	if !sh.srv.DisableGeneralOptionsHandler && req.RequestURI == "*" && req.Method == "OPTIONS" {
  3046  		handler = globalOptionsHandler{}
  3047  	}
  3048  
  3049  	handler.ServeHTTP(rw, req)
  3050  }
  3051  
  3052  // AllowQuerySemicolons returns a handler that serves requests by converting any
  3053  // unescaped semicolons in the URL query to ampersands, and invoking the handler h.
  3054  //
  3055  // This restores the pre-Go 1.17 behavior of splitting query parameters on both
  3056  // semicolons and ampersands. (See golang.org/issue/25192). Note that this
  3057  // behavior doesn't match that of many proxies, and the mismatch can lead to
  3058  // security issues.
  3059  //
  3060  // AllowQuerySemicolons should be invoked before [Request.ParseForm] is called.
  3061  func AllowQuerySemicolons(h Handler) Handler {
  3062  	return HandlerFunc(func(w ResponseWriter, r *http.Request) {
  3063  		if strings.Contains(r.URL.RawQuery, ";") {
  3064  			r2 := new(http.Request)
  3065  			*r2 = *r
  3066  			r2.URL = new(url.URL)
  3067  			*r2.URL = *r.URL
  3068  			r2.URL.RawQuery = strings.ReplaceAll(r.URL.RawQuery, ";", "&")
  3069  			h.ServeHTTP(w, r2)
  3070  		} else {
  3071  			h.ServeHTTP(w, r)
  3072  		}
  3073  	})
  3074  }
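
// Example (minimal sketch, from a consuming package): restoring
// semicolon-separated query parsing for every request handled by a mux.
// The mux variable is an illustrative assumption.
//
//	srv := &http.Server{
//		Addr:    ":8080",
//		Handler: http.AllowQuerySemicolons(mux),
//	}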
  3075  
  3076  // ListenAndServe listens on the TCP network address srv.Addr and then
  3077  // calls [Serve] to handle requests on incoming connections.
  3078  // Accepted connections are configured to enable TCP keep-alives.
  3079  //
  3080  // If srv.Addr is blank, ":http" is used.
  3081  //
  3082  // ListenAndServe always returns a non-nil error. After [Server.Shutdown] or [Server.Close],
  3083  // the returned error is [ErrServerClosed].
  3084  func (srv *Server) ListenAndServe() error {
  3085  	if srv.shuttingDown() {
  3086  		return ErrServerClosed
  3087  	}
  3088  	addr := srv.Addr
  3089  	if addr == "" {
  3090  		addr = ":http"
  3091  	}
  3092  	ln, err := net.Listen("tcp", addr)
  3093  	if err != nil {
  3094  		return err
  3095  	}
  3096  	return srv.Serve(ln)
  3097  }
  3098  
  3099  var testHookServerServe func(*Server, net.Listener) // used if non-nil
  3100  
  3101  // shouldConfigureHTTP2ForServe reports whether Server.Serve should configure
  3102  // automatic HTTP/2 (which sets up the srv.TLSNextProto map).
  3103  func (srv *Server) shouldConfigureHTTP2ForServe() bool {
  3104  	if srv.TLSConfig == nil {
  3105  		// Compatibility with Go 1.6:
  3106  		// If there's no TLSConfig, it's possible that the user just
  3107  		// didn't set it on the http.Server, but did pass it to
  3108  		// tls.NewListener and passed that listener to Serve.
  3109  		// So we should configure HTTP/2 (to set up srv.TLSNextProto)
  3110  		// in case the listener returns an "h2" *tls.Conn.
  3111  		return true
  3112  	}
  3113  	// The user specified a TLSConfig on their http.Server.
  3114  	// In this case, only configure HTTP/2 if their tls.Config
  3115  	// explicitly mentions "h2". Otherwise http2.ConfigureServer
  3116  	// would modify the tls.Config to add it, but they probably already
  3117  	// passed this tls.Config to tls.NewListener. And if they did,
  3118  	// it's too late anyway to fix it. It would only be potentially racy.
  3119  	// See Issue 15908.
  3120  	return slices.Contains(srv.TLSConfig.NextProtos, http2NextProtoTLS)
  3121  }
  3122  
  3123  // ErrServerClosed is returned by the [Server.Serve], [ServeTLS], [ListenAndServe],
  3124  // and [ListenAndServeTLS] methods after a call to [Server.Shutdown] or [Server.Close].
  3125  var ErrServerClosed = errors.New("http: Server closed")
  3126  
  3127  // Serve accepts incoming connections on the Listener l, creating a
  3128  // new service goroutine for each. The service goroutines read requests and
  3129  // then call srv.Handler to reply to them.
  3130  //
  3131  // HTTP/2 support is only enabled if the Listener returns [*tls.Conn]
  3132  // connections and they were configured with "h2" in the TLS
  3133  // Config.NextProtos.
  3134  //
  3135  // Serve always returns a non-nil error and closes l.
  3136  // After [Server.Shutdown] or [Server.Close], the returned error is [ErrServerClosed].
  3137  func (srv *Server) Serve(l net.Listener) error {
  3138  	if fn := testHookServerServe; fn != nil {
  3139  		fn(srv, l) // call hook with unwrapped listener
  3140  	}
  3141  
  3142  	origListener := l
  3143  	l = &onceCloseListener{Listener: l}
  3144  	defer l.Close()
  3145  
  3146  	if err := srv.setupHTTP2_Serve(); err != nil {
  3147  		return err
  3148  	}
  3149  
  3150  	if !srv.trackListener(&l, true) {
  3151  		return ErrServerClosed
  3152  	}
  3153  	defer srv.trackListener(&l, false)
  3154  
  3155  	baseCtx := context.Background()
  3156  	if srv.BaseContext != nil {
  3157  		baseCtx = srv.BaseContext(origListener)
  3158  		if baseCtx == nil {
  3159  			panic("BaseContext returned a nil context")
  3160  		}
  3161  	}
  3162  
  3163  	var tempDelay time.Duration // how long to sleep on accept failure
  3164  
  3165  	ctx := context.WithValue(baseCtx, ServerContextKey, srv)
  3166  	for {
  3167  		rw, err := l.Accept()
  3168  		if err != nil {
  3169  			if srv.shuttingDown() {
  3170  				return ErrServerClosed
  3171  			}
  3172  			if ne, ok := err.(net.Error); ok && ne.Temporary() {
  3173  				if tempDelay == 0 {
  3174  					tempDelay = 5 * time.Millisecond
  3175  				} else {
  3176  					tempDelay *= 2
  3177  				}
  3178  				if max := 1 * time.Second; tempDelay > max {
  3179  					tempDelay = max
  3180  				}
  3181  				srv.logf("http: Accept error: %v; retrying in %v", err, tempDelay)
  3182  				time.Sleep(tempDelay)
  3183  				continue
  3184  			}
  3185  			return err
  3186  		}
  3187  		connCtx := ctx
  3188  		if cc := srv.ConnContext; cc != nil {
  3189  			connCtx = cc(connCtx, rw)
  3190  			if connCtx == nil {
  3191  				panic("ConnContext returned nil")
  3192  			}
  3193  		}
  3194  		tempDelay = 0
  3195  		c := srv.newConn(rw)
  3196  		c.setState(c.rwc, StateNew, runHooks) // before Serve can return
  3197  		go c.serve(connCtx)
  3198  	}
  3199  }
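
// Example (minimal sketch, from a consuming package): Serve with a listener
// created by the caller, here a Unix domain socket. The socket path is an
// illustrative assumption.
//
//	ln, err := net.Listen("unix", "/tmp/app.sock")
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Fatal(srv.Serve(ln))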
  3200  
  3201  // ServeTLS accepts incoming connections on the Listener l, creating a
  3202  // new service goroutine for each. The service goroutines perform TLS
  3203  // setup and then read requests, calling srv.Handler to reply to them.
  3204  //
  3205  // Files containing a certificate and matching private key for the
  3206  // server must be provided if neither the [Server]'s
  3207  // TLSConfig.Certificates nor TLSConfig.GetCertificate are populated.
  3208  // If the certificate is signed by a certificate authority, the
  3209  // certFile should be the concatenation of the server's certificate,
  3210  // any intermediates, and the CA's certificate.
  3211  //
  3212  // ServeTLS always returns a non-nil error. After [Server.Shutdown] or [Server.Close], the
  3213  // returned error is [ErrServerClosed].
  3214  func (srv *Server) ServeTLS(l net.Listener, certFile, keyFile string) error {
  3215  	// Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig
  3216  	// before we clone it and create the TLS Listener.
  3217  	if err := srv.setupHTTP2_ServeTLS(); err != nil {
  3218  		return err
  3219  	}
  3220  
  3221  	config := cloneTLSConfig(srv.TLSConfig)
  3222  	if !slices.Contains(config.NextProtos, "http/1.1") {
  3223  		config.NextProtos = append(config.NextProtos, "http/1.1")
  3224  	}
  3225  
  3226  	configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil
  3227  	if !configHasCert || certFile != "" || keyFile != "" {
  3228  		var err error
  3229  		config.Certificates = make([]tls.Certificate, 1)
  3230  		config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
  3231  		if err != nil {
  3232  			return err
  3233  		}
  3234  	}
  3235  
  3236  	tlsListener := tls.NewListener(l, config)
  3237  	return srv.Serve(tlsListener)
  3238  }
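
// Example (minimal sketch, from a consuming package): when the certificate is
// already supplied via srv.TLSConfig, certFile and keyFile may both be left
// empty. The cert variable and listener address are illustrative assumptions.
//
//	srv.TLSConfig = &tls.Config{Certificates: []tls.Certificate{cert}}
//	ln, err := net.Listen("tcp", ":443")
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Fatal(srv.ServeTLS(ln, "", ""))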
  3239  
  3240  // trackListener adds a net.Listener to, or removes it from, the set of
  3241  // tracked listeners.
  3242  //
  3243  // We store a pointer to interface in the map set, in case the
  3244  // net.Listener is not comparable. This is safe because we only call
  3245  // trackListener via Serve and can track+defer untrack the same
  3246  // pointer to local variable there. We never need to compare a
  3247  // Listener from another caller.
  3248  //
  3249  // It reports whether the server is still up (not Shutdown or Closed).
  3250  func (s *Server) trackListener(ln *net.Listener, add bool) bool {
  3251  	s.mu.Lock()
  3252  	defer s.mu.Unlock()
  3253  	if s.listeners == nil {
  3254  		s.listeners = make(map[*net.Listener]struct{})
  3255  	}
  3256  	if add {
  3257  		if s.shuttingDown() {
  3258  			return false
  3259  		}
  3260  		s.listeners[ln] = struct{}{}
  3261  		s.listenerGroup.Add(1)
  3262  	} else {
  3263  		delete(s.listeners, ln)
  3264  		s.listenerGroup.Done()
  3265  	}
  3266  	return true
  3267  }
  3268  
  3269  func (s *Server) trackConn(c *conn, add bool) {
  3270  	s.mu.Lock()
  3271  	defer s.mu.Unlock()
  3272  	if s.activeConn == nil {
  3273  		s.activeConn = make(map[*conn]struct{})
  3274  	}
  3275  	if add {
  3276  		s.activeConn[c] = struct{}{}
  3277  	} else {
  3278  		delete(s.activeConn, c)
  3279  	}
  3280  }
  3281  
  3282  func (s *Server) idleTimeout() time.Duration {
  3283  	if s.IdleTimeout != 0 {
  3284  		return s.IdleTimeout
  3285  	}
  3286  	return s.ReadTimeout
  3287  }
  3288  
  3289  func (s *Server) readHeaderTimeout() time.Duration {
  3290  	if s.ReadHeaderTimeout != 0 {
  3291  		return s.ReadHeaderTimeout
  3292  	}
  3293  	return s.ReadTimeout
  3294  }
  3295  
  3296  func (s *Server) doKeepAlives() bool {
  3297  	return !s.disableKeepAlives.Load() && !s.shuttingDown()
  3298  }
  3299  
  3300  func (s *Server) shuttingDown() bool {
  3301  	return s.inShutdown.Load()
  3302  }
  3303  
  3304  // SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled.
  3305  // By default, keep-alives are always enabled. Only very
  3306  // resource-constrained environments or servers in the process of
  3307  // shutting down should disable them.
  3308  func (srv *Server) SetKeepAlivesEnabled(v bool) {
  3309  	if v {
  3310  		srv.disableKeepAlives.Store(false)
  3311  		return
  3312  	}
  3313  	srv.disableKeepAlives.Store(true)
  3314  
  3315  	// Close idle HTTP/1 conns:
  3316  	srv.closeIdleConns()
  3317  
  3318  	// TODO: Issue 26303: close HTTP/2 conns as soon as they become idle.
  3319  }
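
// Example (minimal sketch): disabling keep-alives ahead of a graceful
// shutdown so that idle clients are not handed new persistent connections.
//
//	srv.SetKeepAlivesEnabled(false)
//	// ...then call srv.Shutdown as usual.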
  3320  
  3321  func (s *Server) logf(format string, args ...any) {
  3322  	if s.ErrorLog != nil {
  3323  		s.ErrorLog.Printf(format, args...)
  3324  	} else {
  3325  		log.Printf(format, args...)
  3326  	}
  3327  }
  3328  
  3329  // logf prints to the ErrorLog of the *Server associated with request r
  3330  // via ServerContextKey. If there's no associated server, or if ErrorLog
  3331  // is nil, logging is done via the log package's standard logger.
  3332  func logf(r *http.Request, format string, args ...any) {
  3333  	s, _ := r.Context().Value(ServerContextKey).(*Server)
  3334  	if s != nil && s.ErrorLog != nil {
  3335  		s.ErrorLog.Printf(format, args...)
  3336  	} else {
  3337  		log.Printf(format, args...)
  3338  	}
  3339  }
  3340  
  3341  // ListenAndServe listens on the TCP network address addr and then calls
  3342  // [Serve] with handler to handle requests on incoming connections.
  3343  // Accepted connections are configured to enable TCP keep-alives.
  3344  //
  3345  // The handler is typically nil, in which case [DefaultServeMux] is used.
  3346  //
  3347  // ListenAndServe always returns a non-nil error.
  3348  func ListenAndServe(addr string, handler Handler) error {
  3349  	server := &Server{Addr: addr, Handler: handler}
  3350  	return server.ListenAndServe()
  3351  }
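
// Example (minimal sketch, from a consuming package): the package-level
// helper with DefaultServeMux. The route and response body are illustrative
// assumptions.
//
//	http.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
//		io.WriteString(w, "Hello, world!\n")
//	})
//	log.Fatal(http.ListenAndServe(":8080", nil))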
  3352  
  3353  // ListenAndServeTLS acts identically to [ListenAndServe], except that it
  3354  // expects HTTPS connections. Additionally, files containing a certificate and
  3355  // matching private key for the server must be provided. If the certificate
  3356  // is signed by a certificate authority, the certFile should be the concatenation
  3357  // of the server's certificate, any intermediates, and the CA's certificate.
  3358  func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
  3359  	server := &Server{Addr: addr, Handler: handler}
  3360  	return server.ListenAndServeTLS(certFile, keyFile)
  3361  }
  3362  
  3363  // ListenAndServeTLS listens on the TCP network address srv.Addr and
  3364  // then calls [ServeTLS] to handle requests on incoming TLS connections.
  3365  // Accepted connections are configured to enable TCP keep-alives.
  3366  //
  3367  // Filenames containing a certificate and matching private key for the
  3368  // server must be provided if neither the [Server]'s TLSConfig.Certificates
  3369  // nor TLSConfig.GetCertificate are populated. If the certificate is
  3370  // signed by a certificate authority, the certFile should be the
  3371  // concatenation of the server's certificate, any intermediates, and
  3372  // the CA's certificate.
  3373  //
  3374  // If srv.Addr is blank, ":https" is used.
  3375  //
  3376  // ListenAndServeTLS always returns a non-nil error. After [Server.Shutdown] or
  3377  // [Server.Close], the returned error is [ErrServerClosed].
  3378  func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
  3379  	if srv.shuttingDown() {
  3380  		return ErrServerClosed
  3381  	}
  3382  	addr := srv.Addr
  3383  	if addr == "" {
  3384  		addr = ":https"
  3385  	}
  3386  
  3387  	ln, err := net.Listen("tcp", addr)
  3388  	if err != nil {
  3389  		return err
  3390  	}
  3391  
  3392  	defer ln.Close()
  3393  
  3394  	return srv.ServeTLS(ln, certFile, keyFile)
  3395  }
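
// Example (minimal sketch, from a consuming package): serving HTTPS from
// certificate and key files. The file paths and mux variable are illustrative
// assumptions.
//
//	srv := &http.Server{Addr: ":443", Handler: mux}
//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))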
  3396  
  3397  // setupHTTP2_ServeTLS conditionally configures HTTP/2 on
  3398  // srv and returns any error encountered while setting it up. If HTTP/2
  3399  // is not configured for policy reasons, nil is returned.
  3400  func (srv *Server) setupHTTP2_ServeTLS() error {
  3401  	srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults)
  3402  	return srv.nextProtoErr
  3403  }
  3404  
  3405  // setupHTTP2_Serve is called from (*Server).Serve and conditionally
  3406  // configures HTTP/2 on srv using a more conservative policy than
  3407  // setupHTTP2_ServeTLS because Serve is called after tls.Listen,
  3408  // and may be called concurrently. See shouldConfigureHTTP2ForServe.
  3409  //
  3410  // The tests named TestTransportAutomaticHTTP2* and
  3411  // TestConcurrentServerServe in server_test.go demonstrate some
  3412  // of the supported use cases and motivations.
  3413  func (srv *Server) setupHTTP2_Serve() error {
  3414  	srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults_Serve)
  3415  	return srv.nextProtoErr
  3416  }
  3417  
  3418  func (srv *Server) onceSetNextProtoDefaults_Serve() {
  3419  	if srv.shouldConfigureHTTP2ForServe() {
  3420  		srv.onceSetNextProtoDefaults()
  3421  	}
  3422  }
  3423  
  3424  //var http2server = godebug.New("http2server")
  3425  
  3426  // onceSetNextProtoDefaults configures HTTP/2 if the user hasn't
  3427  // configured it otherwise (by setting srv.TLSNextProto non-nil).
  3428  // It must only be called via srv.nextProtoOnce (use srv.setupHTTP2_*).
  3429  func (srv *Server) onceSetNextProtoDefaults() {
  3430  	if omitBundledHTTP2 {
  3431  		return
  3432  	}
  3433  	//if http2server.Value() == "0" {
  3434  	//	http2server.IncNonDefault()
  3435  	//	return
  3436  	//}
  3437  	// Enable HTTP/2 by default if the user hasn't otherwise
  3438  	// configured their TLSNextProto map.
  3439  	if srv.TLSNextProto == nil {
  3440  		conf := &http2Server{
  3441  			NewWriteScheduler: func() http2WriteScheduler { return http2NewPriorityWriteScheduler(nil) },
  3442  		}
  3443  		srv.nextProtoErr = http2ConfigureServer(srv, conf)
  3444  	}
  3445  }
  3446  
  3447  // TimeoutHandler returns a [Handler] that runs h with the given time limit.
  3448  //
  3449  // The new Handler calls h.ServeHTTP to handle each request, but if a
  3450  // call runs for longer than its time limit, the handler responds with
  3451  // a 503 Service Unavailable error and the given message in its body.
  3452  // (If msg is empty, a suitable default message will be sent.)
  3453  // After such a timeout, writes by h to its [ResponseWriter] will return
  3454  // [ErrHandlerTimeout].
  3455  //
  3456  // TimeoutHandler supports the [Pusher] interface but does not support
  3457  // the [Hijacker] or [Flusher] interfaces.
  3458  func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler {
  3459  	return &timeoutHandler{
  3460  		handler: h,
  3461  		body:    msg,
  3462  		dt:      dt,
  3463  	}
  3464  }
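
// Example (minimal sketch, from a consuming package): bounding a slow handler
// with TimeoutHandler. The slowHandler variable and the one-second limit are
// illustrative assumptions.
//
//	limited := http.TimeoutHandler(slowHandler, 1*time.Second, "request timed out")
//	srv := &http.Server{Addr: ":8080", Handler: limited}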
  3465  
  3466  // ErrHandlerTimeout is returned on [ResponseWriter] Write calls
  3467  // in handlers which have timed out.
  3468  var ErrHandlerTimeout = errors.New("http: Handler timeout")
  3469  
  3470  type timeoutHandler struct {
  3471  	handler Handler
  3472  	body    string
  3473  	dt      time.Duration
  3474  
  3475  	// When set, no context will be created and this context will
  3476  	// be used instead.
  3477  	testContext context.Context
  3478  }
  3479  
  3480  func (h *timeoutHandler) errorBody() string {
  3481  	if h.body != "" {
  3482  		return h.body
  3483  	}
  3484  	return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>"
  3485  }
  3486  
  3487  func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *http.Request) {
  3488  	ctx := h.testContext
  3489  	if ctx == nil {
  3490  		var cancelCtx context.CancelFunc
  3491  		ctx, cancelCtx = context.WithTimeout(r.Context(), h.dt)
  3492  		defer cancelCtx()
  3493  	}
  3494  	r = r.WithContext(ctx)
  3495  	done := make(chan struct{})
  3496  	tw := &timeoutWriter{
  3497  		w:   w,
  3498  		h:   make(Header),
  3499  		req: r,
  3500  	}
  3501  	panicChan := make(chan any, 1)
  3502  	go func() {
  3503  		defer func() {
  3504  			if p := recover(); p != nil {
  3505  				panicChan <- p
  3506  			}
  3507  		}()
  3508  		h.handler.ServeHTTP(tw, r)
  3509  		close(done)
  3510  	}()
  3511  	select {
  3512  	case p := <-panicChan:
  3513  		panic(p)
  3514  	case <-done:
  3515  		tw.mu.Lock()
  3516  		defer tw.mu.Unlock()
  3517  		dst := w.Header()
  3518  		for k, vv := range tw.h {
  3519  			dst[k] = vv
  3520  		}
  3521  		if !tw.wroteHeader {
  3522  			tw.code = StatusOK
  3523  		}
  3524  		w.WriteHeader(tw.code)
  3525  		w.Write(tw.wbuf.Bytes())
  3526  	case <-ctx.Done():
  3527  		tw.mu.Lock()
  3528  		defer tw.mu.Unlock()
  3529  		switch err := ctx.Err(); err {
  3530  		case context.DeadlineExceeded:
  3531  			w.WriteHeader(StatusServiceUnavailable)
  3532  			io.WriteString(w, h.errorBody())
  3533  			tw.err = ErrHandlerTimeout
  3534  		default:
  3535  			w.WriteHeader(StatusServiceUnavailable)
  3536  			tw.err = err
  3537  		}
  3538  	}
  3539  }
  3540  
  3541  type timeoutWriter struct {
  3542  	w    ResponseWriter
  3543  	h    Header
  3544  	wbuf bytes.Buffer
  3545  	req  *http.Request
  3546  
  3547  	mu          sync.Mutex
  3548  	err         error
  3549  	wroteHeader bool
  3550  	code        int
  3551  }
  3552  
  3553  var _ Pusher = (*timeoutWriter)(nil)
  3554  
  3555  // Push implements the [Pusher] interface.
  3556  func (tw *timeoutWriter) Push(target string, opts *PushOptions) error {
  3557  	if pusher, ok := tw.w.(Pusher); ok {
  3558  		return pusher.Push(target, opts)
  3559  	}
  3560  	return ErrNotSupported
  3561  }
  3562  
  3563  func (tw *timeoutWriter) Header() http.Header { return tw.h.toHttp() }
  3564  
  3565  func (tw *timeoutWriter) Write(p []byte) (int, error) {
  3566  	tw.mu.Lock()
  3567  	defer tw.mu.Unlock()
  3568  	if tw.err != nil {
  3569  		return 0, tw.err
  3570  	}
  3571  	if !tw.wroteHeader {
  3572  		tw.writeHeaderLocked(StatusOK)
  3573  	}
  3574  	return tw.wbuf.Write(p)
  3575  }
  3576  
  3577  func (tw *timeoutWriter) writeHeaderLocked(code int) {
  3578  	checkWriteHeaderCode(code)
  3579  
  3580  	switch {
  3581  	case tw.err != nil:
  3582  		return
  3583  	case tw.wroteHeader:
  3584  		if tw.req != nil {
  3585  			caller := relevantCaller()
  3586  			logf(tw.req, "http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
  3587  		}
  3588  	default:
  3589  		tw.wroteHeader = true
  3590  		tw.code = code
  3591  	}
  3592  }
  3593  
  3594  func (tw *timeoutWriter) WriteHeader(code int) {
  3595  	tw.mu.Lock()
  3596  	defer tw.mu.Unlock()
  3597  	tw.writeHeaderLocked(code)
  3598  }
  3599  
  3600  // onceCloseListener wraps a net.Listener, protecting it from
  3601  // multiple Close calls.
  3602  type onceCloseListener struct {
  3603  	net.Listener
  3604  	once     sync.Once
  3605  	closeErr error
  3606  }
  3607  
  3608  func (oc *onceCloseListener) Close() error {
  3609  	oc.once.Do(oc.close)
  3610  	return oc.closeErr
  3611  }
  3612  
  3613  func (oc *onceCloseListener) close() { oc.closeErr = oc.Listener.Close() }
  3614  
  3615  // globalOptionsHandler responds to "OPTIONS *" requests.
  3616  type globalOptionsHandler struct{}
  3617  
  3618  func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *http.Request) {
  3619  	w.Header().Set("Content-Length", "0")
  3620  	if r.ContentLength != 0 {
  3621  		// Read up to 4KB of OPTIONS body (as mentioned in the
  3622  		// spec as being reserved for future use), but anything
  3623  		// over that is considered a waste of server resources
  3624  		// (or an attack) and we abort and close the connection,
  3625  		// courtesy of MaxBytesReader's EOF behavior.
  3626  		mb := MaxBytesReader(w, r.Body, 4<<10)
  3627  		io.Copy(io.Discard, mb)
  3628  	}
  3629  }
  3630  
  3631  // initALPNRequest is an HTTP handler that initializes certain
  3632  // uninitialized fields in its *Request. Such partially-initialized
  3633  // Requests come from ALPN protocol handlers.
  3634  type initALPNRequest struct {
  3635  	ctx context.Context
  3636  	c   *tls.Conn
  3637  	h   serverHandler
  3638  }
  3639  
  3640  // BaseContext is an exported but unadvertised [http.Handler] method
  3641  // recognized by x/net/http2 to pass down a context; the TLSNextProto
  3642  // API predates context support so we shoehorn through the only
  3643  // interface we have available.
  3644  func (h initALPNRequest) BaseContext() context.Context { return h.ctx }
  3645  
  3646  func (h initALPNRequest) ServeHTTP(rw ResponseWriter, req *http.Request) {
  3647  	if req.TLS == nil {
  3648  		req.TLS = &tls.ConnectionState{}
  3649  		*req.TLS = h.c.ConnectionState()
  3650  	}
  3651  	if req.Body == nil {
  3652  		req.Body = NoBody
  3653  	}
  3654  	if req.RemoteAddr == "" {
  3655  		req.RemoteAddr = h.c.RemoteAddr().String()
  3656  	}
  3657  	h.h.ServeHTTP(rw, req)
  3658  }
  3659  
  3660  // loggingConn is used for debugging.
  3661  type loggingConn struct {
  3662  	name string
  3663  	net.Conn
  3664  }
  3665  
  3666  var (
  3667  	uniqNameMu   sync.Mutex
  3668  	uniqNameNext = make(map[string]int)
  3669  )
  3670  
  3671  func newLoggingConn(baseName string, c net.Conn) net.Conn {
  3672  	uniqNameMu.Lock()
  3673  	defer uniqNameMu.Unlock()
  3674  	uniqNameNext[baseName]++
  3675  	return &loggingConn{
  3676  		name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]),
  3677  		Conn: c,
  3678  	}
  3679  }
  3680  
  3681  func (c *loggingConn) Write(p []byte) (n int, err error) {
  3682  	log.Printf("%s.Write(%d) = ....", c.name, len(p))
  3683  	n, err = c.Conn.Write(p)
  3684  	log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err)
  3685  	return
  3686  }
  3687  
  3688  func (c *loggingConn) Read(p []byte) (n int, err error) {
  3689  	log.Printf("%s.Read(%d) = ....", c.name, len(p))
  3690  	n, err = c.Conn.Read(p)
  3691  	log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err)
  3692  	return
  3693  }
  3694  
  3695  func (c *loggingConn) Close() (err error) {
  3696  	log.Printf("%s.Close() = ...", c.name)
  3697  	err = c.Conn.Close()
  3698  	log.Printf("%s.Close() = %v", c.name, err)
  3699  	return
  3700  }
  3701  
  3702  // checkConnErrorWriter writes to c.rwc and records any write errors to c.werr.
  3703  // It only contains one field (and a pointer field at that), so it
  3704  // fits in an interface value without an extra allocation.
  3705  type checkConnErrorWriter struct {
  3706  	c *conn
  3707  }
  3708  
  3709  func (w checkConnErrorWriter) Write(p []byte) (n int, err error) {
  3710  	n, err = w.c.rwc.Write(p)
  3711  	if err != nil && w.c.werr == nil {
  3712  		w.c.werr = err
  3713  		w.c.cancelCtx()
  3714  	}
  3715  	return
  3716  }
  3717  
  3718  func numLeadingCRorLF(v []byte) (n int) {
  3719  	for _, b := range v {
  3720  		if b == '\r' || b == '\n' {
  3721  			n++
  3722  			continue
  3723  		}
  3724  		break
  3725  	}
  3726  	return
  3727  }
  3728  
  3729  // tlsRecordHeaderLooksLikeHTTP reports whether a TLS record header
  3730  // looks like it might've been a misdirected plaintext HTTP request.
  3731  func tlsRecordHeaderLooksLikeHTTP(hdr [5]byte) bool {
  3732  	switch string(hdr[:]) {
  3733  	case "GET /", "HEAD ", "POST ", "PUT /", "OPTIO":
  3734  		return true
  3735  	}
  3736  	return false
  3737  }
  3738  
  3739  // MaxBytesHandler returns a [Handler] that runs h with its [ResponseWriter] and [Request.Body] wrapped by a MaxBytesReader.
  3740  func MaxBytesHandler(h Handler, n int64) Handler {
  3741  	return HandlerFunc(func(w ResponseWriter, r *http.Request) {
  3742  		r2 := *r
  3743  		r2.Body = MaxBytesReader(w, r.Body, n)
  3744  		h.ServeHTTP(w, &r2)
  3745  	})
  3746  }
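
// Example (minimal sketch, from a consuming package): capping request bodies
// for an entire mux with MaxBytesHandler. The 1 MiB limit and mux variable
// are illustrative assumptions.
//
//	srv := &http.Server{
//		Addr:    ":8080",
//		Handler: http.MaxBytesHandler(mux, 1<<20),
//	}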