github.com/mtsmfm/go/src@v0.0.0-20221020090648-44bdcb9f8fde/net/http/server.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // HTTP server. See RFC 7230 through 7235.
     6  
     7  package http
     8  
     9  import (
    10  	"bufio"
    11  	"bytes"
    12  	"context"
    13  	"crypto/tls"
    14  	"errors"
    15  	"fmt"
    16  	"internal/godebug"
    17  	"io"
    18  	"log"
    19  	"math/rand"
    20  	"net"
    21  	"net/textproto"
    22  	"net/url"
    23  	urlpkg "net/url"
    24  	"path"
    25  	"runtime"
    26  	"sort"
    27  	"strconv"
    28  	"strings"
    29  	"sync"
    30  	"sync/atomic"
    31  	"time"
    32  
    33  	"golang.org/x/net/http/httpguts"
    34  )
    35  
    36  // Errors used by the HTTP server.
    37  var (
    38  	// ErrBodyNotAllowed is returned by ResponseWriter.Write calls
    39  	// when the HTTP method or response code does not permit a
    40  	// body.
    41  	ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body")
    42  
    43  	// ErrHijacked is returned by ResponseWriter.Write calls when
    44  	// the underlying connection has been hijacked using the
    45  	// Hijacker interface. A zero-byte write on a hijacked
    46  	// connection will return ErrHijacked without any other side
    47  	// effects.
    48  	ErrHijacked = errors.New("http: connection has been hijacked")
    49  
    50  	// ErrContentLength is returned by ResponseWriter.Write calls
    51  	// when a Handler set a Content-Length response header with a
    52  	// declared size and then attempted to write more bytes than
    53  	// declared.
    54  	ErrContentLength = errors.New("http: wrote more than the declared Content-Length")
    55  
    56  	// Deprecated: ErrWriteAfterFlush is no longer returned by
    57  	// anything in the net/http package. Callers should not
    58  	// compare errors against this variable.
    59  	ErrWriteAfterFlush = errors.New("unused")
    60  )
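
// A small sketch of when these errors surface in a handler (the handler
// name is illustrative, not part of net/http): writing a body after a
// 204 No Content status makes ResponseWriter.Write fail with
// ErrBodyNotAllowed.
func sketchBodyNotAllowed(w ResponseWriter, r *Request) {
	w.WriteHeader(StatusNoContent)
	if _, err := w.Write([]byte("ignored")); err != nil {
		// err is ErrBodyNotAllowed; the bytes are discarded.
		log.Printf("write failed: %v", err)
	}
}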
    61  
    62  // A Handler responds to an HTTP request.
    63  //
    64  // ServeHTTP should write reply headers and data to the ResponseWriter
    65  // and then return. Returning signals that the request is finished; it
    66  // is not valid to use the ResponseWriter or read from the
    67  // Request.Body after or concurrently with the completion of the
    68  // ServeHTTP call.
    69  //
    70  // Depending on the HTTP client software, HTTP protocol version, and
    71  // any intermediaries between the client and the Go server, it may not
    72  // be possible to read from the Request.Body after writing to the
    73  // ResponseWriter. Cautious handlers should read the Request.Body
    74  // first, and then reply.
    75  //
    76  // Except for reading the body, handlers should not modify the
    77  // provided Request.
    78  //
    79  // If ServeHTTP panics, the server (the caller of ServeHTTP) assumes
    80  // that the effect of the panic was isolated to the active request.
    81  // It recovers the panic, logs a stack trace to the server error log,
    82  // and either closes the network connection or sends an HTTP/2
    83  // RST_STREAM, depending on the HTTP protocol. To abort a handler so
    84  // the client sees an interrupted response but the server doesn't log
    85  // an error, panic with the value ErrAbortHandler.
    86  type Handler interface {
    87  	ServeHTTP(ResponseWriter, *Request)
    88  }
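
// A minimal sketch of a Handler implementation (the type name is
// illustrative, not part of net/http): ServeHTTP writes the reply and
// returns; the ResponseWriter must not be used after it returns.
type sketchHelloHandler struct{}

func (sketchHelloHandler) ServeHTTP(w ResponseWriter, r *Request) {
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	io.WriteString(w, "hello\n")
}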
    89  
    90  // A ResponseWriter interface is used by an HTTP handler to
    91  // construct an HTTP response.
    92  //
    93  // A ResponseWriter may not be used after the Handler.ServeHTTP method
    94  // has returned.
    95  type ResponseWriter interface {
    96  	// Header returns the header map that will be sent by
    97  	// WriteHeader. The Header map also is the mechanism with which
    98  	// Handlers can set HTTP trailers.
    99  	//
   100  	// Changing the header map after a call to WriteHeader (or
   101  	// Write) has no effect unless the HTTP status code was of the
   102  	// 1xx class or the modified headers are trailers.
   103  	//
   104  	// There are two ways to set Trailers. The preferred way is to
   105  	// predeclare in the headers which trailers you will later
   106  	// send by setting the "Trailer" header to the names of the
   107  	// trailer keys which will come later. In this case, those
   108  	// keys of the Header map are treated as if they were
   109  	// trailers. See the example. The second way, for trailer
   110  	// keys not known to the Handler until after the first Write,
   111  	// is to prefix the Header map keys with the TrailerPrefix
   112  	// constant value. See TrailerPrefix.
   113  	//
   114  	// To suppress automatic response headers (such as "Date"), set
   115  	// their value to nil.
   116  	Header() Header
   117  
   118  	// Write writes the data to the connection as part of an HTTP reply.
   119  	//
   120  	// If WriteHeader has not yet been called, Write calls
   121  	// WriteHeader(http.StatusOK) before writing the data. If the Header
   122  	// does not contain a Content-Type line, Write adds a Content-Type set
   123  	// to the result of passing the initial 512 bytes of written data to
   124  	// DetectContentType. Additionally, if the total size of all written
   125  	// data is under a few KB and there are no Flush calls, the
   126  	// Content-Length header is added automatically.
   127  	//
   128  	// Depending on the HTTP protocol version and the client, calling
   129  	// Write or WriteHeader may prevent future reads on the
   130  	// Request.Body. For HTTP/1.x requests, handlers should read any
   131  	// needed request body data before writing the response. Once the
   132  	// headers have been flushed (due to either an explicit Flusher.Flush
   133  	// call or writing enough data to trigger a flush), the request body
   134  	// may be unavailable. For HTTP/2 requests, the Go HTTP server permits
   135  	// handlers to continue to read the request body while concurrently
   136  	// writing the response. However, such behavior may not be supported
   137  	// by all HTTP/2 clients. Handlers should read before writing if
   138  	// possible to maximize compatibility.
   139  	Write([]byte) (int, error)
   140  
   141  	// WriteHeader sends an HTTP response header with the provided
   142  	// status code.
   143  	//
   144  	// If WriteHeader is not called explicitly, the first call to Write
   145  	// will trigger an implicit WriteHeader(http.StatusOK).
   146  	// Thus explicit calls to WriteHeader are mainly used to
   147  	// send error codes or 1xx informational responses.
   148  	//
   149  	// The provided code must be a valid HTTP 1xx-5xx status code.
   150  	// Any number of 1xx headers may be written, followed by at most
   151  	// one 2xx-5xx header. 1xx headers are sent immediately, but 2xx-5xx
   152  	// headers may be buffered. Use the Flusher interface to send
   153  	// buffered data. The header map is cleared when 2xx-5xx headers are
   154  	// sent, but not with 1xx headers.
   155  	//
   156  	// The server will automatically send a 100 (Continue) header
   157  	// on the first read from the request body if the request has
   158  	// an "Expect: 100-continue" header.
   159  	WriteHeader(statusCode int)
   160  }
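
// A brief sketch of the intended call order on a ResponseWriter (the
// function name is illustrative): set headers, then WriteHeader, then
// Write; a predeclared trailer may still be set after the body.
func sketchResponseWriterOrder(w ResponseWriter, r *Request) {
	w.Header().Set("Cache-Control", "no-cache") // headers must be set before WriteHeader
	w.Header().Set("Trailer", "X-Checksum")     // predeclare a trailer key
	w.WriteHeader(StatusAccepted)               // commits the status and headers
	io.WriteString(w, "processing\n")           // body bytes
	w.Header().Set("X-Checksum", "abc123")      // value for the predeclared trailer
}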
   161  
   162  // The Flusher interface is implemented by ResponseWriters that allow
   163  // an HTTP handler to flush buffered data to the client.
   164  //
   165  // The default HTTP/1.x and HTTP/2 ResponseWriter implementations
   166  // support Flusher, but ResponseWriter wrappers may not. Handlers
   167  // should always test for this ability at runtime.
   168  //
   169  // Note that even for ResponseWriters that support Flush,
   170  // if the client is connected through an HTTP proxy,
   171  // the buffered data may not reach the client until the response
   172  // completes.
   173  type Flusher interface {
   174  	// Flush sends any buffered data to the client.
   175  	Flush()
   176  }
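
// A sketch of the recommended runtime test for Flusher (the handler name
// is illustrative): ResponseWriter wrappers may not implement it, so use
// a type assertion rather than assuming support.
func sketchStreamingHandler(w ResponseWriter, r *Request) {
	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "tick %d\n", i)
		if f, ok := w.(Flusher); ok {
			f.Flush() // push the buffered chunk to the client now
		}
		time.Sleep(time.Second)
	}
}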
   177  
   178  // The Hijacker interface is implemented by ResponseWriters that allow
   179  // an HTTP handler to take over the connection.
   180  //
   181  // The default ResponseWriter for HTTP/1.x connections supports
   182  // Hijacker, but HTTP/2 connections intentionally do not.
   183  // ResponseWriter wrappers may also not support Hijacker. Handlers
   184  // should always test for this ability at runtime.
   185  type Hijacker interface {
   186  	// Hijack lets the caller take over the connection.
   187  	// After a call to Hijack the HTTP server library
   188  	// will not do anything else with the connection.
   189  	//
   190  	// It becomes the caller's responsibility to manage
   191  	// and close the connection.
   192  	//
   193  	// The returned net.Conn may have read or write deadlines
   194  	// already set, depending on the configuration of the
   195  	// Server. It is the caller's responsibility to set
   196  	// or clear those deadlines as needed.
   197  	//
   198  	// The returned bufio.Reader may contain unprocessed buffered
   199  	// data from the client.
   200  	//
   201  	// After a call to Hijack, the original Request.Body must not
   202  	// be used. The original Request's Context remains valid and
   203  	// is not canceled until the Request's ServeHTTP method
   204  	// returns.
   205  	Hijack() (net.Conn, *bufio.ReadWriter, error)
   206  }
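
// A sketch of taking over the connection via Hijacker (the handler name
// is illustrative). After Hijack succeeds, the caller owns the net.Conn
// and is responsible for closing it.
func sketchHijackHandler(w ResponseWriter, r *Request) {
	hj, ok := w.(Hijacker)
	if !ok {
		Error(w, "hijacking not supported", StatusInternalServerError)
		return
	}
	conn, buf, err := hj.Hijack()
	if err != nil {
		Error(w, err.Error(), StatusInternalServerError)
		return
	}
	defer conn.Close()
	// buf.Reader may already hold unprocessed bytes from the client.
	buf.WriteString("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
	buf.Flush()
}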
   207  
   208  // The CloseNotifier interface is implemented by ResponseWriters which
   209  // allow detecting when the underlying connection has gone away.
   210  //
   211  // This mechanism can be used to cancel long operations on the server
   212  // if the client has disconnected before the response is ready.
   213  //
   214  // Deprecated: the CloseNotifier interface predates Go's context package.
   215  // New code should use Request.Context instead.
   216  type CloseNotifier interface {
   217  	// CloseNotify returns a channel that receives at most a
   218  	// single value (true) when the client connection has gone
   219  	// away.
   220  	//
   221  	// CloseNotify may wait to notify until Request.Body has been
   222  	// fully read.
   223  	//
   224  	// After the Handler has returned, there is no guarantee
   225  	// that the channel receives a value.
   226  	//
   227  	// If the protocol is HTTP/1.1 and CloseNotify is called while
   228  	// processing an idempotent request (such as a GET) while
   229  	// HTTP/1.1 pipelining is in use, the arrival of a subsequent
   230  	// pipelined request may cause a value to be sent on the
   231  	// returned channel. In practice HTTP/1.1 pipelining is not
   232  	// enabled in browsers and not seen often in the wild. If this
   233  	// is a problem, use HTTP/2 or only use CloseNotify on methods
   234  	// such as POST.
   235  	CloseNotify() <-chan bool
   236  }
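
// Since CloseNotifier is deprecated, a sketch of the preferred pattern
// using Request.Context (the handler name is illustrative): the context
// is canceled when the client goes away or when ServeHTTP returns.
func sketchContextCancellation(w ResponseWriter, r *Request) {
	select {
	case <-time.After(5 * time.Second):
		io.WriteString(w, "done\n")
	case <-r.Context().Done():
		return // client disconnected; abandon the work
	}
}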
   237  
   238  var (
   239  	// ServerContextKey is a context key. It can be used in HTTP
   240  	// handlers with Context.Value to access the server that
   241  	// started the handler. The associated value will be of
   242  	// type *Server.
   243  	ServerContextKey = &contextKey{"http-server"}
   244  
   245  	// LocalAddrContextKey is a context key. It can be used in
   246  	// HTTP handlers with Context.Value to access the local
   247  	// address the connection arrived on.
   248  	// The associated value will be of type net.Addr.
   249  	LocalAddrContextKey = &contextKey{"local-addr"}
   250  )
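
// A sketch of reading these keys from a handler (the handler name is
// illustrative); the type assertions match the documented value types.
func sketchContextKeys(w ResponseWriter, r *Request) {
	srv, _ := r.Context().Value(ServerContextKey).(*Server)
	addr, _ := r.Context().Value(LocalAddrContextKey).(net.Addr)
	fmt.Fprintf(w, "server %p, local addr %v\n", srv, addr)
}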
   251  
   252  // A conn represents the server side of an HTTP connection.
   253  type conn struct {
   254  	// server is the server on which the connection arrived.
   255  	// Immutable; never nil.
   256  	server *Server
   257  
   258  	// cancelCtx cancels the connection-level context.
   259  	cancelCtx context.CancelFunc
   260  
   261  	// rwc is the underlying network connection.
   262  	// This is never wrapped by other types and is the value given out
   263  	// to CloseNotifier callers. It is usually of type *net.TCPConn or
   264  	// *tls.Conn.
   265  	rwc net.Conn
   266  
   267  	// remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously
   268  	// inside the Listener's Accept goroutine, as some implementations block.
   269  	// It is populated immediately inside the (*conn).serve goroutine.
   270  	// This is the value of a Handler's (*Request).RemoteAddr.
   271  	remoteAddr string
   272  
   273  	// tlsState is the TLS connection state when using TLS.
   274  	// nil means not TLS.
   275  	tlsState *tls.ConnectionState
   276  
   277  	// werr is set to the first write error to rwc.
   278  	// It is set via checkConnErrorWriter{c}, where bufw writes.
   279  	werr error
   280  
   281  	// r is bufr's read source. It's a wrapper around rwc that provides
   282  	// io.LimitedReader-style limiting (while reading request headers)
   283  	// and functionality to support CloseNotifier. See *connReader docs.
   284  	r *connReader
   285  
   286  	// bufr reads from r.
   287  	bufr *bufio.Reader
   288  
   289  	// bufw writes to checkConnErrorWriter{c}, which populates werr on error.
   290  	bufw *bufio.Writer
   291  
   292  	// lastMethod is the method of the most recent request
   293  	// on this connection, if any.
   294  	lastMethod string
   295  
   296  	curReq atomic.Pointer[response] // (which has a Request in it)
   297  
   298  	curState atomic.Uint64 // packed (unixtime<<8|uint8(ConnState))
   299  
   300  	// mu guards hijackedv
   301  	mu sync.Mutex
   302  
   303  	// hijackedv is whether this connection has been hijacked
   304  	// by a Handler with the Hijacker interface.
   305  	// It is guarded by mu.
   306  	hijackedv bool
   307  }
   308  
   309  func (c *conn) hijacked() bool {
   310  	c.mu.Lock()
   311  	defer c.mu.Unlock()
   312  	return c.hijackedv
   313  }
   314  
   315  // c.mu must be held.
   316  func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
   317  	if c.hijackedv {
   318  		return nil, nil, ErrHijacked
   319  	}
   320  	c.r.abortPendingRead()
   321  
   322  	c.hijackedv = true
   323  	rwc = c.rwc
   324  	rwc.SetDeadline(time.Time{})
   325  
   326  	buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc))
   327  	if c.r.hasByte {
   328  		if _, err := c.bufr.Peek(c.bufr.Buffered() + 1); err != nil {
   329  			return nil, nil, fmt.Errorf("unexpected Peek failure reading buffered byte: %v", err)
   330  		}
   331  	}
   332  	c.setState(rwc, StateHijacked, runHooks)
   333  	return
   334  }
   335  
   336  // This should be >= 512 bytes for DetectContentType,
   337  // but otherwise it's somewhat arbitrary.
   338  const bufferBeforeChunkingSize = 2048
   339  
   340  // chunkWriter writes to a response's conn buffer, and is the writer
   341  // wrapped by the response.w buffered writer.
   342  //
   343  // chunkWriter also is responsible for finalizing the Header, including
   344  // conditionally setting the Content-Type and setting a Content-Length
   345  // in cases where the handler's final output is smaller than the buffer
   346  // size. It also conditionally adds chunk headers, when in chunking mode.
   347  //
   348  // See the comment above (*response).Write for the entire write flow.
   349  type chunkWriter struct {
   350  	res *response
   351  
   352  	// header is either nil or a deep clone of res.handlerHeader
   353  	// at the time of res.writeHeader, if res.writeHeader is
   354  	// called and extra buffering is being done to calculate
   355  	// Content-Type and/or Content-Length.
   356  	header Header
   357  
   358  	// wroteHeader tells whether the header's been written to "the
   359  	// wire" (or rather: w.conn.buf). This is unlike
   360  	// (*response).wroteHeader, which tells only whether it was
   361  	// logically written.
   362  	wroteHeader bool
   363  
   364  	// set by the writeHeader method:
   365  	chunking bool // using chunked transfer encoding for reply body
   366  }
   367  
   368  var (
   369  	crlf       = []byte("\r\n")
   370  	colonSpace = []byte(": ")
   371  )
   372  
   373  func (cw *chunkWriter) Write(p []byte) (n int, err error) {
   374  	if !cw.wroteHeader {
   375  		cw.writeHeader(p)
   376  	}
   377  	if cw.res.req.Method == "HEAD" {
   378  		// Eat writes.
   379  		return len(p), nil
   380  	}
   381  	if cw.chunking {
   382  		_, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p))
   383  		if err != nil {
   384  			cw.res.conn.rwc.Close()
   385  			return
   386  		}
   387  	}
   388  	n, err = cw.res.conn.bufw.Write(p)
   389  	if cw.chunking && err == nil {
   390  		_, err = cw.res.conn.bufw.Write(crlf)
   391  	}
   392  	if err != nil {
   393  		cw.res.conn.rwc.Close()
   394  	}
   395  	return
   396  }
   397  
   398  func (cw *chunkWriter) flush() {
   399  	if !cw.wroteHeader {
   400  		cw.writeHeader(nil)
   401  	}
   402  	cw.res.conn.bufw.Flush()
   403  }
   404  
   405  func (cw *chunkWriter) close() {
   406  	if !cw.wroteHeader {
   407  		cw.writeHeader(nil)
   408  	}
   409  	if cw.chunking {
   410  		bw := cw.res.conn.bufw // conn's bufio writer
   411  		// zero chunk to mark EOF
   412  		bw.WriteString("0\r\n")
   413  		if trailers := cw.res.finalTrailers(); trailers != nil {
   414  			trailers.Write(bw) // the writer handles noting errors
   415  		}
   416  		// final blank line after the trailers (whether
   417  		// present or not)
   418  		bw.WriteString("\r\n")
   419  	}
   420  }
   421  
   422  // A response represents the server side of an HTTP response.
   423  type response struct {
   424  	conn             *conn
   425  	req              *Request // request for this response
   426  	reqBody          io.ReadCloser
   427  	cancelCtx        context.CancelFunc // when ServeHTTP exits
   428  	wroteHeader      bool               // a non-1xx header has been (logically) written
   429  	wroteContinue    bool               // 100 Continue response was written
   430  	wants10KeepAlive bool               // HTTP/1.0 w/ Connection "keep-alive"
   431  	wantsClose       bool               // HTTP request has Connection "close"
   432  
   433  	// canWriteContinue is an atomic boolean that says whether or
   434  	// not a 100 Continue header can be written to the
   435  	// connection.
   436  	// writeContinueMu must be held while writing the header.
   437  	// These two fields together synchronize the body reader (the
   438  	// expectContinueReader, which wants to write 100 Continue)
   439  	// against the main writer.
   440  	canWriteContinue atomic.Bool
   441  	writeContinueMu  sync.Mutex
   442  
   443  	w  *bufio.Writer // buffers output in chunks to chunkWriter
   444  	cw chunkWriter
   445  
   446  	// handlerHeader is the Header that Handlers get access to,
   447  	// which may be retained and mutated even after WriteHeader.
   448  	// handlerHeader is copied into cw.header at WriteHeader
   449  	// time, and privately mutated thereafter.
   450  	handlerHeader Header
   451  	calledHeader  bool // handler accessed handlerHeader via Header
   452  
   453  	written       int64 // number of bytes written in body
   454  	contentLength int64 // explicitly-declared Content-Length; or -1
   455  	status        int   // status code passed to WriteHeader
   456  
   457  	// close connection after this reply.  set on request and
   458  	// updated after response from handler if there's a
   459  	// "Connection: keep-alive" response header and a
   460  	// Content-Length.
   461  	closeAfterReply bool
   462  
   463  	// requestBodyLimitHit is set by requestTooLarge when
   464  	// maxBytesReader hits its max size. It is checked in
   465  	// WriteHeader, to make sure we don't consume the
   466  	// remaining request body to try to advance to the next HTTP
   467  	// request. Instead, when this is set, we stop reading
   468  	// subsequent requests on this connection and stop reading
   469  	// input from it.
   470  	requestBodyLimitHit bool
   471  
   472  	// trailers are the headers to be sent after the handler
   473  	// finishes writing the body. This field is initialized from
   474  	// the Trailer response header when the response header is
   475  	// written.
   476  	trailers []string
   477  
   478  	handlerDone atomic.Bool // set true when the handler exits
   479  
   480  	// Buffers for Date, Content-Length, and status code
   481  	dateBuf   [len(TimeFormat)]byte
   482  	clenBuf   [10]byte
   483  	statusBuf [3]byte
   484  
   485  	// closeNotifyCh is the channel returned by CloseNotify.
   486  	// TODO(bradfitz): this is currently (for Go 1.8) always
   487  	// non-nil. Make this lazily-created again as it used to be?
   488  	closeNotifyCh  chan bool
   489  	didCloseNotify int32 // atomic (only 0->1 winner should send)
   490  }
   491  
   492  // TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
   493  // that, if present, signals that the map entry is actually for
   494  // the response trailers, and not the response headers. The prefix
   495  // is stripped after the ServeHTTP call finishes and the values are
   496  // sent in the trailers.
   497  //
   498  // This mechanism is intended only for trailers that are not known
   499  // prior to the headers being written. If the set of trailers is fixed
   500  // or known before the header is written, the normal Go trailers mechanism
   501  // is preferred:
   502  //
   503  //	https://pkg.go.dev/net/http#ResponseWriter
   504  //	https://pkg.go.dev/net/http#example-ResponseWriter-Trailers
   505  const TrailerPrefix = "Trailer:"
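
// A sketch of the prefix mechanism for a trailer not known before the
// first Write (the handler name and trailer name are illustrative).
func sketchLateTrailer(w ResponseWriter, r *Request) {
	io.WriteString(w, "body\n")
	if f, ok := w.(Flusher); ok {
		f.Flush() // force chunked encoding so trailers can actually be sent
	}
	// "Trailer:" + key marks the entry as a trailer; the prefix is
	// stripped and the value is sent after the body.
	w.Header().Set(TrailerPrefix+"X-Streamed-Bytes", "5")
}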
   506  
   507  // finalTrailers is called after the Handler exits and returns a non-nil
   508  // value if the Handler set any trailers.
   509  func (w *response) finalTrailers() Header {
   510  	var t Header
   511  	for k, vv := range w.handlerHeader {
   512  		if kk, found := strings.CutPrefix(k, TrailerPrefix); found {
   513  			if t == nil {
   514  				t = make(Header)
   515  			}
   516  			t[kk] = vv
   517  		}
   518  	}
   519  	for _, k := range w.trailers {
   520  		if t == nil {
   521  			t = make(Header)
   522  		}
   523  		for _, v := range w.handlerHeader[k] {
   524  			t.Add(k, v)
   525  		}
   526  	}
   527  	return t
   528  }
   529  
   530  // declareTrailer is called for each Trailer header when the
   531  // response header is written. It notes that a header will need to be
   532  // written in the trailers at the end of the response.
   533  func (w *response) declareTrailer(k string) {
   534  	k = CanonicalHeaderKey(k)
   535  	if !httpguts.ValidTrailerHeader(k) {
   536  		// Forbidden by RFC 7230, section 4.1.2
   537  		return
   538  	}
   539  	w.trailers = append(w.trailers, k)
   540  }
   541  
   542  // requestTooLarge is called by maxBytesReader when too much input has
   543  // been read from the client.
   544  func (w *response) requestTooLarge() {
   545  	w.closeAfterReply = true
   546  	w.requestBodyLimitHit = true
   547  	if !w.wroteHeader {
   548  		w.Header().Set("Connection", "close")
   549  	}
   550  }
   551  
   552  // writerOnly hides an io.Writer value's optional ReadFrom method
   553  // from io.Copy.
   554  type writerOnly struct {
   555  	io.Writer
   556  }
   557  
   558  // ReadFrom is here to optimize copying from an *os.File regular file
   559  // to a *net.TCPConn with sendfile, or from a supported src type such
   560  // as a *net.TCPConn on Linux with splice.
   561  func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
   562  	bufp := copyBufPool.Get().(*[]byte)
   563  	buf := *bufp
   564  	defer copyBufPool.Put(bufp)
   565  
   566  	// Our underlying w.conn.rwc is usually a *TCPConn (with its
   567  	// own ReadFrom method). If not, just fall back to the normal
   568  	// copy method.
   569  	rf, ok := w.conn.rwc.(io.ReaderFrom)
   570  	if !ok {
   571  		return io.CopyBuffer(writerOnly{w}, src, buf)
   572  	}
   573  
   574  	// Copy the first sniffLen bytes before switching to ReadFrom.
   575  	// This ensures we don't start writing the response before the
   576  	// source is available (see golang.org/issue/5660) and provides
   577  	// enough bytes to perform Content-Type sniffing when required.
   578  	if !w.cw.wroteHeader {
   579  		n0, err := io.CopyBuffer(writerOnly{w}, io.LimitReader(src, sniffLen), buf)
   580  		n += n0
   581  		if err != nil || n0 < sniffLen {
   582  			return n, err
   583  		}
   584  	}
   585  
   586  	w.w.Flush()  // get rid of any previous writes
   587  	w.cw.flush() // make sure Header is written; flush data to rwc
   588  
   589  	// Now that cw has been flushed, its chunking field is guaranteed initialized.
   590  	if !w.cw.chunking && w.bodyAllowed() {
   591  		n0, err := rf.ReadFrom(src)
   592  		n += n0
   593  		w.written += n0
   594  		return n, err
   595  	}
   596  
   597  	n0, err := io.CopyBuffer(writerOnly{w}, src, buf)
   598  	n += n0
   599  	return n, err
   600  }
   601  
   602  // debugServerConnections controls whether all server connections are wrapped
   603  // with a verbose logging wrapper.
   604  const debugServerConnections = false
   605  
   606  // Create new connection from rwc.
   607  func (srv *Server) newConn(rwc net.Conn) *conn {
   608  	c := &conn{
   609  		server: srv,
   610  		rwc:    rwc,
   611  	}
   612  	if debugServerConnections {
   613  		c.rwc = newLoggingConn("server", c.rwc)
   614  	}
   615  	return c
   616  }
   617  
   618  type readResult struct {
   619  	_   incomparable
   620  	n   int
   621  	err error
   622  	b   byte // byte read, if n == 1
   623  }
   624  
   625  // connReader is the io.Reader wrapper used by *conn. It combines a
   626  // selectively-activated io.LimitedReader (to bound request header
   627  // read sizes) with support for selectively keeping an io.Reader.Read
   628  // call blocked in a background goroutine to wait for activity and
   629  // trigger a CloseNotifier channel.
   630  type connReader struct {
   631  	conn *conn
   632  
   633  	mu      sync.Mutex // guards following
   634  	hasByte bool
   635  	byteBuf [1]byte
   636  	cond    *sync.Cond
   637  	inRead  bool
   638  	aborted bool  // set true before conn.rwc deadline is set to past
   639  	remain  int64 // bytes remaining
   640  }
   641  
   642  func (cr *connReader) lock() {
   643  	cr.mu.Lock()
   644  	if cr.cond == nil {
   645  		cr.cond = sync.NewCond(&cr.mu)
   646  	}
   647  }
   648  
   649  func (cr *connReader) unlock() { cr.mu.Unlock() }
   650  
   651  func (cr *connReader) startBackgroundRead() {
   652  	cr.lock()
   653  	defer cr.unlock()
   654  	if cr.inRead {
   655  		panic("invalid concurrent Body.Read call")
   656  	}
   657  	if cr.hasByte {
   658  		return
   659  	}
   660  	cr.inRead = true
   661  	cr.conn.rwc.SetReadDeadline(time.Time{})
   662  	go cr.backgroundRead()
   663  }
   664  
   665  func (cr *connReader) backgroundRead() {
   666  	n, err := cr.conn.rwc.Read(cr.byteBuf[:])
   667  	cr.lock()
   668  	if n == 1 {
   669  		cr.hasByte = true
   670  		// We were past the end of the previous request's body already
   671  		// (since we wouldn't be in a background read otherwise), so
   672  		// this is a pipelined HTTP request. Prior to Go 1.11 we used to
   673  		// send on the CloseNotify channel and cancel the context here,
   674  		// but the behavior was documented as only "may", and we only
   675  		// did that because that's how CloseNotify accidentally behaved
   676  		// in very early Go releases prior to context support. Once we
   677  		// added context support, people used a Handler's
   678  		// Request.Context() and passed it along. Having that context
   679  		// cancel on pipelined HTTP requests caused problems.
   680  		// Fortunately, almost nothing uses HTTP/1.x pipelining.
   681  		// Unfortunately, apt-get does, or sometimes does.
   682  		// New Go 1.11 behavior: don't fire CloseNotify or cancel
   683  		// contexts on pipelined requests. Shouldn't affect people, but
   684  		// fixes cases like Issue 23921. This does mean that a client
   685  		// closing their TCP connection after sending a pipelined
   686  		// request won't cancel the context, but we'll catch that on any
   687  		// write failure (in checkConnErrorWriter.Write).
   688  		// If the server never writes, yes, there are still contrived
   689  		// server & client behaviors where this fails to ever cancel the
   690  		// context, but that's kinda why HTTP/1.x pipelining died
   691  		// anyway.
   692  	}
   693  	if ne, ok := err.(net.Error); ok && cr.aborted && ne.Timeout() {
   694  		// Ignore this error. It's the expected error from
   695  		// another goroutine calling abortPendingRead.
   696  	} else if err != nil {
   697  		cr.handleReadError(err)
   698  	}
   699  	cr.aborted = false
   700  	cr.inRead = false
   701  	cr.unlock()
   702  	cr.cond.Broadcast()
   703  }
   704  
   705  func (cr *connReader) abortPendingRead() {
   706  	cr.lock()
   707  	defer cr.unlock()
   708  	if !cr.inRead {
   709  		return
   710  	}
   711  	cr.aborted = true
   712  	cr.conn.rwc.SetReadDeadline(aLongTimeAgo)
   713  	for cr.inRead {
   714  		cr.cond.Wait()
   715  	}
   716  	cr.conn.rwc.SetReadDeadline(time.Time{})
   717  }
   718  
   719  func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain }
   720  func (cr *connReader) setInfiniteReadLimit()     { cr.remain = maxInt64 }
   721  func (cr *connReader) hitReadLimit() bool        { return cr.remain <= 0 }
   722  
   723  // handleReadError is called whenever a Read from the client returns a
   724  // non-nil error.
   725  //
   726  // The provided non-nil err is almost always io.EOF or a "use of
   727  // closed network connection". In any case, the error is not
   728  // particularly interesting, except perhaps for debugging during
   729  // development. Any error means the connection is dead and we should
   730  // down its context.
   731  //
   732  // It may be called from multiple goroutines.
   733  func (cr *connReader) handleReadError(_ error) {
   734  	cr.conn.cancelCtx()
   735  	cr.closeNotify()
   736  }
   737  
   738  // may be called from multiple goroutines.
   739  func (cr *connReader) closeNotify() {
   740  	res := cr.conn.curReq.Load()
   741  	if res != nil && atomic.CompareAndSwapInt32(&res.didCloseNotify, 0, 1) {
   742  		res.closeNotifyCh <- true
   743  	}
   744  }
   745  
   746  func (cr *connReader) Read(p []byte) (n int, err error) {
   747  	cr.lock()
   748  	if cr.inRead {
   749  		cr.unlock()
   750  		if cr.conn.hijacked() {
   751  			panic("invalid Body.Read call. After hijacked, the original Request must not be used")
   752  		}
   753  		panic("invalid concurrent Body.Read call")
   754  	}
   755  	if cr.hitReadLimit() {
   756  		cr.unlock()
   757  		return 0, io.EOF
   758  	}
   759  	if len(p) == 0 {
   760  		cr.unlock()
   761  		return 0, nil
   762  	}
   763  	if int64(len(p)) > cr.remain {
   764  		p = p[:cr.remain]
   765  	}
   766  	if cr.hasByte {
   767  		p[0] = cr.byteBuf[0]
   768  		cr.hasByte = false
   769  		cr.unlock()
   770  		return 1, nil
   771  	}
   772  	cr.inRead = true
   773  	cr.unlock()
   774  	n, err = cr.conn.rwc.Read(p)
   775  
   776  	cr.lock()
   777  	cr.inRead = false
   778  	if err != nil {
   779  		cr.handleReadError(err)
   780  	}
   781  	cr.remain -= int64(n)
   782  	cr.unlock()
   783  
   784  	cr.cond.Broadcast()
   785  	return n, err
   786  }
   787  
   788  var (
   789  	bufioReaderPool   sync.Pool
   790  	bufioWriter2kPool sync.Pool
   791  	bufioWriter4kPool sync.Pool
   792  )
   793  
   794  var copyBufPool = sync.Pool{
   795  	New: func() any {
   796  		b := make([]byte, 32*1024)
   797  		return &b
   798  	},
   799  }
   800  
   801  func bufioWriterPool(size int) *sync.Pool {
   802  	switch size {
   803  	case 2 << 10:
   804  		return &bufioWriter2kPool
   805  	case 4 << 10:
   806  		return &bufioWriter4kPool
   807  	}
   808  	return nil
   809  }
   810  
   811  func newBufioReader(r io.Reader) *bufio.Reader {
   812  	if v := bufioReaderPool.Get(); v != nil {
   813  		br := v.(*bufio.Reader)
   814  		br.Reset(r)
   815  		return br
   816  	}
   817  	// Note: if this reader size is ever changed, update
   818  	// TestHandlerBodyClose's assumptions.
   819  	return bufio.NewReader(r)
   820  }
   821  
   822  func putBufioReader(br *bufio.Reader) {
   823  	br.Reset(nil)
   824  	bufioReaderPool.Put(br)
   825  }
   826  
   827  func newBufioWriterSize(w io.Writer, size int) *bufio.Writer {
   828  	pool := bufioWriterPool(size)
   829  	if pool != nil {
   830  		if v := pool.Get(); v != nil {
   831  			bw := v.(*bufio.Writer)
   832  			bw.Reset(w)
   833  			return bw
   834  		}
   835  	}
   836  	return bufio.NewWriterSize(w, size)
   837  }
   838  
   839  func putBufioWriter(bw *bufio.Writer) {
   840  	bw.Reset(nil)
   841  	if pool := bufioWriterPool(bw.Available()); pool != nil {
   842  		pool.Put(bw)
   843  	}
   844  }
   845  
   846  // DefaultMaxHeaderBytes is the maximum permitted size of the headers
   847  // in an HTTP request.
   848  // This can be overridden by setting Server.MaxHeaderBytes.
   849  const DefaultMaxHeaderBytes = 1 << 20 // 1 MB
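
// A sketch of overriding the default header limit on a Server (the
// values shown are illustrative).
func sketchHeaderLimitServer() *Server {
	return &Server{
		Addr:           ":8080",
		MaxHeaderBytes: 64 << 10, // allow at most 64 KB of request headers
	}
}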
   850  
   851  func (srv *Server) maxHeaderBytes() int {
   852  	if srv.MaxHeaderBytes > 0 {
   853  		return srv.MaxHeaderBytes
   854  	}
   855  	return DefaultMaxHeaderBytes
   856  }
   857  
   858  func (srv *Server) initialReadLimitSize() int64 {
   859  	return int64(srv.maxHeaderBytes()) + 4096 // bufio slop
   860  }
   861  
   862  // tlsHandshakeTimeout returns the time limit permitted for the TLS
   863  // handshake, or zero for unlimited.
   864  //
   865  // It returns the minimum of any positive ReadHeaderTimeout,
   866  // ReadTimeout, or WriteTimeout.
   867  func (srv *Server) tlsHandshakeTimeout() time.Duration {
   868  	var ret time.Duration
   869  	for _, v := range [...]time.Duration{
   870  		srv.ReadHeaderTimeout,
   871  		srv.ReadTimeout,
   872  		srv.WriteTimeout,
   873  	} {
   874  		if v <= 0 {
   875  			continue
   876  		}
   877  		if ret == 0 || v < ret {
   878  			ret = v
   879  		}
   880  	}
   881  	return ret
   882  }
   883  
   884  // wrapper around io.ReadCloser which, on first read, sends an
   885  // HTTP/1.1 100 Continue header
   886  type expectContinueReader struct {
   887  	resp       *response
   888  	readCloser io.ReadCloser
   889  	closed     atomic.Bool
   890  	sawEOF     atomic.Bool
   891  }
   892  
   893  func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
   894  	if ecr.closed.Load() {
   895  		return 0, ErrBodyReadAfterClose
   896  	}
   897  	w := ecr.resp
   898  	if !w.wroteContinue && w.canWriteContinue.Load() && !w.conn.hijacked() {
   899  		w.wroteContinue = true
   900  		w.writeContinueMu.Lock()
   901  		if w.canWriteContinue.Load() {
   902  			w.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
   903  			w.conn.bufw.Flush()
   904  			w.canWriteContinue.Store(false)
   905  		}
   906  		w.writeContinueMu.Unlock()
   907  	}
   908  	n, err = ecr.readCloser.Read(p)
   909  	if err == io.EOF {
   910  		ecr.sawEOF.Store(true)
   911  	}
   912  	return
   913  }
   914  
   915  func (ecr *expectContinueReader) Close() error {
   916  	ecr.closed.Store(true)
   917  	return ecr.readCloser.Close()
   918  }
   919  
   920  // TimeFormat is the time format to use when generating times in HTTP
   921  // headers. It is like time.RFC1123 but hard-codes GMT as the time
   922  // zone. The time being formatted must be in UTC for Format to
   923  // generate the correct format.
   924  //
   925  // For parsing this time format, see ParseTime.
   926  const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
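
// A short sketch of producing and parsing a header date with TimeFormat
// (the function name is illustrative); the time must be in UTC when
// formatting.
func sketchHeaderDate() (string, time.Time, error) {
	s := time.Now().UTC().Format(TimeFormat) // e.g. "Sun, 06 Nov 1994 08:49:37 GMT"
	t, err := ParseTime(s)                   // ParseTime also accepts RFC 850 and ANSI C formats
	return s, t, err
}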
   927  
   928  // appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat))
   929  func appendTime(b []byte, t time.Time) []byte {
   930  	const days = "SunMonTueWedThuFriSat"
   931  	const months = "JanFebMarAprMayJunJulAugSepOctNovDec"
   932  
   933  	t = t.UTC()
   934  	yy, mm, dd := t.Date()
   935  	hh, mn, ss := t.Clock()
   936  	day := days[3*t.Weekday():]
   937  	mon := months[3*(mm-1):]
   938  
   939  	return append(b,
   940  		day[0], day[1], day[2], ',', ' ',
   941  		byte('0'+dd/10), byte('0'+dd%10), ' ',
   942  		mon[0], mon[1], mon[2], ' ',
   943  		byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ',
   944  		byte('0'+hh/10), byte('0'+hh%10), ':',
   945  		byte('0'+mn/10), byte('0'+mn%10), ':',
   946  		byte('0'+ss/10), byte('0'+ss%10), ' ',
   947  		'G', 'M', 'T')
   948  }
   949  
   950  var errTooLarge = errors.New("http: request too large")
   951  
   952  // Read next request from connection.
   953  func (c *conn) readRequest(ctx context.Context) (w *response, err error) {
   954  	if c.hijacked() {
   955  		return nil, ErrHijacked
   956  	}
   957  
   958  	var (
   959  		wholeReqDeadline time.Time // or zero if none
   960  		hdrDeadline      time.Time // or zero if none
   961  	)
   962  	t0 := time.Now()
   963  	if d := c.server.readHeaderTimeout(); d > 0 {
   964  		hdrDeadline = t0.Add(d)
   965  	}
   966  	if d := c.server.ReadTimeout; d > 0 {
   967  		wholeReqDeadline = t0.Add(d)
   968  	}
   969  	c.rwc.SetReadDeadline(hdrDeadline)
   970  	if d := c.server.WriteTimeout; d > 0 {
   971  		defer func() {
   972  			c.rwc.SetWriteDeadline(time.Now().Add(d))
   973  		}()
   974  	}
   975  
   976  	c.r.setReadLimit(c.server.initialReadLimitSize())
   977  	if c.lastMethod == "POST" {
   978  		// RFC 7230 section 3 tolerance for old buggy clients.
   979  		peek, _ := c.bufr.Peek(4) // ReadRequest will get err below
   980  		c.bufr.Discard(numLeadingCRorLF(peek))
   981  	}
   982  	req, err := readRequest(c.bufr)
   983  	if err != nil {
   984  		if c.r.hitReadLimit() {
   985  			return nil, errTooLarge
   986  		}
   987  		return nil, err
   988  	}
   989  
   990  	if !http1ServerSupportsRequest(req) {
   991  		return nil, statusError{StatusHTTPVersionNotSupported, "unsupported protocol version"}
   992  	}
   993  
   994  	c.lastMethod = req.Method
   995  	c.r.setInfiniteReadLimit()
   996  
   997  	hosts, haveHost := req.Header["Host"]
   998  	isH2Upgrade := req.isH2Upgrade()
   999  	if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade && req.Method != "CONNECT" {
  1000  		return nil, badRequestError("missing required Host header")
  1001  	}
  1002  	if len(hosts) == 1 && !httpguts.ValidHostHeader(hosts[0]) {
  1003  		return nil, badRequestError("malformed Host header")
  1004  	}
  1005  	for k, vv := range req.Header {
  1006  		if !httpguts.ValidHeaderFieldName(k) {
  1007  			return nil, badRequestError("invalid header name")
  1008  		}
  1009  		for _, v := range vv {
  1010  			if !httpguts.ValidHeaderFieldValue(v) {
  1011  				return nil, badRequestError("invalid header value")
  1012  			}
  1013  		}
  1014  	}
  1015  	delete(req.Header, "Host")
  1016  
  1017  	ctx, cancelCtx := context.WithCancel(ctx)
  1018  	req.ctx = ctx
  1019  	req.RemoteAddr = c.remoteAddr
  1020  	req.TLS = c.tlsState
  1021  	if body, ok := req.Body.(*body); ok {
  1022  		body.doEarlyClose = true
  1023  	}
  1024  
  1025  	// Adjust the read deadline if necessary.
  1026  	if !hdrDeadline.Equal(wholeReqDeadline) {
  1027  		c.rwc.SetReadDeadline(wholeReqDeadline)
  1028  	}
  1029  
  1030  	w = &response{
  1031  		conn:          c,
  1032  		cancelCtx:     cancelCtx,
  1033  		req:           req,
  1034  		reqBody:       req.Body,
  1035  		handlerHeader: make(Header),
  1036  		contentLength: -1,
  1037  		closeNotifyCh: make(chan bool, 1),
  1038  
  1039  		// We populate these ahead of time so we're not
  1040  		// reading from req.Header after their Handler starts
  1041  		// and maybe mutates it (Issue 14940)
  1042  		wants10KeepAlive: req.wantsHttp10KeepAlive(),
  1043  		wantsClose:       req.wantsClose(),
  1044  	}
  1045  	if isH2Upgrade {
  1046  		w.closeAfterReply = true
  1047  	}
  1048  	w.cw.res = w
  1049  	w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize)
  1050  	return w, nil
  1051  }
  1052  
  1053  // http1ServerSupportsRequest reports whether Go's HTTP/1.x server
  1054  // supports the given request.
  1055  func http1ServerSupportsRequest(req *Request) bool {
  1056  	if req.ProtoMajor == 1 {
  1057  		return true
  1058  	}
  1059  	// Accept "PRI * HTTP/2.0" upgrade requests, so Handlers can
  1060  	// wire up their own HTTP/2 upgrades.
  1061  	if req.ProtoMajor == 2 && req.ProtoMinor == 0 &&
  1062  		req.Method == "PRI" && req.RequestURI == "*" {
  1063  		return true
  1064  	}
  1065  	// Reject HTTP/0.x, and all other HTTP/2+ requests (which
  1066  	// aren't encoded in ASCII anyway).
  1067  	return false
  1068  }
  1069  
  1070  func (w *response) Header() Header {
  1071  	if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader {
  1072  		// Accessing the header between logically writing it
  1073  		// and physically writing it means we need to allocate
  1074  		// a clone to snapshot the logically written state.
  1075  		w.cw.header = w.handlerHeader.Clone()
  1076  	}
  1077  	w.calledHeader = true
  1078  	return w.handlerHeader
  1079  }
  1080  
  1081  // maxPostHandlerReadBytes is the max number of Request.Body bytes not
  1082  // consumed by a handler that the server will read from the client
  1083  // in order to keep a connection alive. If there are more bytes than
  1084  // this, the server, to be paranoid, instead sends a "Connection:
  1085  // close" response.
  1086  //
  1087  // This number is approximately what a typical machine's TCP buffer
  1088  // size is anyway.  (if we have the bytes on the machine, we might as
  1089  // well read them)
  1090  const maxPostHandlerReadBytes = 256 << 10
  1091  
  1092  func checkWriteHeaderCode(code int) {
  1093  	// Issue 22880: require valid WriteHeader status codes.
  1094  	// For now we only enforce that it's three digits.
  1095  	// In the future we might block things over 599 (600 and above aren't defined
  1096  	// at https://httpwg.org/specs/rfc7231.html#status.codes).
  1097  	// But for now any three digits.
  1098  	//
  1099  	// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
  1100  	// no equivalent bogus thing we can realistically send in HTTP/2,
  1101  	// so we'll consistently panic instead and help people find their bugs
  1102  	// early. (We can't return an error from WriteHeader even if we wanted to.)
  1103  	if code < 100 || code > 999 {
  1104  		panic(fmt.Sprintf("invalid WriteHeader code %v", code))
  1105  	}
  1106  }
  1107  
  1108  // relevantCaller searches the call stack for the first function outside of net/http.
  1109  // The purpose of this function is to provide more helpful error messages.
  1110  func relevantCaller() runtime.Frame {
  1111  	pc := make([]uintptr, 16)
  1112  	n := runtime.Callers(1, pc)
  1113  	frames := runtime.CallersFrames(pc[:n])
  1114  	var frame runtime.Frame
  1115  	for {
  1116  		frame, more := frames.Next()
  1117  		if !strings.HasPrefix(frame.Function, "net/http.") {
  1118  			return frame
  1119  		}
  1120  		if !more {
  1121  			break
  1122  		}
  1123  	}
  1124  	return frame
  1125  }
  1126  
  1127  func (w *response) WriteHeader(code int) {
  1128  	if w.conn.hijacked() {
  1129  		caller := relevantCaller()
  1130  		w.conn.server.logf("http: response.WriteHeader on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
  1131  		return
  1132  	}
  1133  	if w.wroteHeader {
  1134  		caller := relevantCaller()
  1135  		w.conn.server.logf("http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
  1136  		return
  1137  	}
  1138  	checkWriteHeaderCode(code)
  1139  
  1140  	// Handle informational headers
  1141  	if code >= 100 && code <= 199 {
  1142  		// Prevent a potential race with an automatically-sent 100 Continue triggered by Request.Body.Read()
  1143  		if code == 100 && w.canWriteContinue.Load() {
  1144  			w.writeContinueMu.Lock()
  1145  			w.canWriteContinue.Store(false)
  1146  			w.writeContinueMu.Unlock()
  1147  		}
  1148  
  1149  		writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:])
  1150  
  1151  		// Per RFC 8297 we must not clear the current header map
  1152  		w.handlerHeader.WriteSubset(w.conn.bufw, excludedHeadersNoBody)
  1153  		w.conn.bufw.Write(crlf)
  1154  		w.conn.bufw.Flush()
  1155  
  1156  		return
  1157  	}
  1158  
  1159  	w.wroteHeader = true
  1160  	w.status = code
  1161  
  1162  	if w.calledHeader && w.cw.header == nil {
  1163  		w.cw.header = w.handlerHeader.Clone()
  1164  	}
  1165  
  1166  	if cl := w.handlerHeader.get("Content-Length"); cl != "" {
  1167  		v, err := strconv.ParseInt(cl, 10, 64)
  1168  		if err == nil && v >= 0 {
  1169  			w.contentLength = v
  1170  		} else {
  1171  			w.conn.server.logf("http: invalid Content-Length of %q", cl)
  1172  			w.handlerHeader.Del("Content-Length")
  1173  		}
  1174  	}
  1175  }
  1176  
  1177  // extraHeader is the set of headers sometimes added by chunkWriter.writeHeader.
  1178  // This type is used to avoid extra allocations from cloning and/or populating
  1179  // the response Header map and all its 1-element slices.
  1180  type extraHeader struct {
  1181  	contentType      string
  1182  	connection       string
  1183  	transferEncoding string
  1184  	date             []byte // written if not nil
  1185  	contentLength    []byte // written if not nil
  1186  }
  1187  
  1188  // Sorted the same as extraHeader.Write's loop.
  1189  var extraHeaderKeys = [][]byte{
  1190  	[]byte("Content-Type"),
  1191  	[]byte("Connection"),
  1192  	[]byte("Transfer-Encoding"),
  1193  }
  1194  
  1195  var (
  1196  	headerContentLength = []byte("Content-Length: ")
  1197  	headerDate          = []byte("Date: ")
  1198  )
  1199  
  1200  // Write writes the headers described in h to w.
  1201  //
  1202  // This method has a value receiver, despite the somewhat large size
  1203  // of h, because it prevents an allocation. The escape analysis isn't
  1204  // smart enough to realize this function doesn't mutate h.
  1205  func (h extraHeader) Write(w *bufio.Writer) {
  1206  	if h.date != nil {
  1207  		w.Write(headerDate)
  1208  		w.Write(h.date)
  1209  		w.Write(crlf)
  1210  	}
  1211  	if h.contentLength != nil {
  1212  		w.Write(headerContentLength)
  1213  		w.Write(h.contentLength)
  1214  		w.Write(crlf)
  1215  	}
  1216  	for i, v := range []string{h.contentType, h.connection, h.transferEncoding} {
  1217  		if v != "" {
  1218  			w.Write(extraHeaderKeys[i])
  1219  			w.Write(colonSpace)
  1220  			w.WriteString(v)
  1221  			w.Write(crlf)
  1222  		}
  1223  	}
  1224  }
  1225  
  1226  // writeHeader finalizes the header sent to the client and writes it
  1227  // to cw.res.conn.bufw.
  1228  //
  1229  // p is not written by writeHeader, but is the first chunk of the body
  1230  // that will be written. It is sniffed for a Content-Type if none is
  1231  // set explicitly. It's also used to set the Content-Length, if the
  1232  // total body size was small and the handler has already finished
  1233  // running.
  1234  func (cw *chunkWriter) writeHeader(p []byte) {
  1235  	if cw.wroteHeader {
  1236  		return
  1237  	}
  1238  	cw.wroteHeader = true
  1239  
  1240  	w := cw.res
  1241  	keepAlivesEnabled := w.conn.server.doKeepAlives()
  1242  	isHEAD := w.req.Method == "HEAD"
  1243  
  1244  	// header is written out to w.conn.buf below. Depending on the
  1245  	// state of the handler, we either own the map or not. If we
  1246  	// don't own it, the exclude map is created lazily for
  1247  	// WriteSubset to remove headers. The setHeader struct holds
  1248  	// headers we need to add.
  1249  	header := cw.header
  1250  	owned := header != nil
  1251  	if !owned {
  1252  		header = w.handlerHeader
  1253  	}
  1254  	var excludeHeader map[string]bool
  1255  	delHeader := func(key string) {
  1256  		if owned {
  1257  			header.Del(key)
  1258  			return
  1259  		}
  1260  		if _, ok := header[key]; !ok {
  1261  			return
  1262  		}
  1263  		if excludeHeader == nil {
  1264  			excludeHeader = make(map[string]bool)
  1265  		}
  1266  		excludeHeader[key] = true
  1267  	}
  1268  	var setHeader extraHeader
  1269  
  1270  	// Don't write out the fake "Trailer:foo" keys. See TrailerPrefix.
  1271  	trailers := false
  1272  	for k := range cw.header {
  1273  		if strings.HasPrefix(k, TrailerPrefix) {
  1274  			if excludeHeader == nil {
  1275  				excludeHeader = make(map[string]bool)
  1276  			}
  1277  			excludeHeader[k] = true
  1278  			trailers = true
  1279  		}
  1280  	}
  1281  	for _, v := range cw.header["Trailer"] {
  1282  		trailers = true
  1283  		foreachHeaderElement(v, cw.res.declareTrailer)
  1284  	}
  1285  
  1286  	te := header.get("Transfer-Encoding")
  1287  	hasTE := te != ""
  1288  
  1289  	// If the handler is done but never sent a Content-Length
  1290  	// response header and this is our first (and last) write, set
  1291  	// it, even to zero. This helps HTTP/1.0 clients keep their
  1292  	// "keep-alive" connections alive.
  1293  	// Exceptions: 304/204/1xx responses never get Content-Length, and if
  1294  	// it was a HEAD request, we don't know the difference between
  1295  	// 0 actual bytes and 0 bytes because the handler noticed it
  1296  	// was a HEAD request and chose not to write anything. So for
  1297  	// HEAD, the handler should either write the Content-Length or
  1298  	// write non-zero bytes. If it's actually 0 bytes and the
  1299  	// handler never looked at the Request.Method, we just don't
  1300  	// send a Content-Length header.
  1301  	// Further, we don't send an automatic Content-Length if they
  1302  	// set a Transfer-Encoding, because they're generally incompatible.
  1303  	if w.handlerDone.Load() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
  1304  		w.contentLength = int64(len(p))
  1305  		setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
  1306  	}
  1307  
  1308  	// If this was an HTTP/1.0 request with keep-alive and we sent a
  1309  	// Content-Length back, we can make this a keep-alive response ...
  1310  	if w.wants10KeepAlive && keepAlivesEnabled {
  1311  		sentLength := header.get("Content-Length") != ""
  1312  		if sentLength && header.get("Connection") == "keep-alive" {
  1313  			w.closeAfterReply = false
  1314  		}
  1315  	}
  1316  
  1317  	// Check for an explicit (and valid) Content-Length header.
  1318  	hasCL := w.contentLength != -1
  1319  
  1320  	if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) {
  1321  		_, connectionHeaderSet := header["Connection"]
  1322  		if !connectionHeaderSet {
  1323  			setHeader.connection = "keep-alive"
  1324  		}
  1325  	} else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose {
  1326  		w.closeAfterReply = true
  1327  	}
  1328  
  1329  	if header.get("Connection") == "close" || !keepAlivesEnabled {
  1330  		w.closeAfterReply = true
  1331  	}
  1332  
  1333  	// If the client wanted a 100-continue but we never sent it to
  1334  	// them (or, more strictly: we never finished reading their
  1335  	// request body), don't reuse this connection because it's now
  1336  	// in an unknown state: we might be sending this response at
  1337  	// the same time the client is now sending its request body
  1338  	// after a timeout.  (Some HTTP clients send Expect:
  1339  	// 100-continue but knowing that some servers don't support
  1340  	// it, the clients set a timer and send the body later anyway)
  1341  	// If we haven't seen EOF, we can't skip over the unread body
  1342  	// because we don't know if the next bytes on the wire will be
  1343  	// the body-following-the-timer or the subsequent request.
  1344  	// See Issue 11549.
  1345  	if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF.Load() {
  1346  		w.closeAfterReply = true
  1347  	}
  1348  
  1349  	// Per RFC 2616, we should consume the request body before
  1350  	// replying, if the handler hasn't already done so. But we
  1351  	// don't want to do an unbounded amount of reading here for
  1352  	// DoS reasons, so we only try up to a threshold.
  1353  	// TODO(bradfitz): where does RFC 2616 say that? See Issue 15527
  1354  	// about HTTP/1.x Handlers concurrently reading and writing, like
  1355  	// HTTP/2 handlers can do. Maybe this code should be relaxed?
  1356  	if w.req.ContentLength != 0 && !w.closeAfterReply {
  1357  		var discard, tooBig bool
  1358  
  1359  		switch bdy := w.req.Body.(type) {
  1360  		case *expectContinueReader:
  1361  			if bdy.resp.wroteContinue {
  1362  				discard = true
  1363  			}
  1364  		case *body:
  1365  			bdy.mu.Lock()
  1366  			switch {
  1367  			case bdy.closed:
  1368  				if !bdy.sawEOF {
  1369  					// Body was closed in handler with non-EOF error.
  1370  					w.closeAfterReply = true
  1371  				}
  1372  			case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes:
  1373  				tooBig = true
  1374  			default:
  1375  				discard = true
  1376  			}
  1377  			bdy.mu.Unlock()
  1378  		default:
  1379  			discard = true
  1380  		}
  1381  
  1382  		if discard {
  1383  			_, err := io.CopyN(io.Discard, w.reqBody, maxPostHandlerReadBytes+1)
  1384  			switch err {
  1385  			case nil:
  1386  				// There must be even more data left over.
  1387  				tooBig = true
  1388  			case ErrBodyReadAfterClose:
  1389  				// Body was already consumed and closed.
  1390  			case io.EOF:
  1391  				// The remaining body was just consumed, close it.
  1392  				err = w.reqBody.Close()
  1393  				if err != nil {
  1394  					w.closeAfterReply = true
  1395  				}
  1396  			default:
  1397  				// Some other kind of error occurred, like a read timeout, or
  1398  				// corrupt chunked encoding. In any case, whatever remains
  1399  				// on the wire must not be parsed as another HTTP request.
  1400  				w.closeAfterReply = true
  1401  			}
  1402  		}
  1403  
  1404  		if tooBig {
  1405  			w.requestTooLarge()
  1406  			delHeader("Connection")
  1407  			setHeader.connection = "close"
  1408  		}
  1409  	}
  1410  
  1411  	code := w.status
  1412  	if bodyAllowedForStatus(code) {
  1413  		// If no content type, apply sniffing algorithm to body.
  1414  		_, haveType := header["Content-Type"]
  1415  
  1416  		// If the Content-Encoding was set and is non-blank,
  1417  		// we shouldn't sniff the body. See Issue 31753.
  1418  		ce := header.Get("Content-Encoding")
  1419  		hasCE := len(ce) > 0
  1420  		if !hasCE && !haveType && !hasTE && len(p) > 0 {
  1421  			setHeader.contentType = DetectContentType(p)
  1422  		}
  1423  	} else {
  1424  		for _, k := range suppressedHeaders(code) {
  1425  			delHeader(k)
  1426  		}
  1427  	}
  1428  
  1429  	if !header.has("Date") {
  1430  		setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
  1431  	}
  1432  
  1433  	if hasCL && hasTE && te != "identity" {
  1434  		// TODO: return an error if WriteHeader gets a return parameter
  1435  		// For now just ignore the Content-Length.
  1436  		w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
  1437  			te, w.contentLength)
  1438  		delHeader("Content-Length")
  1439  		hasCL = false
  1440  	}
  1441  
  1442  	if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) || code == StatusNoContent {
  1443  		// Response has no body.
  1444  		delHeader("Transfer-Encoding")
  1445  	} else if hasCL {
  1446  		// Content-Length has been provided, so no chunking is to be done.
  1447  		delHeader("Transfer-Encoding")
  1448  	} else if w.req.ProtoAtLeast(1, 1) {
  1449  		// HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no
  1450  		// content-length has been provided. The connection must be closed after the
  1451  		// reply is written, and no chunking is to be done. This is the setup
  1452  		// recommended in the Server-Sent Events candidate recommendation 11,
  1453  		// section 8.
  1454  		if hasTE && te == "identity" {
  1455  			cw.chunking = false
  1456  			w.closeAfterReply = true
  1457  			delHeader("Transfer-Encoding")
  1458  		} else {
  1459  			// HTTP/1.1 or greater: use chunked transfer encoding
  1460  			// to avoid closing the connection at EOF.
  1461  			cw.chunking = true
  1462  			setHeader.transferEncoding = "chunked"
  1463  			if hasTE && te == "chunked" {
  1464  				// We will send the chunked Transfer-Encoding header later.
  1465  				delHeader("Transfer-Encoding")
  1466  			}
  1467  		}
  1468  	} else {
  1469  		// HTTP version < 1.1: cannot do chunked transfer
  1470  		// encoding and we don't know the Content-Length so
  1471  		// signal EOF by closing connection.
  1472  		w.closeAfterReply = true
  1473  		delHeader("Transfer-Encoding") // in case already set
  1474  	}
  1475  
  1476  	// Cannot use Content-Length with non-identity Transfer-Encoding.
  1477  	if cw.chunking {
  1478  		delHeader("Content-Length")
  1479  	}
  1480  	if !w.req.ProtoAtLeast(1, 0) {
  1481  		return
  1482  	}
  1483  
  1484  	// Only override the Connection header if it is not a successful
  1485  	// protocol switch response and if KeepAlives are not enabled.
  1486  	// See https://golang.org/issue/36381.
  1487  	delConnectionHeader := w.closeAfterReply &&
  1488  		(!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) &&
  1489  		!isProtocolSwitchResponse(w.status, header)
  1490  	if delConnectionHeader {
  1491  		delHeader("Connection")
  1492  		if w.req.ProtoAtLeast(1, 1) {
  1493  			setHeader.connection = "close"
  1494  		}
  1495  	}
  1496  
  1497  	writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:])
  1498  	cw.header.WriteSubset(w.conn.bufw, excludeHeader)
  1499  	setHeader.Write(w.conn.bufw)
  1500  	w.conn.bufw.Write(crlf)
  1501  }
  1502  
  1503  // foreachHeaderElement splits v according to the "#rule" construction
  1504  // in RFC 7230 section 7 and calls fn for each non-empty element.
  1505  func foreachHeaderElement(v string, fn func(string)) {
  1506  	v = textproto.TrimString(v)
  1507  	if v == "" {
  1508  		return
  1509  	}
  1510  	if !strings.Contains(v, ",") {
  1511  		fn(v)
  1512  		return
  1513  	}
  1514  	for _, f := range strings.Split(v, ",") {
  1515  		if f = textproto.TrimString(f); f != "" {
  1516  			fn(f)
  1517  		}
  1518  	}
  1519  }
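
        // Illustrative sketch: within this package, foreachHeaderElement splits a
        // comma-separated header value such as "keep-alive, Upgrade" and calls fn
        // once per trimmed, non-empty element:
        //
        //	foreachHeaderElement(" keep-alive, , Upgrade ", func(e string) {
        //		fmt.Println(e) // prints "keep-alive", then "Upgrade"
        //	})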
  1520  
  1521  // writeStatusLine writes an HTTP/1.x Status-Line (RFC 7230 Section 3.1.2)
  1522  // to bw. is11 is whether the HTTP request is HTTP/1.1. false means HTTP/1.0.
  1523  // code is the response status code.
  1524  // scratch is an optional scratch buffer. If it has at least capacity 3, it's used.
  1525  func writeStatusLine(bw *bufio.Writer, is11 bool, code int, scratch []byte) {
  1526  	if is11 {
  1527  		bw.WriteString("HTTP/1.1 ")
  1528  	} else {
  1529  		bw.WriteString("HTTP/1.0 ")
  1530  	}
  1531  	if text := StatusText(code); text != "" {
  1532  		bw.Write(strconv.AppendInt(scratch[:0], int64(code), 10))
  1533  		bw.WriteByte(' ')
  1534  		bw.WriteString(text)
  1535  		bw.WriteString("\r\n")
  1536  	} else {
  1537  		// don't worry about performance
  1538  		fmt.Fprintf(bw, "%03d status code %d\r\n", code, code)
  1539  	}
  1540  }
  1541  
  1542  // bodyAllowed reports whether a Write is allowed for this response type.
  1543  // It's illegal to call this before WriteHeader has been called.
  1544  func (w *response) bodyAllowed() bool {
  1545  	if !w.wroteHeader {
  1546  		panic("")
  1547  	}
  1548  	return bodyAllowedForStatus(w.status)
  1549  }
  1550  
  1551  // The Life Of A Write is like this:
  1552  //
  1553  // Handler starts. No header has been sent. The handler can either
  1554  // write a header, or just start writing. Writing before sending a header
  1555  // sends an implicitly empty 200 OK header.
  1556  //
  1557  // If the handler didn't declare a Content-Length up front, we either
  1558  // go into chunking mode or, if the handler finishes running before
  1559  // its output exceeds the chunking buffer size, we compute a
  1560  // Content-Length and send that in the header instead.
  1561  //
  1562  // Likewise, if the handler didn't set a Content-Type, we sniff that
  1563  // from the initial chunk of output.
  1564  //
  1565  // The Writers are wired together like:
  1566  //
  1567  //  1. *response (the ResponseWriter) ->
  1568  //  2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes ->
  1569  //  3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type)
  1570  //     and which writes the chunk headers, if needed ->
  1571  //  4. conn.bufw, a *bufio.Writer of default (4kB) bytes, writing to ->
  1572  //  5. checkConnErrorWriter{c}, which notes any non-nil error on Write
  1573  //     and populates c.werr with it if so, but otherwise writes to ->
  1574  //  6. the rwc, the net.Conn.
  1575  //
  1576  // TODO(bradfitz): short-circuit some of the buffering when the
  1577  // initial header contains both a Content-Type and Content-Length.
  1578  // Also short-circuit in (1) when the header's been sent and not in
  1579  // chunking mode, writing directly to (4) instead, if (2) has no
  1580  // buffered data. More generally, we could short-circuit from (1) to
  1581  // (3) even in chunking mode if the write size from (1) is over some
  1582  // threshold and nothing is in (2).  The answer might be mostly making
  1583  // bufferBeforeChunkingSize smaller and having bufio's fast-paths deal
  1584  // with this instead.
  1585  func (w *response) Write(data []byte) (n int, err error) {
  1586  	return w.write(len(data), data, "")
  1587  }
  1588  
  1589  func (w *response) WriteString(data string) (n int, err error) {
  1590  	return w.write(len(data), nil, data)
  1591  }
  1592  
  1593  // either dataB or dataS is non-zero.
  1594  func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) {
  1595  	if w.conn.hijacked() {
  1596  		if lenData > 0 {
  1597  			caller := relevantCaller()
  1598  			w.conn.server.logf("http: response.Write on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
  1599  		}
  1600  		return 0, ErrHijacked
  1601  	}
  1602  
  1603  	if w.canWriteContinue.Load() {
  1604  		// Body reader wants to write 100 Continue but hasn't yet.
  1605  		// Tell it not to. The store must be done while holding the lock
  1606  		// because the lock makes sure that there is not an active write
  1607  		// this very moment.
  1608  		w.writeContinueMu.Lock()
  1609  		w.canWriteContinue.Store(false)
  1610  		w.writeContinueMu.Unlock()
  1611  	}
  1612  
  1613  	if !w.wroteHeader {
  1614  		w.WriteHeader(StatusOK)
  1615  	}
  1616  	if lenData == 0 {
  1617  		return 0, nil
  1618  	}
  1619  	if !w.bodyAllowed() {
  1620  		return 0, ErrBodyNotAllowed
  1621  	}
  1622  
  1623  	w.written += int64(lenData) // ignoring errors, for errorKludge
  1624  	if w.contentLength != -1 && w.written > w.contentLength {
  1625  		return 0, ErrContentLength
  1626  	}
  1627  	if dataB != nil {
  1628  		return w.w.Write(dataB)
  1629  	} else {
  1630  		return w.w.WriteString(dataS)
  1631  	}
  1632  }
  1633  
  1634  func (w *response) finishRequest() {
  1635  	w.handlerDone.Store(true)
  1636  
  1637  	if !w.wroteHeader {
  1638  		w.WriteHeader(StatusOK)
  1639  	}
  1640  
  1641  	w.w.Flush()
  1642  	putBufioWriter(w.w)
  1643  	w.cw.close()
  1644  	w.conn.bufw.Flush()
  1645  
  1646  	w.conn.r.abortPendingRead()
  1647  
  1648  	// Close the body (regardless of w.closeAfterReply) so we can
  1649  	// re-use its bufio.Reader later safely.
  1650  	w.reqBody.Close()
  1651  
  1652  	if w.req.MultipartForm != nil {
  1653  		w.req.MultipartForm.RemoveAll()
  1654  	}
  1655  }
  1656  
  1657  // shouldReuseConnection reports whether the underlying TCP connection can be reused.
  1658  // It must only be called after the handler is done executing.
  1659  func (w *response) shouldReuseConnection() bool {
  1660  	if w.closeAfterReply {
  1661  		// The request or something set while executing the
  1662  		// handler indicated we shouldn't reuse this
  1663  		// connection.
  1664  		return false
  1665  	}
  1666  
  1667  	if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written {
  1668  		// Did not write enough. Avoid getting out of sync.
  1669  		return false
  1670  	}
  1671  
  1672  	// There was some error writing to the underlying connection
  1673  	// during the request, so don't re-use this conn.
  1674  	if w.conn.werr != nil {
  1675  		return false
  1676  	}
  1677  
  1678  	if w.closedRequestBodyEarly() {
  1679  		return false
  1680  	}
  1681  
  1682  	return true
  1683  }
  1684  
  1685  func (w *response) closedRequestBodyEarly() bool {
  1686  	body, ok := w.req.Body.(*body)
  1687  	return ok && body.didEarlyClose()
  1688  }
  1689  
  1690  func (w *response) Flush() {
  1691  	if !w.wroteHeader {
  1692  		w.WriteHeader(StatusOK)
  1693  	}
  1694  	w.w.Flush()
  1695  	w.cw.flush()
  1696  }
  1697  
  1698  func (c *conn) finalFlush() {
  1699  	if c.bufr != nil {
  1700  		// Steal the bufio.Reader (~4KB worth of memory) and its associated
  1701  		// reader for a future connection.
  1702  		putBufioReader(c.bufr)
  1703  		c.bufr = nil
  1704  	}
  1705  
  1706  	if c.bufw != nil {
  1707  		c.bufw.Flush()
  1708  		// Steal the bufio.Writer (~4KB worth of memory) and its associated
  1709  		// writer for a future connection.
  1710  		putBufioWriter(c.bufw)
  1711  		c.bufw = nil
  1712  	}
  1713  }
  1714  
  1715  // Close the connection.
  1716  func (c *conn) close() {
  1717  	c.finalFlush()
  1718  	c.rwc.Close()
  1719  }
  1720  
  1721  // rstAvoidanceDelay is the amount of time we sleep after closing the
  1722  // write side of a TCP connection before closing the entire socket.
  1723  // By sleeping, we increase the chances that the client sees our FIN
  1724  // and processes its final data before they process the subsequent RST
  1725  // from closing a connection with known unread data.
  1726  // This RST seems to occur mostly on BSD systems. (And Windows?)
  1727  // This timeout is somewhat arbitrary (~latency around the planet).
  1728  const rstAvoidanceDelay = 500 * time.Millisecond
  1729  
  1730  type closeWriter interface {
  1731  	CloseWrite() error
  1732  }
  1733  
  1734  var _ closeWriter = (*net.TCPConn)(nil)
  1735  
  1736  // closeWrite flushes any outstanding data and sends a FIN packet (if
  1737  // client is connected via TCP), signaling that we're done. We then
  1738  // pause for a bit, hoping the client processes it before any
  1739  // subsequent RST.
  1740  //
  1741  // See https://golang.org/issue/3595
  1742  func (c *conn) closeWriteAndWait() {
  1743  	c.finalFlush()
  1744  	if tcp, ok := c.rwc.(closeWriter); ok {
  1745  		tcp.CloseWrite()
  1746  	}
  1747  	time.Sleep(rstAvoidanceDelay)
  1748  }
  1749  
  1750  // validNextProto reports whether the proto is a valid ALPN protocol name.
  1751  // Everything is valid except the empty string and built-in protocol types,
  1752  // so that those can't be overridden with alternate implementations.
  1753  func validNextProto(proto string) bool {
  1754  	switch proto {
  1755  	case "", "http/1.1", "http/1.0":
  1756  		return false
  1757  	}
  1758  	return true
  1759  }
  1760  
  1761  const (
  1762  	runHooks  = true
  1763  	skipHooks = false
  1764  )
  1765  
  1766  func (c *conn) setState(nc net.Conn, state ConnState, runHook bool) {
  1767  	srv := c.server
  1768  	switch state {
  1769  	case StateNew:
  1770  		srv.trackConn(c, true)
  1771  	case StateHijacked, StateClosed:
  1772  		srv.trackConn(c, false)
  1773  	}
  1774  	if state > 0xff || state < 0 {
  1775  		panic("internal error")
  1776  	}
  1777  	packedState := uint64(time.Now().Unix()<<8) | uint64(state)
  1778  	c.curState.Store(packedState)
  1779  	if !runHook {
  1780  		return
  1781  	}
  1782  	if hook := srv.ConnState; hook != nil {
  1783  		hook(nc, state)
  1784  	}
  1785  }
  1786  
  1787  func (c *conn) getState() (state ConnState, unixSec int64) {
  1788  	packedState := c.curState.Load()
  1789  	return ConnState(packedState & 0xff), int64(packedState >> 8)
  1790  }
  1791  
  1792  // badRequestError is a literal string (used by the server in HTML,
  1793  // unescaped) to tell the user why their request was bad. It should
  1794  // be plain text without user info or other embedded errors.
  1795  func badRequestError(e string) error { return statusError{StatusBadRequest, e} }
  1796  
  1797  // statusError is an error used to respond to a request with an HTTP status.
  1798  // The text should be plain text without user info or other embedded errors.
  1799  type statusError struct {
  1800  	code int
  1801  	text string
  1802  }
  1803  
  1804  func (e statusError) Error() string { return StatusText(e.code) + ": " + e.text }
  1805  
  1806  // ErrAbortHandler is a sentinel panic value to abort a handler.
  1807  // While any panic from ServeHTTP aborts the response to the client,
  1808  // panicking with ErrAbortHandler also suppresses logging of a stack
  1809  // trace to the server's error log.
  1810  var ErrAbortHandler = errors.New("net/http: abort Handler")
  1811  
  1812  // isCommonNetReadError reports whether err is a common error
  1813  // encountered during reading a request off the network when the
  1814  // client has gone away or had its read fail somehow. This is used to
  1815  // determine which logs are interesting enough to log about.
  1816  func isCommonNetReadError(err error) bool {
  1817  	if err == io.EOF {
  1818  		return true
  1819  	}
  1820  	if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
  1821  		return true
  1822  	}
  1823  	if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
  1824  		return true
  1825  	}
  1826  	return false
  1827  }
  1828  
  1829  // Serve a new connection.
  1830  func (c *conn) serve(ctx context.Context) {
  1831  	c.remoteAddr = c.rwc.RemoteAddr().String()
  1832  	ctx = context.WithValue(ctx, LocalAddrContextKey, c.rwc.LocalAddr())
  1833  	var inFlightResponse *response
  1834  	defer func() {
  1835  		if err := recover(); err != nil && err != ErrAbortHandler {
  1836  			const size = 64 << 10
  1837  			buf := make([]byte, size)
  1838  			buf = buf[:runtime.Stack(buf, false)]
  1839  			c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf)
  1840  		}
  1841  		if inFlightResponse != nil {
  1842  			inFlightResponse.cancelCtx()
  1843  		}
  1844  		if !c.hijacked() {
  1845  			if inFlightResponse != nil {
  1846  				inFlightResponse.conn.r.abortPendingRead()
  1847  				inFlightResponse.reqBody.Close()
  1848  			}
  1849  			c.close()
  1850  			c.setState(c.rwc, StateClosed, runHooks)
  1851  		}
  1852  	}()
  1853  
  1854  	if tlsConn, ok := c.rwc.(*tls.Conn); ok {
  1855  		tlsTO := c.server.tlsHandshakeTimeout()
  1856  		if tlsTO > 0 {
  1857  			dl := time.Now().Add(tlsTO)
  1858  			c.rwc.SetReadDeadline(dl)
  1859  			c.rwc.SetWriteDeadline(dl)
  1860  		}
  1861  		if err := tlsConn.HandshakeContext(ctx); err != nil {
  1862  			// If the handshake failed due to the client not speaking
  1863  			// TLS, assume they're speaking plaintext HTTP and write a
  1864  			// 400 response on the TLS conn's underlying net.Conn.
  1865  			if re, ok := err.(tls.RecordHeaderError); ok && re.Conn != nil && tlsRecordHeaderLooksLikeHTTP(re.RecordHeader) {
  1866  				io.WriteString(re.Conn, "HTTP/1.0 400 Bad Request\r\n\r\nClient sent an HTTP request to an HTTPS server.\n")
  1867  				re.Conn.Close()
  1868  				return
  1869  			}
  1870  			c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err)
  1871  			return
  1872  		}
  1873  		// Restore Conn-level deadlines.
  1874  		if tlsTO > 0 {
  1875  			c.rwc.SetReadDeadline(time.Time{})
  1876  			c.rwc.SetWriteDeadline(time.Time{})
  1877  		}
  1878  		c.tlsState = new(tls.ConnectionState)
  1879  		*c.tlsState = tlsConn.ConnectionState()
  1880  		if proto := c.tlsState.NegotiatedProtocol; validNextProto(proto) {
  1881  			if fn := c.server.TLSNextProto[proto]; fn != nil {
  1882  				h := initALPNRequest{ctx, tlsConn, serverHandler{c.server}}
  1883  				// Mark freshly created HTTP/2 as active and prevent any server state hooks
  1884  				// from being run on these connections. This prevents closeIdleConns from
  1885  				// closing such connections. See issue https://golang.org/issue/39776.
  1886  				c.setState(c.rwc, StateActive, skipHooks)
  1887  				fn(c.server, tlsConn, h)
  1888  			}
  1889  			return
  1890  		}
  1891  	}
  1892  
  1893  	// HTTP/1.x from here on.
  1894  
  1895  	ctx, cancelCtx := context.WithCancel(ctx)
  1896  	c.cancelCtx = cancelCtx
  1897  	defer cancelCtx()
  1898  
  1899  	c.r = &connReader{conn: c}
  1900  	c.bufr = newBufioReader(c.r)
  1901  	c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10)
  1902  
  1903  	for {
  1904  		w, err := c.readRequest(ctx)
  1905  		if c.r.remain != c.server.initialReadLimitSize() {
  1906  			// If we read any bytes off the wire, we're active.
  1907  			c.setState(c.rwc, StateActive, runHooks)
  1908  		}
  1909  		if err != nil {
  1910  			const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n"
  1911  
  1912  			switch {
  1913  			case err == errTooLarge:
  1914  				// Their HTTP client may or may not be
  1915  				// able to read this if we're
  1916  				// responding to them and hanging up
  1917  				// while they're still writing their
  1918  				// request. Undefined behavior.
  1919  				const publicErr = "431 Request Header Fields Too Large"
  1920  				fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
  1921  				c.closeWriteAndWait()
  1922  				return
  1923  
  1924  			case isUnsupportedTEError(err):
  1925  				// Respond as per RFC 7230 Section 3.3.1 which says,
  1926  				//      A server that receives a request message with a
  1927  				//      transfer coding it does not understand SHOULD
  1928  				//      respond with 501 (Not Implemented).
  1929  				code := StatusNotImplemented
  1930  
  1931  				// We purposefully aren't echoing back the transfer-encoding's value,
  1932  				// so as to mitigate the risk of cross-site scripting by an attacker.
  1933  				fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s%sUnsupported transfer encoding", code, StatusText(code), errorHeaders)
  1934  				return
  1935  
  1936  			case isCommonNetReadError(err):
  1937  				return // don't reply
  1938  
  1939  			default:
  1940  				if v, ok := err.(statusError); ok {
  1941  					fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s: %s%s%d %s: %s", v.code, StatusText(v.code), v.text, errorHeaders, v.code, StatusText(v.code), v.text)
  1942  					return
  1943  				}
  1944  				publicErr := "400 Bad Request"
  1945  				fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
  1946  				return
  1947  			}
  1948  		}
  1949  
  1950  		// Expect 100 Continue support
  1951  		req := w.req
  1952  		if req.expectsContinue() {
  1953  			if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 {
  1954  				// Wrap the Body reader with one that replies on the connection
  1955  				req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
  1956  				w.canWriteContinue.Store(true)
  1957  			}
  1958  		} else if req.Header.get("Expect") != "" {
  1959  			w.sendExpectationFailed()
  1960  			return
  1961  		}
  1962  
  1963  		c.curReq.Store(w)
  1964  
  1965  		if requestBodyRemains(req.Body) {
  1966  			registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead)
  1967  		} else {
  1968  			w.conn.r.startBackgroundRead()
  1969  		}
  1970  
  1971  		// HTTP cannot have multiple simultaneous active requests.[*]
  1972  		// Until the server replies to this request, it can't read another,
  1973  		// so we might as well run the handler in this goroutine.
  1974  		// [*] Not strictly true: HTTP pipelining. We could let them all process
  1975  		// in parallel even if their responses need to be serialized.
  1976  		// But we're not going to implement HTTP pipelining because it
  1977  		// was never deployed in the wild and the answer is HTTP/2.
  1978  		inFlightResponse = w
  1979  		serverHandler{c.server}.ServeHTTP(w, w.req)
  1980  		inFlightResponse = nil
  1981  		w.cancelCtx()
  1982  		if c.hijacked() {
  1983  			return
  1984  		}
  1985  		w.finishRequest()
  1986  		if !w.shouldReuseConnection() {
  1987  			if w.requestBodyLimitHit || w.closedRequestBodyEarly() {
  1988  				c.closeWriteAndWait()
  1989  			}
  1990  			return
  1991  		}
  1992  		c.setState(c.rwc, StateIdle, runHooks)
  1993  		c.curReq.Store(nil)
  1994  
  1995  		if !w.conn.server.doKeepAlives() {
  1996  			// We're in shutdown mode. We might've replied
  1997  			// to the user without "Connection: close" and
  1998  			// they might think they can send another
  1999  			// request, but such is life with HTTP/1.1.
  2000  			return
  2001  		}
  2002  
  2003  		if d := c.server.idleTimeout(); d != 0 {
  2004  			c.rwc.SetReadDeadline(time.Now().Add(d))
  2005  		} else {
  2006  			c.rwc.SetReadDeadline(time.Time{})
  2007  		}
  2008  
  2009  		// Wait for the connection to become readable again before trying to
  2010  		// read the next request. This prevents a ReadHeaderTimeout or
  2011  		// ReadTimeout from starting until the first bytes of the next request
  2012  		// have been received.
  2013  		if _, err := c.bufr.Peek(4); err != nil {
  2014  			return
  2015  		}
  2016  
  2017  		c.rwc.SetReadDeadline(time.Time{})
  2018  	}
  2019  }
  2020  
  2021  func (w *response) sendExpectationFailed() {
  2022  	// TODO(bradfitz): let ServeHTTP handlers handle
  2023  	// requests with non-standard expectation[s]? Seems
  2024  	// theoretical at best, and doesn't fit into the
  2025  	// current ServeHTTP model anyway. We'd need to
  2026  	// make the ResponseWriter an optional
  2027  	// "ExpectReplier" interface or something.
  2028  	//
  2029  	// For now we'll just obey RFC 7231 5.1.1 which says
  2030  	// "A server that receives an Expect field-value other
  2031  	// than 100-continue MAY respond with a 417 (Expectation
  2032  	// Failed) status code to indicate that the unexpected
  2033  	// expectation cannot be met."
  2034  	w.Header().Set("Connection", "close")
  2035  	w.WriteHeader(StatusExpectationFailed)
  2036  	w.finishRequest()
  2037  }
  2038  
  2039  // Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter
  2040  // and a Hijacker.
  2041  func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
  2042  	if w.handlerDone.Load() {
  2043  		panic("net/http: Hijack called after ServeHTTP finished")
  2044  	}
  2045  	if w.wroteHeader {
  2046  		w.cw.flush()
  2047  	}
  2048  
  2049  	c := w.conn
  2050  	c.mu.Lock()
  2051  	defer c.mu.Unlock()
  2052  
  2053  	// Release the bufioWriter that writes to the chunk writer; it is not
  2054  	// used after a connection has been hijacked.
  2055  	rwc, buf, err = c.hijackLocked()
  2056  	if err == nil {
  2057  		putBufioWriter(w.w)
  2058  		w.w = nil
  2059  	}
  2060  	return rwc, buf, err
  2061  }
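
        // Illustrative sketch of how a handler in an importing package reaches
        // Hijack: assert the ResponseWriter to http.Hijacker, then take ownership
        // of (and eventually close) the returned net.Conn. The response text here
        // is an arbitrary example.
        //
        //	func handle(w http.ResponseWriter, r *http.Request) {
        //		hj, ok := w.(http.Hijacker)
        //		if !ok {
        //			http.Error(w, "hijacking not supported", http.StatusInternalServerError)
        //			return
        //		}
        //		conn, bufrw, err := hj.Hijack()
        //		if err != nil {
        //			http.Error(w, err.Error(), http.StatusInternalServerError)
        //			return
        //		}
        //		defer conn.Close()
        //		bufrw.WriteString("HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nhi")
        //		bufrw.Flush()
        //	}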
  2062  
  2063  func (w *response) CloseNotify() <-chan bool {
  2064  	if w.handlerDone.Load() {
  2065  		panic("net/http: CloseNotify called after ServeHTTP finished")
  2066  	}
  2067  	return w.closeNotifyCh
  2068  }
  2069  
  2070  func registerOnHitEOF(rc io.ReadCloser, fn func()) {
  2071  	switch v := rc.(type) {
  2072  	case *expectContinueReader:
  2073  		registerOnHitEOF(v.readCloser, fn)
  2074  	case *body:
  2075  		v.registerOnHitEOF(fn)
  2076  	default:
  2077  		panic("unexpected type " + fmt.Sprintf("%T", rc))
  2078  	}
  2079  }
  2080  
  2081  // requestBodyRemains reports whether future calls to Read
  2082  // on rc might yield more data.
  2083  func requestBodyRemains(rc io.ReadCloser) bool {
  2084  	if rc == NoBody {
  2085  		return false
  2086  	}
  2087  	switch v := rc.(type) {
  2088  	case *expectContinueReader:
  2089  		return requestBodyRemains(v.readCloser)
  2090  	case *body:
  2091  		return v.bodyRemains()
  2092  	default:
  2093  		panic("unexpected type " + fmt.Sprintf("%T", rc))
  2094  	}
  2095  }
  2096  
  2097  // The HandlerFunc type is an adapter to allow the use of
  2098  // ordinary functions as HTTP handlers. If f is a function
  2099  // with the appropriate signature, HandlerFunc(f) is a
  2100  // Handler that calls f.
  2101  type HandlerFunc func(ResponseWriter, *Request)
  2102  
  2103  // ServeHTTP calls f(w, r).
  2104  func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) {
  2105  	f(w, r)
  2106  }
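
        // Illustrative sketch: HandlerFunc lets an ordinary function satisfy
        // Handler without declaring a named type, as seen from an importing
        // package (the response body is an arbitrary example):
        //
        //	var h http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        //		w.Write([]byte("hello"))
        //	})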
  2107  
  2108  // Helper handlers
  2109  
  2110  // Error replies to the request with the specified error message and HTTP code.
  2111  // It does not otherwise end the request; the caller should ensure no further
  2112  // writes are done to w.
  2113  // The error message should be plain text.
  2114  func Error(w ResponseWriter, error string, code int) {
  2115  	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
  2116  	w.Header().Set("X-Content-Type-Options", "nosniff")
  2117  	w.WriteHeader(code)
  2118  	fmt.Fprintln(w, error)
  2119  }
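
        // Illustrative sketch: a handler rejecting bad input with Error and then
        // returning immediately (validate is a hypothetical helper):
        //
        //	if err := validate(r); err != nil {
        //		http.Error(w, "bad input", http.StatusBadRequest)
        //		return
        //	}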
  2120  
  2121  // NotFound replies to the request with an HTTP 404 not found error.
  2122  func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) }
  2123  
  2124  // NotFoundHandler returns a simple request handler
  2125  // that replies to each request with a “404 page not found” reply.
  2126  func NotFoundHandler() Handler { return HandlerFunc(NotFound) }
  2127  
  2128  // StripPrefix returns a handler that serves HTTP requests by removing the
  2129  // given prefix from the request URL's Path (and RawPath if set) and invoking
  2130  // the handler h. StripPrefix handles a request for a path that doesn't begin
  2131  // with prefix by replying with an HTTP 404 not found error. The prefix must
  2132  // match exactly: if the prefix in the request contains escaped characters
  2133  // the reply is also an HTTP 404 not found error.
  2134  func StripPrefix(prefix string, h Handler) Handler {
  2135  	if prefix == "" {
  2136  		return h
  2137  	}
  2138  	return HandlerFunc(func(w ResponseWriter, r *Request) {
  2139  		p := strings.TrimPrefix(r.URL.Path, prefix)
  2140  		rp := strings.TrimPrefix(r.URL.RawPath, prefix)
  2141  		if len(p) < len(r.URL.Path) && (r.URL.RawPath == "" || len(rp) < len(r.URL.RawPath)) {
  2142  			r2 := new(Request)
  2143  			*r2 = *r
  2144  			r2.URL = new(url.URL)
  2145  			*r2.URL = *r.URL
  2146  			r2.URL.Path = p
  2147  			r2.URL.RawPath = rp
  2148  			h.ServeHTTP(w, r2)
  2149  		} else {
  2150  			NotFound(w, r)
  2151  		}
  2152  	})
  2153  }
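
        // Illustrative sketch: StripPrefix is commonly combined with FileServer so
        // that a URL prefix maps onto a directory on disk (the "/tmpfiles/" route
        // and "/tmp" directory are arbitrary examples):
        //
        //	http.Handle("/tmpfiles/", http.StripPrefix("/tmpfiles/", http.FileServer(http.Dir("/tmp"))))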
  2154  
  2155  // Redirect replies to the request with a redirect to url,
  2156  // which may be a path relative to the request path.
  2157  //
  2158  // The provided code should be in the 3xx range and is usually
  2159  // StatusMovedPermanently, StatusFound or StatusSeeOther.
  2160  //
  2161  // If the Content-Type header has not been set, Redirect sets it
  2162  // to "text/html; charset=utf-8" and writes a small HTML body.
  2163  // Setting the Content-Type header to any value, including nil,
  2164  // disables that behavior.
  2165  func Redirect(w ResponseWriter, r *Request, url string, code int) {
  2166  	if u, err := urlpkg.Parse(url); err == nil {
  2167  		// If url was relative, make its path absolute by
  2168  		// combining with request path.
  2169  		// The client would probably do this for us,
  2170  		// but doing it ourselves is more reliable.
  2171  		// See RFC 7231, section 7.1.2
  2172  		if u.Scheme == "" && u.Host == "" {
  2173  			oldpath := r.URL.Path
  2174  			if oldpath == "" { // should not happen, but avoid a crash if it does
  2175  				oldpath = "/"
  2176  			}
  2177  
  2178  			// no leading http://server
  2179  			if url == "" || url[0] != '/' {
  2180  				// make relative path absolute
  2181  				olddir, _ := path.Split(oldpath)
  2182  				url = olddir + url
  2183  			}
  2184  
  2185  			var query string
  2186  			if i := strings.Index(url, "?"); i != -1 {
  2187  				url, query = url[:i], url[i:]
  2188  			}
  2189  
  2190  			// clean up but preserve trailing slash
  2191  			trailing := strings.HasSuffix(url, "/")
  2192  			url = path.Clean(url)
  2193  			if trailing && !strings.HasSuffix(url, "/") {
  2194  				url += "/"
  2195  			}
  2196  			url += query
  2197  		}
  2198  	}
  2199  
  2200  	h := w.Header()
  2201  
  2202  	// RFC 7231 notes that a short HTML body is usually included in
  2203  	// the response because older user agents may not understand 301/307.
  2204  	// Do it only if the request didn't already have a Content-Type header.
  2205  	_, hadCT := h["Content-Type"]
  2206  
  2207  	h.Set("Location", hexEscapeNonASCII(url))
  2208  	if !hadCT && (r.Method == "GET" || r.Method == "HEAD") {
  2209  		h.Set("Content-Type", "text/html; charset=utf-8")
  2210  	}
  2211  	w.WriteHeader(code)
  2212  
  2213  	// Shouldn't send the body for POST or HEAD; that leaves GET.
  2214  	if !hadCT && r.Method == "GET" {
  2215  		body := "<a href=\"" + htmlEscape(url) + "\">" + StatusText(code) + "</a>.\n"
  2216  		fmt.Fprintln(w, body)
  2217  	}
  2218  }
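
        // Illustrative sketch: a handler sending the client elsewhere (the
        // "/login" target is an arbitrary example):
        //
        //	func handle(w http.ResponseWriter, r *http.Request) {
        //		http.Redirect(w, r, "/login", http.StatusSeeOther)
        //	}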
  2219  
  2220  var htmlReplacer = strings.NewReplacer(
  2221  	"&", "&amp;",
  2222  	"<", "&lt;",
  2223  	">", "&gt;",
  2224  	// "&#34;" is shorter than "&quot;".
  2225  	`"`, "&#34;",
  2226  	// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
  2227  	"'", "&#39;",
  2228  )
  2229  
  2230  func htmlEscape(s string) string {
  2231  	return htmlReplacer.Replace(s)
  2232  }
  2233  
  2234  // Redirect to a fixed URL
  2235  type redirectHandler struct {
  2236  	url  string
  2237  	code int
  2238  }
  2239  
  2240  func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) {
  2241  	Redirect(w, r, rh.url, rh.code)
  2242  }
  2243  
  2244  // RedirectHandler returns a request handler that redirects
  2245  // each request it receives to the given url using the given
  2246  // status code.
  2247  //
  2248  // The provided code should be in the 3xx range and is usually
  2249  // StatusMovedPermanently, StatusFound or StatusSeeOther.
  2250  func RedirectHandler(url string, code int) Handler {
  2251  	return &redirectHandler{url, code}
  2252  }
  2253  
  2254  // ServeMux is an HTTP request multiplexer.
  2255  // It matches the URL of each incoming request against a list of registered
  2256  // patterns and calls the handler for the pattern that
  2257  // most closely matches the URL.
  2258  //
  2259  // Patterns name fixed, rooted paths, like "/favicon.ico",
  2260  // or rooted subtrees, like "/images/" (note the trailing slash).
  2261  // Longer patterns take precedence over shorter ones, so that
  2262  // if there are handlers registered for both "/images/"
  2263  // and "/images/thumbnails/", the latter handler will be
  2264  // called for paths beginning "/images/thumbnails/" and the
  2265  // former will receive requests for any other paths in the
  2266  // "/images/" subtree.
  2267  //
  2268  // Note that since a pattern ending in a slash names a rooted subtree,
  2269  // the pattern "/" matches all paths not matched by other registered
  2270  // patterns, not just the URL with Path == "/".
  2271  //
  2272  // If a subtree has been registered and a request is received naming the
  2273  // subtree root without its trailing slash, ServeMux redirects that
  2274  // request to the subtree root (adding the trailing slash). This behavior can
  2275  // be overridden with a separate registration for the path without
  2276  // the trailing slash. For example, registering "/images/" causes ServeMux
  2277  // to redirect a request for "/images" to "/images/", unless "/images" has
  2278  // been registered separately.
  2279  //
  2280  // Patterns may optionally begin with a host name, restricting matches to
  2281  // URLs on that host only. Host-specific patterns take precedence over
  2282  // general patterns, so that a handler might register for the two patterns
  2283  // "/codesearch" and "codesearch.google.com/" without also taking over
  2284  // requests for "http://www.google.com/".
  2285  //
  2286  // ServeMux also takes care of sanitizing the URL request path and the Host
  2287  // header, stripping the port number and redirecting any request containing . or
  2288  // .. elements or repeated slashes to an equivalent, cleaner URL.
  2289  type ServeMux struct {
  2290  	mu    sync.RWMutex
  2291  	m     map[string]muxEntry
  2292  	es    []muxEntry // slice of entries sorted from longest to shortest.
  2293  	hosts bool       // whether any patterns contain hostnames
  2294  }
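
        // Illustrative sketch of the precedence rules above: with these
        // registrations (handler names are placeholders), "/images/thumbnails/a.png"
        // is served by thumbnailHandler, any other "/images/..." path by
        // imagesHandler, and every remaining path by rootHandler:
        //
        //	mux := http.NewServeMux()
        //	mux.Handle("/", rootHandler)
        //	mux.Handle("/images/", imagesHandler)
        //	mux.Handle("/images/thumbnails/", thumbnailHandler)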
  2295  
  2296  type muxEntry struct {
  2297  	h       Handler
  2298  	pattern string
  2299  }
  2300  
  2301  // NewServeMux allocates and returns a new ServeMux.
  2302  func NewServeMux() *ServeMux { return new(ServeMux) }
  2303  
  2304  // DefaultServeMux is the default ServeMux used by Serve.
  2305  var DefaultServeMux = &defaultServeMux
  2306  
  2307  var defaultServeMux ServeMux
  2308  
  2309  // cleanPath returns the canonical path for p, eliminating . and .. elements.
  2310  func cleanPath(p string) string {
  2311  	if p == "" {
  2312  		return "/"
  2313  	}
  2314  	if p[0] != '/' {
  2315  		p = "/" + p
  2316  	}
  2317  	np := path.Clean(p)
  2318  	// path.Clean removes trailing slash except for root;
  2319  	// put the trailing slash back if necessary.
  2320  	if p[len(p)-1] == '/' && np != "/" {
  2321  		// Fast path for common case of p being the string we want:
  2322  		if len(p) == len(np)+1 && strings.HasPrefix(p, np) {
  2323  			np = p
  2324  		} else {
  2325  			np += "/"
  2326  		}
  2327  	}
  2328  	return np
  2329  }
  2330  
  2331  // stripHostPort returns h without any trailing ":<port>".
  2332  func stripHostPort(h string) string {
  2333  	// If no port on host, return unchanged
  2334  	if !strings.Contains(h, ":") {
  2335  		return h
  2336  	}
  2337  	host, _, err := net.SplitHostPort(h)
  2338  	if err != nil {
  2339  		return h // on error, return unchanged
  2340  	}
  2341  	return host
  2342  }
  2343  
  2344  // Find a handler on a handler map given a path string.
  2345  // Most-specific (longest) pattern wins.
  2346  func (mux *ServeMux) match(path string) (h Handler, pattern string) {
  2347  	// Check for exact match first.
  2348  	v, ok := mux.m[path]
  2349  	if ok {
  2350  		return v.h, v.pattern
  2351  	}
  2352  
  2353  	// Check for longest valid match.  mux.es contains all patterns
  2354  	// that end in / sorted from longest to shortest.
  2355  	for _, e := range mux.es {
  2356  		if strings.HasPrefix(path, e.pattern) {
  2357  			return e.h, e.pattern
  2358  		}
  2359  	}
  2360  	return nil, ""
  2361  }
  2362  
  2363  // redirectToPathSlash determines whether the given path needs a trailing "/" appended.
  2364  // That is the case when a handler for path + "/" was registered, but
  2365  // not for path itself. If the slash is needed, it returns a new URL
  2366  // with the path set to path + "/" and true to indicate the redirect.
  2367  func (mux *ServeMux) redirectToPathSlash(host, path string, u *url.URL) (*url.URL, bool) {
  2368  	mux.mu.RLock()
  2369  	shouldRedirect := mux.shouldRedirectRLocked(host, path)
  2370  	mux.mu.RUnlock()
  2371  	if !shouldRedirect {
  2372  		return u, false
  2373  	}
  2374  	path = path + "/"
  2375  	u = &url.URL{Path: path, RawQuery: u.RawQuery}
  2376  	return u, true
  2377  }
  2378  
  2379  // shouldRedirectRLocked reports whether the given path and host should be redirected to
  2380  // path+"/". This should happen if a handler is registered for path+"/" but
  2381  // not path -- see comments at ServeMux.
  2382  func (mux *ServeMux) shouldRedirectRLocked(host, path string) bool {
  2383  	p := []string{path, host + path}
  2384  
  2385  	for _, c := range p {
  2386  		if _, exist := mux.m[c]; exist {
  2387  			return false
  2388  		}
  2389  	}
  2390  
  2391  	n := len(path)
  2392  	if n == 0 {
  2393  		return false
  2394  	}
  2395  	for _, c := range p {
  2396  		if _, exist := mux.m[c+"/"]; exist {
  2397  			return path[n-1] != '/'
  2398  		}
  2399  	}
  2400  
  2401  	return false
  2402  }
  2403  
  2404  // Handler returns the handler to use for the given request,
  2405  // consulting r.Method, r.Host, and r.URL.Path. It always returns
  2406  // a non-nil handler. If the path is not in its canonical form, the
  2407  // handler will be an internally-generated handler that redirects
  2408  // to the canonical path. If the host contains a port, it is ignored
  2409  // when matching handlers.
  2410  //
  2411  // The path and host are used unchanged for CONNECT requests.
  2412  //
  2413  // Handler also returns the registered pattern that matches the
  2414  // request or, in the case of internally-generated redirects,
  2415  // the pattern that will match after following the redirect.
  2416  //
  2417  // If there is no registered handler that applies to the request,
  2418  // Handler returns a “page not found” handler and an empty pattern.
  2419  func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) {
  2420  
  2421  	// CONNECT requests are not canonicalized.
  2422  	if r.Method == "CONNECT" {
  2423  		// If r.URL.Path is /tree and its handler is not registered,
  2424  		// the /tree -> /tree/ redirect applies to CONNECT requests
  2425  		// but the path canonicalization does not.
  2426  		if u, ok := mux.redirectToPathSlash(r.URL.Host, r.URL.Path, r.URL); ok {
  2427  			return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
  2428  		}
  2429  
  2430  		return mux.handler(r.Host, r.URL.Path)
  2431  	}
  2432  
  2433  	// All other requests have any port stripped and path cleaned
  2434  	// before passing to mux.handler.
  2435  	host := stripHostPort(r.Host)
  2436  	path := cleanPath(r.URL.Path)
  2437  
  2438  	// If the given path is /tree and its handler is not registered,
  2439  	// redirect for /tree/.
  2440  	if u, ok := mux.redirectToPathSlash(host, path, r.URL); ok {
  2441  		return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
  2442  	}
  2443  
  2444  	if path != r.URL.Path {
  2445  		_, pattern = mux.handler(host, path)
  2446  		u := &url.URL{Path: path, RawQuery: r.URL.RawQuery}
  2447  		return RedirectHandler(u.String(), StatusMovedPermanently), pattern
  2448  	}
  2449  
  2450  	return mux.handler(host, r.URL.Path)
  2451  }
  2452  
  2453  // handler is the main implementation of Handler.
  2454  // The path is known to be in canonical form, except for CONNECT methods.
  2455  func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) {
  2456  	mux.mu.RLock()
  2457  	defer mux.mu.RUnlock()
  2458  
  2459  	// Host-specific pattern takes precedence over generic ones
  2460  	if mux.hosts {
  2461  		h, pattern = mux.match(host + path)
  2462  	}
  2463  	if h == nil {
  2464  		h, pattern = mux.match(path)
  2465  	}
  2466  	if h == nil {
  2467  		h, pattern = NotFoundHandler(), ""
  2468  	}
  2469  	return
  2470  }
  2471  
  2472  // ServeHTTP dispatches the request to the handler whose
  2473  // pattern most closely matches the request URL.
  2474  func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) {
  2475  	if r.RequestURI == "*" {
  2476  		if r.ProtoAtLeast(1, 1) {
  2477  			w.Header().Set("Connection", "close")
  2478  		}
  2479  		w.WriteHeader(StatusBadRequest)
  2480  		return
  2481  	}
  2482  	h, _ := mux.Handler(r)
  2483  	h.ServeHTTP(w, r)
  2484  }
  2485  
  2486  // Handle registers the handler for the given pattern.
  2487  // If a handler already exists for pattern, Handle panics.
  2488  func (mux *ServeMux) Handle(pattern string, handler Handler) {
  2489  	mux.mu.Lock()
  2490  	defer mux.mu.Unlock()
  2491  
  2492  	if pattern == "" {
  2493  		panic("http: invalid pattern")
  2494  	}
  2495  	if handler == nil {
  2496  		panic("http: nil handler")
  2497  	}
  2498  	if _, exist := mux.m[pattern]; exist {
  2499  		panic("http: multiple registrations for " + pattern)
  2500  	}
  2501  
  2502  	if mux.m == nil {
  2503  		mux.m = make(map[string]muxEntry)
  2504  	}
  2505  	e := muxEntry{h: handler, pattern: pattern}
  2506  	mux.m[pattern] = e
  2507  	if pattern[len(pattern)-1] == '/' {
  2508  		mux.es = appendSorted(mux.es, e)
  2509  	}
  2510  
  2511  	if pattern[0] != '/' {
  2512  		mux.hosts = true
  2513  	}
  2514  }
  2515  
  2516  func appendSorted(es []muxEntry, e muxEntry) []muxEntry {
  2517  	n := len(es)
  2518  	i := sort.Search(n, func(i int) bool {
  2519  		return len(es[i].pattern) < len(e.pattern)
  2520  	})
  2521  	if i == n {
  2522  		return append(es, e)
  2523  	}
  2524  	// we now know that i points at where we want to insert
  2525  	es = append(es, muxEntry{}) // try to grow the slice in place, any entry works.
  2526  	copy(es[i+1:], es[i:])      // Move shorter entries down
  2527  	es[i] = e
  2528  	return es
  2529  }
  2530  
  2531  // HandleFunc registers the handler function for the given pattern.
  2532  func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
  2533  	if handler == nil {
  2534  		panic("http: nil handler")
  2535  	}
  2536  	mux.Handle(pattern, HandlerFunc(handler))
  2537  }
  2538  
  2539  // Handle registers the handler for the given pattern
  2540  // in the DefaultServeMux.
  2541  // The documentation for ServeMux explains how patterns are matched.
  2542  func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
  2543  
  2544  // HandleFunc registers the handler function for the given pattern
  2545  // in the DefaultServeMux.
  2546  // The documentation for ServeMux explains how patterns are matched.
  2547  func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
  2548  	DefaultServeMux.HandleFunc(pattern, handler)
  2549  }
  2550  
  2551  // Serve accepts incoming HTTP connections on the listener l,
  2552  // creating a new service goroutine for each. The service goroutines
  2553  // read requests and then call handler to reply to them.
  2554  //
  2555  // The handler is typically nil, in which case the DefaultServeMux is used.
  2556  //
  2557  // HTTP/2 support is only enabled if the Listener returns *tls.Conn
  2558  // connections and they were configured with "h2" in the TLS
  2559  // Config.NextProtos.
  2560  //
  2561  // Serve always returns a non-nil error.
  2562  func Serve(l net.Listener, handler Handler) error {
  2563  	srv := &Server{Handler: handler}
  2564  	return srv.Serve(l)
  2565  }
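
        // Illustrative sketch: Serve with an explicitly created listener, e.g. to
        // bind an OS-chosen port (assumes the net and log packages):
        //
        //	ln, err := net.Listen("tcp", "127.0.0.1:0")
        //	if err != nil {
        //		log.Fatal(err)
        //	}
        //	log.Fatal(http.Serve(ln, nil)) // nil handler means DefaultServeMux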
  2566  
  2567  // ServeTLS accepts incoming HTTPS connections on the listener l,
  2568  // creating a new service goroutine for each. The service goroutines
  2569  // read requests and then call handler to reply to them.
  2570  //
  2571  // The handler is typically nil, in which case the DefaultServeMux is used.
  2572  //
  2573  // Additionally, files containing a certificate and matching private key
  2574  // for the server must be provided. If the certificate is signed by a
  2575  // certificate authority, the certFile should be the concatenation
  2576  // of the server's certificate, any intermediates, and the CA's certificate.
  2577  //
  2578  // ServeTLS always returns a non-nil error.
  2579  func ServeTLS(l net.Listener, handler Handler, certFile, keyFile string) error {
  2580  	srv := &Server{Handler: handler}
  2581  	return srv.ServeTLS(l, certFile, keyFile)
  2582  }
  2583  
  2584  // A Server defines parameters for running an HTTP server.
  2585  // The zero value for Server is a valid configuration.
  2586  type Server struct {
  2587  	// Addr optionally specifies the TCP address for the server to listen on,
  2588  	// in the form "host:port". If empty, ":http" (port 80) is used.
  2589  	// The service names are defined in RFC 6335 and assigned by IANA.
  2590  	// See net.Dial for details of the address format.
  2591  	Addr string
  2592  
  2593  	Handler Handler // handler to invoke, http.DefaultServeMux if nil
  2594  
  2595  	// DisableGeneralOptionsHandler, if true, passes "OPTIONS *" requests to the Handler,
  2596  	// otherwise responds with 200 OK and Content-Length: 0.
  2597  	DisableGeneralOptionsHandler bool
  2598  
  2599  	// TLSConfig optionally provides a TLS configuration for use
  2600  	// by ServeTLS and ListenAndServeTLS. Note that this value is
  2601  	// cloned by ServeTLS and ListenAndServeTLS, so it's not
  2602  	// possible to modify the configuration with methods like
  2603  	// tls.Config.SetSessionTicketKeys. To use
  2604  	// SetSessionTicketKeys, use Server.Serve with a TLS Listener
  2605  	// instead.
  2606  	TLSConfig *tls.Config
  2607  
  2608  	// ReadTimeout is the maximum duration for reading the entire
  2609  	// request, including the body. A zero or negative value means
  2610  	// there will be no timeout.
  2611  	//
  2612  	// Because ReadTimeout does not let Handlers make per-request
  2613  	// decisions on each request body's acceptable deadline or
  2614  	// upload rate, most users will prefer to use
  2615  	// ReadHeaderTimeout. It is valid to use them both.
  2616  	ReadTimeout time.Duration
  2617  
  2618  	// ReadHeaderTimeout is the amount of time allowed to read
  2619  	// request headers. The connection's read deadline is reset
  2620  	// after reading the headers and the Handler can decide what
  2621  	// is considered too slow for the body. If ReadHeaderTimeout
  2622  	// is zero, the value of ReadTimeout is used. If both are
  2623  	// zero, there is no timeout.
  2624  	ReadHeaderTimeout time.Duration
  2625  
  2626  	// WriteTimeout is the maximum duration before timing out
  2627  	// writes of the response. It is reset whenever a new
  2628  	// request's header is read. Like ReadTimeout, it does not
  2629  	// let Handlers make decisions on a per-request basis.
  2630  	// A zero or negative value means there will be no timeout.
  2631  	WriteTimeout time.Duration
  2632  
  2633  	// IdleTimeout is the maximum amount of time to wait for the
  2634  	// next request when keep-alives are enabled. If IdleTimeout
  2635  	// is zero, the value of ReadTimeout is used. If both are
  2636  	// zero, there is no timeout.
  2637  	IdleTimeout time.Duration
  2638  
  2639  	// MaxHeaderBytes controls the maximum number of bytes the
  2640  	// server will read parsing the request header's keys and
  2641  	// values, including the request line. It does not limit the
  2642  	// size of the request body.
  2643  	// If zero, DefaultMaxHeaderBytes is used.
  2644  	MaxHeaderBytes int
  2645  
  2646  	// TLSNextProto optionally specifies a function to take over
  2647  	// ownership of the provided TLS connection when an ALPN
  2648  	// protocol upgrade has occurred. The map key is the protocol
  2649  	// name negotiated. The Handler argument should be used to
  2650  	// handle HTTP requests and will initialize the Request's TLS
  2651  	// and RemoteAddr if not already set. The connection is
  2652  	// automatically closed when the function returns.
  2653  	// If TLSNextProto is not nil, HTTP/2 support is not enabled
  2654  	// automatically.
  2655  	TLSNextProto map[string]func(*Server, *tls.Conn, Handler)
  2656  
  2657  	// ConnState specifies an optional callback function that is
  2658  	// called when a client connection changes state. See the
  2659  	// ConnState type and associated constants for details.
  2660  	ConnState func(net.Conn, ConnState)
  2661  
  2662  	// ErrorLog specifies an optional logger for errors accepting
  2663  	// connections, unexpected behavior from handlers, and
  2664  	// underlying FileSystem errors.
  2665  	// If nil, logging is done via the log package's standard logger.
  2666  	ErrorLog *log.Logger
  2667  
  2668  	// BaseContext optionally specifies a function that returns
  2669  	// the base context for incoming requests on this server.
  2670  	// The provided Listener is the specific Listener that's
  2671  	// about to start accepting requests.
  2672  	// If BaseContext is nil, the default is context.Background().
  2673  	// If non-nil, it must return a non-nil context.
  2674  	BaseContext func(net.Listener) context.Context
  2675  
  2676  	// ConnContext optionally specifies a function that modifies
  2677  	// the context used for a new connection c. The provided ctx
  2678  	// is derived from the base context and has a ServerContextKey
  2679  	// value.
  2680  	ConnContext func(ctx context.Context, c net.Conn) context.Context
  2681  
  2682  	inShutdown atomic.Bool // true when server is in shutdown
  2683  
  2684  	disableKeepAlives atomic.Bool
  2685  	nextProtoOnce     sync.Once // guards setupHTTP2_* init
  2686  	nextProtoErr      error     // result of http2.ConfigureServer if used
  2687  
  2688  	mu         sync.Mutex
  2689  	listeners  map[*net.Listener]struct{}
  2690  	activeConn map[*conn]struct{}
  2691  	onShutdown []func()
  2692  
  2693  	listenerGroup sync.WaitGroup
  2694  }
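
        // Illustrative sketch: a Server with explicit timeouts instead of the zero
        // value's unlimited defaults (the address, handler, and durations are
        // arbitrary examples):
        //
        //	srv := &http.Server{
        //		Addr:              ":8080",
        //		Handler:           mux, // some http.Handler
        //		ReadHeaderTimeout: 5 * time.Second,
        //		ReadTimeout:       30 * time.Second,
        //		WriteTimeout:      30 * time.Second,
        //		IdleTimeout:       2 * time.Minute,
        //	}
        //	log.Fatal(srv.ListenAndServe())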
  2695  
  2696  // Close immediately closes all active net.Listeners and any
  2697  // connections in state StateNew, StateActive, or StateIdle. For a
  2698  // graceful shutdown, use Shutdown.
  2699  //
  2700  // Close does not attempt to close (and does not even know about)
  2701  // any hijacked connections, such as WebSockets.
  2702  //
  2703  // Close returns any error returned from closing the Server's
  2704  // underlying Listener(s).
  2705  func (srv *Server) Close() error {
  2706  	srv.inShutdown.Store(true)
  2707  	srv.mu.Lock()
  2708  	defer srv.mu.Unlock()
  2709  	err := srv.closeListenersLocked()
  2710  
  2711  	// Unlock srv.mu while waiting for listenerGroup.
  2712  	// The group Add and Done calls are made with srv.mu held,
  2713  	// to avoid adding a new listener in the window between
  2714  	// us setting inShutdown above and waiting here.
  2715  	srv.mu.Unlock()
  2716  	srv.listenerGroup.Wait()
  2717  	srv.mu.Lock()
  2718  
  2719  	for c := range srv.activeConn {
  2720  		c.rwc.Close()
  2721  		delete(srv.activeConn, c)
  2722  	}
  2723  	return err
  2724  }
  2725  
  2726  // shutdownPollIntervalMax is the max polling interval when checking
  2727  // quiescence during Server.Shutdown. Polling starts with a small
  2728  // interval and backs off to the max.
  2729  // Ideally we could find a solution that doesn't involve polling,
  2730  // but which also doesn't have a high runtime cost (and doesn't
  2731  // involve any contentious mutexes), but that is left as an
  2732  // exercise for the reader.
  2733  const shutdownPollIntervalMax = 500 * time.Millisecond
  2734  
  2735  // Shutdown gracefully shuts down the server without interrupting any
  2736  // active connections. Shutdown works by first closing all open
  2737  // listeners, then closing all idle connections, and then waiting
  2738  // indefinitely for connections to return to idle and then shut down.
  2739  // If the provided context expires before the shutdown is complete,
  2740  // Shutdown returns the context's error, otherwise it returns any
  2741  // error returned from closing the Server's underlying Listener(s).
  2742  //
  2743  // When Shutdown is called, Serve, ListenAndServe, and
  2744  // ListenAndServeTLS immediately return ErrServerClosed. Make sure the
  2745  // program doesn't exit until Shutdown returns.
  2746  //
  2747  // Shutdown does not attempt to close nor wait for hijacked
  2748  // connections such as WebSockets. The caller of Shutdown should
  2749  // separately notify such long-lived connections of shutdown and wait
  2750  // for them to close, if desired. See RegisterOnShutdown for a way to
  2751  // register shutdown notification functions.
  2752  //
  2753  // Once Shutdown has been called on a server, it may not be reused;
  2754  // future calls to methods such as Serve will return ErrServerClosed.
  2755  func (srv *Server) Shutdown(ctx context.Context) error {
  2756  	srv.inShutdown.Store(true)
  2757  
  2758  	srv.mu.Lock()
  2759  	lnerr := srv.closeListenersLocked()
  2760  	for _, f := range srv.onShutdown {
  2761  		go f()
  2762  	}
  2763  	srv.mu.Unlock()
  2764  	srv.listenerGroup.Wait()
  2765  
  2766  	pollIntervalBase := time.Millisecond
  2767  	nextPollInterval := func() time.Duration {
  2768  		// Add 10% jitter.
  2769  		interval := pollIntervalBase + time.Duration(rand.Intn(int(pollIntervalBase/10)))
  2770  		// Double and clamp for next time.
  2771  		pollIntervalBase *= 2
  2772  		if pollIntervalBase > shutdownPollIntervalMax {
  2773  			pollIntervalBase = shutdownPollIntervalMax
  2774  		}
  2775  		return interval
  2776  	}
  2777  
  2778  	timer := time.NewTimer(nextPollInterval())
  2779  	defer timer.Stop()
  2780  	for {
  2781  		if srv.closeIdleConns() {
  2782  			return lnerr
  2783  		}
  2784  		select {
  2785  		case <-ctx.Done():
  2786  			return ctx.Err()
  2787  		case <-timer.C:
  2788  			timer.Reset(nextPollInterval())
  2789  		}
  2790  	}
  2791  }
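
        // Illustrative sketch of typical graceful-shutdown wiring, where the
        // ErrServerClosed returned by ListenAndServe is expected once Shutdown has
        // been called (the signal handling and 10-second grace period are
        // arbitrary choices):
        //
        //	go func() {
        //		sigc := make(chan os.Signal, 1)
        //		signal.Notify(sigc, os.Interrupt)
        //		<-sigc
        //		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        //		defer cancel()
        //		srv.Shutdown(ctx)
        //	}()
        //	if err := srv.ListenAndServe(); err != http.ErrServerClosed {
        //		log.Fatal(err)
        //	}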
  2792  
  2793  // RegisterOnShutdown registers a function to call on Shutdown.
  2794  // This can be used to gracefully shut down connections that have
  2795  // undergone ALPN protocol upgrade or that have been hijacked.
  2796  // This function should start protocol-specific graceful shutdown,
  2797  // but should not wait for shutdown to complete.
  2798  func (srv *Server) RegisterOnShutdown(f func()) {
  2799  	srv.mu.Lock()
  2800  	srv.onShutdown = append(srv.onShutdown, f)
  2801  	srv.mu.Unlock()
  2802  }
  2803  
  2804  // closeIdleConns closes all idle connections and reports whether the
  2805  // server is quiescent.
  2806  func (s *Server) closeIdleConns() bool {
  2807  	s.mu.Lock()
  2808  	defer s.mu.Unlock()
  2809  	quiescent := true
  2810  	for c := range s.activeConn {
  2811  		st, unixSec := c.getState()
  2812  		// Issue 22682: treat StateNew connections as if
  2813  		// they're idle if we haven't read the first request's
  2814  		// header in over 5 seconds.
  2815  		if st == StateNew && unixSec < time.Now().Unix()-5 {
  2816  			st = StateIdle
  2817  		}
  2818  		if st != StateIdle || unixSec == 0 {
  2819  			// Assume unixSec == 0 means it's a very new
  2820  			// connection, without state set yet.
  2821  			quiescent = false
  2822  			continue
  2823  		}
  2824  		c.rwc.Close()
  2825  		delete(s.activeConn, c)
  2826  	}
  2827  	return quiescent
  2828  }
  2829  
  2830  func (s *Server) closeListenersLocked() error {
  2831  	var err error
  2832  	for ln := range s.listeners {
  2833  		if cerr := (*ln).Close(); cerr != nil && err == nil {
  2834  			err = cerr
  2835  		}
  2836  	}
  2837  	return err
  2838  }
  2839  
  2840  // A ConnState represents the state of a client connection to a server.
  2841  // It's used by the optional Server.ConnState hook.
  2842  type ConnState int
  2843  
  2844  const (
  2845  	// StateNew represents a new connection that is expected to
  2846  	// send a request immediately. Connections begin at this
  2847  	// state and then transition to either StateActive or
  2848  	// StateClosed.
  2849  	StateNew ConnState = iota
  2850  
  2851  	// StateActive represents a connection that has read 1 or more
  2852  	// bytes of a request. The Server.ConnState hook for
  2853  	// StateActive fires before the request has entered a handler
  2854  	// and doesn't fire again until the request has been
  2855  	// handled. After the request is handled, the state
  2856  	// transitions to StateClosed, StateHijacked, or StateIdle.
  2857  	// For HTTP/2, StateActive fires on the transition from zero
  2858  	// to one active request, and only transitions away once all
  2859  	// active requests are complete. That means that ConnState
  2860  	// cannot be used to do per-request work; ConnState only notes
  2861  	// the overall state of the connection.
  2862  	StateActive
  2863  
  2864  	// StateIdle represents a connection that has finished
  2865  	// handling a request and is in the keep-alive state, waiting
  2866  	// for a new request. Connections transition from StateIdle
  2867  	// to either StateActive or StateClosed.
  2868  	StateIdle
  2869  
  2870  	// StateHijacked represents a hijacked connection.
  2871  	// This is a terminal state. It does not transition to StateClosed.
  2872  	StateHijacked
  2873  
  2874  	// StateClosed represents a closed connection.
  2875  	// This is a terminal state. Hijacked connections do not
  2876  	// transition to StateClosed.
  2877  	StateClosed
  2878  )
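
// Illustrative sketch (not part of this file): a caller-side ConnState hook
// that keeps a rough count of open connections and logs each transition.
// srv is assumed to be a caller-defined *http.Server:
//
//	var active int64
//	srv.ConnState = func(c net.Conn, state http.ConnState) {
//		switch state {
//		case http.StateNew:
//			atomic.AddInt64(&active, 1)
//		case http.StateHijacked, http.StateClosed:
//			// Both are terminal states, so decrement exactly once per connection.
//			atomic.AddInt64(&active, -1)
//		}
//		log.Printf("conn %v: %v (open=%d)", c.RemoteAddr(), state, atomic.LoadInt64(&active))
//	}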
  2879  
  2880  var stateName = map[ConnState]string{
  2881  	StateNew:      "new",
  2882  	StateActive:   "active",
  2883  	StateIdle:     "idle",
  2884  	StateHijacked: "hijacked",
  2885  	StateClosed:   "closed",
  2886  }
  2887  
  2888  func (c ConnState) String() string {
  2889  	return stateName[c]
  2890  }
  2891  
  2892  // serverHandler delegates to either the server's Handler or
  2893  // DefaultServeMux and also handles "OPTIONS *" requests.
  2894  type serverHandler struct {
  2895  	srv *Server
  2896  }
  2897  
  2898  func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) {
  2899  	handler := sh.srv.Handler
  2900  	if handler == nil {
  2901  		handler = DefaultServeMux
  2902  	}
  2903  	if !sh.srv.DisableGeneralOptionsHandler && req.RequestURI == "*" && req.Method == "OPTIONS" {
  2904  		handler = globalOptionsHandler{}
  2905  	}
  2906  
  2907  	if req.URL != nil && strings.Contains(req.URL.RawQuery, ";") {
  2908  		var allowQuerySemicolonsInUse atomic.Bool
  2909  		req = req.WithContext(context.WithValue(req.Context(), silenceSemWarnContextKey, func() {
  2910  			allowQuerySemicolonsInUse.Store(true)
  2911  		}))
  2912  		defer func() {
  2913  			if !allowQuerySemicolonsInUse.Load() {
  2914  				sh.srv.logf("http: URL query contains semicolon, which is no longer a supported separator; parts of the query may be stripped when parsed; see golang.org/issue/25192")
  2915  			}
  2916  		}()
  2917  	}
  2918  
  2919  	handler.ServeHTTP(rw, req)
  2920  }
  2921  
  2922  var silenceSemWarnContextKey = &contextKey{"silence-semicolons"}
  2923  
  2924  // AllowQuerySemicolons returns a handler that serves requests by converting any
  2925  // unescaped semicolons in the URL query to ampersands, and invoking the handler h.
  2926  //
  2927  // This restores the pre-Go 1.17 behavior of splitting query parameters on both
  2928  // semicolons and ampersands. (See golang.org/issue/25192). Note that this
  2929  // behavior doesn't match that of many proxies, and the mismatch can lead to
  2930  // security issues.
  2931  //
  2932  // AllowQuerySemicolons should be invoked before Request.ParseForm is called.
  2933  func AllowQuerySemicolons(h Handler) Handler {
  2934  	return HandlerFunc(func(w ResponseWriter, r *Request) {
  2935  		if silenceSemicolonsWarning, ok := r.Context().Value(silenceSemWarnContextKey).(func()); ok {
  2936  			silenceSemicolonsWarning()
  2937  		}
  2938  		if strings.Contains(r.URL.RawQuery, ";") {
  2939  			r2 := new(Request)
  2940  			*r2 = *r
  2941  			r2.URL = new(url.URL)
  2942  			*r2.URL = *r.URL
  2943  			r2.URL.RawQuery = strings.ReplaceAll(r.URL.RawQuery, ";", "&")
  2944  			h.ServeHTTP(w, r2)
  2945  		} else {
  2946  			h.ServeHTTP(w, r)
  2947  		}
  2948  	})
  2949  }
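
// Illustrative sketch (not part of this file): wrapping an entire mux so that
// query parsing inside its handlers sees semicolons converted to ampersands.
// searchHandler is a hypothetical handler that reads r.URL.Query():
//
//	mux := http.NewServeMux()
//	mux.HandleFunc("/search", searchHandler)
//	srv := &http.Server{
//		Addr:    ":8080",
//		Handler: http.AllowQuerySemicolons(mux),
//	}
//	log.Fatal(srv.ListenAndServe())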
  2950  
  2951  // ListenAndServe listens on the TCP network address srv.Addr and then
  2952  // calls Serve to handle requests on incoming connections.
  2953  // Accepted connections are configured to enable TCP keep-alives.
  2954  //
  2955  // If srv.Addr is blank, ":http" is used.
  2956  //
  2957  // ListenAndServe always returns a non-nil error. After Shutdown or Close,
  2958  // the returned error is ErrServerClosed.
  2959  func (srv *Server) ListenAndServe() error {
  2960  	if srv.shuttingDown() {
  2961  		return ErrServerClosed
  2962  	}
  2963  	addr := srv.Addr
  2964  	if addr == "" {
  2965  		addr = ":http"
  2966  	}
  2967  	ln, err := net.Listen("tcp", addr)
  2968  	if err != nil {
  2969  		return err
  2970  	}
  2971  	return srv.Serve(ln)
  2972  }
  2973  
  2974  var testHookServerServe func(*Server, net.Listener) // used if non-nil
  2975  
  2976  // shouldConfigureHTTP2ForServe reports whether Server.Serve should configure
  2977  // automatic HTTP/2 (which sets up the srv.TLSNextProto map).
  2978  func (srv *Server) shouldConfigureHTTP2ForServe() bool {
  2979  	if srv.TLSConfig == nil {
  2980  		// Compatibility with Go 1.6:
  2981  		// If there's no TLSConfig, it's possible that the user just
  2982  		// didn't set it on the http.Server, but did pass it to
  2983  		// tls.NewListener and passed that listener to Serve.
  2984  		// So we should configure HTTP/2 (to set up srv.TLSNextProto)
  2985  		// in case the listener returns an "h2" *tls.Conn.
  2986  		return true
  2987  	}
  2988  	// The user specified a TLSConfig on their http.Server.
  2989  	// In this case, only configure HTTP/2 if their tls.Config
  2990  	// explicitly mentions "h2". Otherwise http2.ConfigureServer
  2991  	// would modify the tls.Config to add it, but they probably already
  2992  	// passed this tls.Config to tls.NewListener. And if they did,
  2993  	// it's too late anyway to fix it. It would only be potentially racy.
  2994  	// See Issue 15908.
  2995  	return strSliceContains(srv.TLSConfig.NextProtos, http2NextProtoTLS)
  2996  }
  2997  
  2998  // ErrServerClosed is returned by the Server's Serve, ServeTLS, ListenAndServe,
  2999  // and ListenAndServeTLS methods after a call to Shutdown or Close.
  3000  var ErrServerClosed = errors.New("http: Server closed")
  3001  
  3002  // Serve accepts incoming connections on the Listener l, creating a
  3003  // new service goroutine for each. The service goroutines read requests and
  3004  // then call srv.Handler to reply to them.
  3005  //
  3006  // HTTP/2 support is only enabled if the Listener returns *tls.Conn
  3007  // connections and they were configured with "h2" in the TLS
  3008  // Config.NextProtos.
  3009  //
  3010  // Serve always returns a non-nil error and closes l.
  3011  // After Shutdown or Close, the returned error is ErrServerClosed.
  3012  func (srv *Server) Serve(l net.Listener) error {
  3013  	if fn := testHookServerServe; fn != nil {
  3014  		fn(srv, l) // call hook with unwrapped listener
  3015  	}
  3016  
  3017  	origListener := l
  3018  	l = &onceCloseListener{Listener: l}
  3019  	defer l.Close()
  3020  
  3021  	if err := srv.setupHTTP2_Serve(); err != nil {
  3022  		return err
  3023  	}
  3024  
  3025  	if !srv.trackListener(&l, true) {
  3026  		return ErrServerClosed
  3027  	}
  3028  	defer srv.trackListener(&l, false)
  3029  
  3030  	baseCtx := context.Background()
  3031  	if srv.BaseContext != nil {
  3032  		baseCtx = srv.BaseContext(origListener)
  3033  		if baseCtx == nil {
  3034  			panic("BaseContext returned a nil context")
  3035  		}
  3036  	}
  3037  
  3038  	var tempDelay time.Duration // how long to sleep on accept failure
  3039  
  3040  	ctx := context.WithValue(baseCtx, ServerContextKey, srv)
  3041  	for {
  3042  		rw, err := l.Accept()
  3043  		if err != nil {
  3044  			if srv.shuttingDown() {
  3045  				return ErrServerClosed
  3046  			}
  3047  			if ne, ok := err.(net.Error); ok && ne.Temporary() {
  3048  				if tempDelay == 0 {
  3049  					tempDelay = 5 * time.Millisecond
  3050  				} else {
  3051  					tempDelay *= 2
  3052  				}
  3053  				if max := 1 * time.Second; tempDelay > max {
  3054  					tempDelay = max
  3055  				}
  3056  				srv.logf("http: Accept error: %v; retrying in %v", err, tempDelay)
  3057  				time.Sleep(tempDelay)
  3058  				continue
  3059  			}
  3060  			return err
  3061  		}
  3062  		connCtx := ctx
  3063  		if cc := srv.ConnContext; cc != nil {
  3064  			connCtx = cc(connCtx, rw)
  3065  			if connCtx == nil {
  3066  				panic("ConnContext returned nil")
  3067  			}
  3068  		}
  3069  		tempDelay = 0
  3070  		c := srv.newConn(rw)
  3071  		c.setState(c.rwc, StateNew, runHooks) // before Serve can return
  3072  		go c.serve(connCtx)
  3073  	}
  3074  }
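
// Illustrative sketch (not part of this file): using Serve with a listener the
// caller constructed, here a Unix domain socket. mux is a hypothetical handler:
//
//	ln, err := net.Listen("unix", "/tmp/app.sock")
//	if err != nil {
//		log.Fatal(err)
//	}
//	srv := &http.Server{Handler: mux}
//	log.Fatal(srv.Serve(ln)) // Serve closes ln before returning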
  3075  
  3076  // ServeTLS accepts incoming connections on the Listener l, creating a
  3077  // new service goroutine for each. The service goroutines perform TLS
  3078  // setup and then read requests, calling srv.Handler to reply to them.
  3079  //
  3080  // Files containing a certificate and matching private key for the
  3081  // server must be provided if neither the Server's
  3082  // TLSConfig.Certificates nor TLSConfig.GetCertificate are populated.
  3083  // If the certificate is signed by a certificate authority, the
  3084  // certFile should be the concatenation of the server's certificate,
  3085  // any intermediates, and the CA's certificate.
  3086  //
  3087  // ServeTLS always returns a non-nil error. After Shutdown or Close, the
  3088  // returned error is ErrServerClosed.
  3089  func (srv *Server) ServeTLS(l net.Listener, certFile, keyFile string) error {
  3090  	// Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig
  3091  	// before we clone it and create the TLS Listener.
  3092  	if err := srv.setupHTTP2_ServeTLS(); err != nil {
  3093  		return err
  3094  	}
  3095  
  3096  	config := cloneTLSConfig(srv.TLSConfig)
  3097  	if !strSliceContains(config.NextProtos, "http/1.1") {
  3098  		config.NextProtos = append(config.NextProtos, "http/1.1")
  3099  	}
  3100  
  3101  	configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil
  3102  	if !configHasCert || certFile != "" || keyFile != "" {
  3103  		var err error
  3104  		config.Certificates = make([]tls.Certificate, 1)
  3105  		config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
  3106  		if err != nil {
  3107  			return err
  3108  		}
  3109  	}
  3110  
  3111  	tlsListener := tls.NewListener(l, config)
  3112  	return srv.Serve(tlsListener)
  3113  }
  3114  
  3115  // trackListener adds or removes a net.Listener to the set of tracked
  3116  // listeners.
  3117  //
  3118  // We store a pointer to interface in the map set, in case the
  3119  // net.Listener is not comparable. This is safe because we only call
  3120  // trackListener via Serve and can track+defer untrack the same
  3121  // pointer to local variable there. We never need to compare a
  3122  // Listener from another caller.
  3123  //
  3124  // It reports whether the server is still up (not Shutdown or Closed).
  3125  func (s *Server) trackListener(ln *net.Listener, add bool) bool {
  3126  	s.mu.Lock()
  3127  	defer s.mu.Unlock()
  3128  	if s.listeners == nil {
  3129  		s.listeners = make(map[*net.Listener]struct{})
  3130  	}
  3131  	if add {
  3132  		if s.shuttingDown() {
  3133  			return false
  3134  		}
  3135  		s.listeners[ln] = struct{}{}
  3136  		s.listenerGroup.Add(1)
  3137  	} else {
  3138  		delete(s.listeners, ln)
  3139  		s.listenerGroup.Done()
  3140  	}
  3141  	return true
  3142  }
  3143  
  3144  func (s *Server) trackConn(c *conn, add bool) {
  3145  	s.mu.Lock()
  3146  	defer s.mu.Unlock()
  3147  	if s.activeConn == nil {
  3148  		s.activeConn = make(map[*conn]struct{})
  3149  	}
  3150  	if add {
  3151  		s.activeConn[c] = struct{}{}
  3152  	} else {
  3153  		delete(s.activeConn, c)
  3154  	}
  3155  }
  3156  
  3157  func (s *Server) idleTimeout() time.Duration {
  3158  	if s.IdleTimeout != 0 {
  3159  		return s.IdleTimeout
  3160  	}
  3161  	return s.ReadTimeout
  3162  }
  3163  
  3164  func (s *Server) readHeaderTimeout() time.Duration {
  3165  	if s.ReadHeaderTimeout != 0 {
  3166  		return s.ReadHeaderTimeout
  3167  	}
  3168  	return s.ReadTimeout
  3169  }
  3170  
  3171  func (s *Server) doKeepAlives() bool {
  3172  	return !s.disableKeepAlives.Load() && !s.shuttingDown()
  3173  }
  3174  
  3175  func (s *Server) shuttingDown() bool {
  3176  	return s.inShutdown.Load()
  3177  }
  3178  
  3179  // SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled.
  3180  // By default, keep-alives are always enabled. Only very
  3181  // resource-constrained environments or servers in the process of
  3182  // shutting down should disable them.
  3183  func (srv *Server) SetKeepAlivesEnabled(v bool) {
  3184  	if v {
  3185  		srv.disableKeepAlives.Store(false)
  3186  		return
  3187  	}
  3188  	srv.disableKeepAlives.Store(true)
  3189  
  3190  	// Close idle HTTP/1 conns:
  3191  	srv.closeIdleConns()
  3192  
  3193  	// TODO: Issue 26303: close HTTP/2 conns as soon as they become idle.
  3194  }
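
// Illustrative sketch (not part of this file): disabling keep-alives while a
// load balancer drains traffic, then shutting down. drainPeriod is a
// hypothetical caller-defined duration:
//
//	srv.SetKeepAlivesEnabled(false) // subsequent responses get "Connection: close"
//	time.Sleep(drainPeriod)         // wait for health checks to mark this instance down
//	srv.Shutdown(context.Background())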
  3195  
  3196  func (s *Server) logf(format string, args ...any) {
  3197  	if s.ErrorLog != nil {
  3198  		s.ErrorLog.Printf(format, args...)
  3199  	} else {
  3200  		log.Printf(format, args...)
  3201  	}
  3202  }
  3203  
  3204  // logf prints to the ErrorLog of the *Server associated with request r
  3205  // via ServerContextKey. If there's no associated server, or if ErrorLog
  3206  // is nil, logging is done via the log package's standard logger.
  3207  func logf(r *Request, format string, args ...any) {
  3208  	s, _ := r.Context().Value(ServerContextKey).(*Server)
  3209  	if s != nil && s.ErrorLog != nil {
  3210  		s.ErrorLog.Printf(format, args...)
  3211  	} else {
  3212  		log.Printf(format, args...)
  3213  	}
  3214  }
  3215  
  3216  // ListenAndServe listens on the TCP network address addr and then calls
  3217  // Serve with handler to handle requests on incoming connections.
  3218  // Accepted connections are configured to enable TCP keep-alives.
  3219  //
  3220  // The handler is typically nil, in which case the DefaultServeMux is used.
  3221  //
  3222  // ListenAndServe always returns a non-nil error.
  3223  func ListenAndServe(addr string, handler Handler) error {
  3224  	server := &Server{Addr: addr, Handler: handler}
  3225  	return server.ListenAndServe()
  3226  }
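
// Illustrative sketch (not part of this file): a minimal server using the
// DefaultServeMux by passing a nil handler:
//
//	http.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
//		fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path))
//	})
//	log.Fatal(http.ListenAndServe(":8080", nil))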
  3227  
  3228  // ListenAndServeTLS acts identically to ListenAndServe, except that it
  3229  // expects HTTPS connections. Additionally, files containing a certificate and
  3230  // matching private key for the server must be provided. If the certificate
  3231  // is signed by a certificate authority, the certFile should be the concatenation
  3232  // of the server's certificate, any intermediates, and the CA's certificate.
  3233  func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
  3234  	server := &Server{Addr: addr, Handler: handler}
  3235  	return server.ListenAndServeTLS(certFile, keyFile)
  3236  }
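
// Illustrative sketch (not part of this file): serving HTTPS with a
// certificate and key on disk. cert.pem and key.pem are placeholder paths,
// and mux is a hypothetical handler:
//
//	log.Fatal(http.ListenAndServeTLS(":443", "cert.pem", "key.pem", mux))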
  3237  
  3238  // ListenAndServeTLS listens on the TCP network address srv.Addr and
  3239  // then calls ServeTLS to handle requests on incoming TLS connections.
  3240  // Accepted connections are configured to enable TCP keep-alives.
  3241  //
  3242  // Filenames containing a certificate and matching private key for the
  3243  // server must be provided if neither the Server's TLSConfig.Certificates
  3244  // nor TLSConfig.GetCertificate are populated. If the certificate is
  3245  // signed by a certificate authority, the certFile should be the
  3246  // concatenation of the server's certificate, any intermediates, and
  3247  // the CA's certificate.
  3248  //
  3249  // If srv.Addr is blank, ":https" is used.
  3250  //
  3251  // ListenAndServeTLS always returns a non-nil error. After Shutdown or
  3252  // Close, the returned error is ErrServerClosed.
  3253  func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
  3254  	if srv.shuttingDown() {
  3255  		return ErrServerClosed
  3256  	}
  3257  	addr := srv.Addr
  3258  	if addr == "" {
  3259  		addr = ":https"
  3260  	}
  3261  
  3262  	ln, err := net.Listen("tcp", addr)
  3263  	if err != nil {
  3264  		return err
  3265  	}
  3266  
  3267  	defer ln.Close()
  3268  
  3269  	return srv.ServeTLS(ln, certFile, keyFile)
  3270  }
  3271  
  3272  // setupHTTP2_ServeTLS conditionally configures HTTP/2 on
  3273  // srv and returns the error, if any, from setting it up. If it is
  3274  // not configured for policy reasons, nil is returned.
  3275  func (srv *Server) setupHTTP2_ServeTLS() error {
  3276  	srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults)
  3277  	return srv.nextProtoErr
  3278  }
  3279  
  3280  // setupHTTP2_Serve is called from (*Server).Serve and conditionally
  3281  // configures HTTP/2 on srv using a more conservative policy than
  3282  // setupHTTP2_ServeTLS because Serve is called after tls.Listen,
  3283  // and may be called concurrently. See shouldConfigureHTTP2ForServe.
  3284  //
  3285  // The tests named TestTransportAutomaticHTTP2* and
  3286  // TestConcurrentServerServe in server_test.go demonstrate some
  3287  // of the supported use cases and motivations.
  3288  func (srv *Server) setupHTTP2_Serve() error {
  3289  	srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults_Serve)
  3290  	return srv.nextProtoErr
  3291  }
  3292  
  3293  func (srv *Server) onceSetNextProtoDefaults_Serve() {
  3294  	if srv.shouldConfigureHTTP2ForServe() {
  3295  		srv.onceSetNextProtoDefaults()
  3296  	}
  3297  }
  3298  
  3299  // onceSetNextProtoDefaults configures HTTP/2, if the user hasn't
  3300  // configured otherwise (by setting srv.TLSNextProto non-nil).
  3301  // It must only be called via srv.nextProtoOnce (use srv.setupHTTP2_*).
  3302  func (srv *Server) onceSetNextProtoDefaults() {
  3303  	if omitBundledHTTP2 || godebug.Get("http2server") == "0" {
  3304  		return
  3305  	}
  3306  	// Enable HTTP/2 by default if the user hasn't otherwise
  3307  	// configured their TLSNextProto map.
  3308  	if srv.TLSNextProto == nil {
  3309  		conf := &http2Server{
  3310  			NewWriteScheduler: func() http2WriteScheduler { return http2NewPriorityWriteScheduler(nil) },
  3311  		}
  3312  		srv.nextProtoErr = http2ConfigureServer(srv, conf)
  3313  	}
  3314  }
  3315  
  3316  // TimeoutHandler returns a Handler that runs h with the given time limit.
  3317  //
  3318  // The new Handler calls h.ServeHTTP to handle each request, but if a
  3319  // call runs for longer than its time limit, the handler responds with
  3320  // a 503 Service Unavailable error and the given message in its body.
  3321  // (If msg is empty, a suitable default message will be sent.)
  3322  // After such a timeout, writes by h to its ResponseWriter will return
  3323  // ErrHandlerTimeout.
  3324  //
  3325  // TimeoutHandler supports the Pusher interface but does not support
  3326  // the Hijacker or Flusher interfaces.
  3327  func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler {
  3328  	return &timeoutHandler{
  3329  		handler: h,
  3330  		body:    msg,
  3331  		dt:      dt,
  3332  	}
  3333  }
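
// Illustrative sketch (not part of this file): limiting a slow handler to one
// second; requests that exceed the limit receive a 503 with the given body:
//
//	slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//		time.Sleep(2 * time.Second) // simulate slow work
//		io.WriteString(w, "done")
//	})
//	http.Handle("/slow", http.TimeoutHandler(slow, time.Second, "request timed out"))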
  3334  
  3335  // ErrHandlerTimeout is returned on ResponseWriter Write calls
  3336  // in handlers which have timed out.
  3337  var ErrHandlerTimeout = errors.New("http: Handler timeout")
  3338  
  3339  type timeoutHandler struct {
  3340  	handler Handler
  3341  	body    string
  3342  	dt      time.Duration
  3343  
  3344  	// When set, no context will be created and this context will
  3345  	// be used instead.
  3346  	testContext context.Context
  3347  }
  3348  
  3349  func (h *timeoutHandler) errorBody() string {
  3350  	if h.body != "" {
  3351  		return h.body
  3352  	}
  3353  	return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>"
  3354  }
  3355  
  3356  func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) {
  3357  	ctx := h.testContext
  3358  	if ctx == nil {
  3359  		var cancelCtx context.CancelFunc
  3360  		ctx, cancelCtx = context.WithTimeout(r.Context(), h.dt)
  3361  		defer cancelCtx()
  3362  	}
  3363  	r = r.WithContext(ctx)
  3364  	done := make(chan struct{})
  3365  	tw := &timeoutWriter{
  3366  		w:   w,
  3367  		h:   make(Header),
  3368  		req: r,
  3369  	}
  3370  	panicChan := make(chan any, 1)
  3371  	go func() {
  3372  		defer func() {
  3373  			if p := recover(); p != nil {
  3374  				panicChan <- p
  3375  			}
  3376  		}()
  3377  		h.handler.ServeHTTP(tw, r)
  3378  		close(done)
  3379  	}()
  3380  	select {
  3381  	case p := <-panicChan:
  3382  		panic(p)
  3383  	case <-done:
  3384  		tw.mu.Lock()
  3385  		defer tw.mu.Unlock()
  3386  		dst := w.Header()
  3387  		for k, vv := range tw.h {
  3388  			dst[k] = vv
  3389  		}
  3390  		if !tw.wroteHeader {
  3391  			tw.code = StatusOK
  3392  		}
  3393  		w.WriteHeader(tw.code)
  3394  		w.Write(tw.wbuf.Bytes())
  3395  	case <-ctx.Done():
  3396  		tw.mu.Lock()
  3397  		defer tw.mu.Unlock()
  3398  		switch err := ctx.Err(); err {
  3399  		case context.DeadlineExceeded:
  3400  			w.WriteHeader(StatusServiceUnavailable)
  3401  			io.WriteString(w, h.errorBody())
  3402  			tw.err = ErrHandlerTimeout
  3403  		default:
  3404  			w.WriteHeader(StatusServiceUnavailable)
  3405  			tw.err = err
  3406  		}
  3407  	}
  3408  }
  3409  
  3410  type timeoutWriter struct {
  3411  	w    ResponseWriter
  3412  	h    Header
  3413  	wbuf bytes.Buffer
  3414  	req  *Request
  3415  
  3416  	mu          sync.Mutex
  3417  	err         error
  3418  	wroteHeader bool
  3419  	code        int
  3420  }
  3421  
  3422  var _ Pusher = (*timeoutWriter)(nil)
  3423  
  3424  // Push implements the Pusher interface.
  3425  func (tw *timeoutWriter) Push(target string, opts *PushOptions) error {
  3426  	if pusher, ok := tw.w.(Pusher); ok {
  3427  		return pusher.Push(target, opts)
  3428  	}
  3429  	return ErrNotSupported
  3430  }
  3431  
  3432  func (tw *timeoutWriter) Header() Header { return tw.h }
  3433  
  3434  func (tw *timeoutWriter) Write(p []byte) (int, error) {
  3435  	tw.mu.Lock()
  3436  	defer tw.mu.Unlock()
  3437  	if tw.err != nil {
  3438  		return 0, tw.err
  3439  	}
  3440  	if !tw.wroteHeader {
  3441  		tw.writeHeaderLocked(StatusOK)
  3442  	}
  3443  	return tw.wbuf.Write(p)
  3444  }
  3445  
  3446  func (tw *timeoutWriter) writeHeaderLocked(code int) {
  3447  	checkWriteHeaderCode(code)
  3448  
  3449  	switch {
  3450  	case tw.err != nil:
  3451  		return
  3452  	case tw.wroteHeader:
  3453  		if tw.req != nil {
  3454  			caller := relevantCaller()
  3455  			logf(tw.req, "http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
  3456  		}
  3457  	default:
  3458  		tw.wroteHeader = true
  3459  		tw.code = code
  3460  	}
  3461  }
  3462  
  3463  func (tw *timeoutWriter) WriteHeader(code int) {
  3464  	tw.mu.Lock()
  3465  	defer tw.mu.Unlock()
  3466  	tw.writeHeaderLocked(code)
  3467  }
  3468  
  3469  // onceCloseListener wraps a net.Listener, protecting it from
  3470  // multiple Close calls.
  3471  type onceCloseListener struct {
  3472  	net.Listener
  3473  	once     sync.Once
  3474  	closeErr error
  3475  }
  3476  
  3477  func (oc *onceCloseListener) Close() error {
  3478  	oc.once.Do(oc.close)
  3479  	return oc.closeErr
  3480  }
  3481  
  3482  func (oc *onceCloseListener) close() { oc.closeErr = oc.Listener.Close() }
  3483  
  3484  // globalOptionsHandler responds to "OPTIONS *" requests.
  3485  type globalOptionsHandler struct{}
  3486  
  3487  func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) {
  3488  	w.Header().Set("Content-Length", "0")
  3489  	if r.ContentLength != 0 {
  3490  		// Read up to 4KB of OPTIONS body (as mentioned in the
  3491  		// spec as being reserved for future use), but anything
  3492  		// over that is considered a waste of server resources
  3493  		// (or an attack) and we abort and close the connection,
  3494  		// courtesy of MaxBytesReader's EOF behavior.
  3495  		mb := MaxBytesReader(w, r.Body, 4<<10)
  3496  		io.Copy(io.Discard, mb)
  3497  	}
  3498  }
  3499  
  3500  // initALPNRequest is an HTTP handler that initializes certain
  3501  // uninitialized fields in its *Request. Such partially-initialized
  3502  // Requests come from ALPN protocol handlers.
  3503  type initALPNRequest struct {
  3504  	ctx context.Context
  3505  	c   *tls.Conn
  3506  	h   serverHandler
  3507  }
  3508  
  3509  // BaseContext is an exported but unadvertised http.Handler method
  3510  // recognized by x/net/http2 to pass down a context; the TLSNextProto
  3511  // API predates context support so we shoehorn through the only
  3512  // interface we have available.
  3513  func (h initALPNRequest) BaseContext() context.Context { return h.ctx }
  3514  
  3515  func (h initALPNRequest) ServeHTTP(rw ResponseWriter, req *Request) {
  3516  	if req.TLS == nil {
  3517  		req.TLS = &tls.ConnectionState{}
  3518  		*req.TLS = h.c.ConnectionState()
  3519  	}
  3520  	if req.Body == nil {
  3521  		req.Body = NoBody
  3522  	}
  3523  	if req.RemoteAddr == "" {
  3524  		req.RemoteAddr = h.c.RemoteAddr().String()
  3525  	}
  3526  	h.h.ServeHTTP(rw, req)
  3527  }
  3528  
  3529  // loggingConn is used for debugging.
  3530  type loggingConn struct {
  3531  	name string
  3532  	net.Conn
  3533  }
  3534  
  3535  var (
  3536  	uniqNameMu   sync.Mutex
  3537  	uniqNameNext = make(map[string]int)
  3538  )
  3539  
  3540  func newLoggingConn(baseName string, c net.Conn) net.Conn {
  3541  	uniqNameMu.Lock()
  3542  	defer uniqNameMu.Unlock()
  3543  	uniqNameNext[baseName]++
  3544  	return &loggingConn{
  3545  		name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]),
  3546  		Conn: c,
  3547  	}
  3548  }
  3549  
  3550  func (c *loggingConn) Write(p []byte) (n int, err error) {
  3551  	log.Printf("%s.Write(%d) = ....", c.name, len(p))
  3552  	n, err = c.Conn.Write(p)
  3553  	log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err)
  3554  	return
  3555  }
  3556  
  3557  func (c *loggingConn) Read(p []byte) (n int, err error) {
  3558  	log.Printf("%s.Read(%d) = ....", c.name, len(p))
  3559  	n, err = c.Conn.Read(p)
  3560  	log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err)
  3561  	return
  3562  }
  3563  
  3564  func (c *loggingConn) Close() (err error) {
  3565  	log.Printf("%s.Close() = ...", c.name)
  3566  	err = c.Conn.Close()
  3567  	log.Printf("%s.Close() = %v", c.name, err)
  3568  	return
  3569  }
  3570  
  3571  // checkConnErrorWriter writes to c.rwc and records any write errors to c.werr.
  3572  // It only contains one field (and a pointer field at that), so it
  3573  // fits in an interface value without an extra allocation.
  3574  type checkConnErrorWriter struct {
  3575  	c *conn
  3576  }
  3577  
  3578  func (w checkConnErrorWriter) Write(p []byte) (n int, err error) {
  3579  	n, err = w.c.rwc.Write(p)
  3580  	if err != nil && w.c.werr == nil {
  3581  		w.c.werr = err
  3582  		w.c.cancelCtx()
  3583  	}
  3584  	return
  3585  }
  3586  
  3587  func numLeadingCRorLF(v []byte) (n int) {
  3588  	for _, b := range v {
  3589  		if b == '\r' || b == '\n' {
  3590  			n++
  3591  			continue
  3592  		}
  3593  		break
  3594  	}
  3595  	return
  3596  
  3597  }
  3598  
  3599  func strSliceContains(ss []string, s string) bool {
  3600  	for _, v := range ss {
  3601  		if v == s {
  3602  			return true
  3603  		}
  3604  	}
  3605  	return false
  3606  }
  3607  
  3608  // tlsRecordHeaderLooksLikeHTTP reports whether a TLS record header
  3609  // looks like it might've been a misdirected plaintext HTTP request.
  3610  func tlsRecordHeaderLooksLikeHTTP(hdr [5]byte) bool {
  3611  	switch string(hdr[:]) {
  3612  	case "GET /", "HEAD ", "POST ", "PUT /", "OPTIO":
  3613  		return true
  3614  	}
  3615  	return false
  3616  }
  3617  
  3618  // MaxBytesHandler returns a Handler that runs h with its ResponseWriter and Request.Body wrapped by a MaxBytesReader.
  3619  func MaxBytesHandler(h Handler, n int64) Handler {
  3620  	return HandlerFunc(func(w ResponseWriter, r *Request) {
  3621  		r2 := *r
  3622  		r2.Body = MaxBytesReader(w, r.Body, n)
  3623  		h.ServeHTTP(w, &r2)
  3624  	})
  3625  }
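
// Illustrative sketch (not part of this file): capping request bodies at 1 MiB.
// Reads past the limit fail, and MaxBytesReader arranges for the server to
// close the connection:
//
//	upload := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//		body, err := io.ReadAll(r.Body)
//		if err != nil {
//			http.Error(w, "request body too large", http.StatusRequestEntityTooLarge)
//			return
//		}
//		w.Write(body)
//	})
//	http.Handle("/upload", http.MaxBytesHandler(upload, 1<<20))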