github.com/guyezi/gofrontend@v0.0.0-20200228202240-7a62a49e62c0/libgo/go/net/http/server.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// HTTP server. See RFC 7230 through 7235.

package http

import (
	"bufio"
	"bytes"
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net"
	"net/textproto"
	"net/url"
	urlpkg "net/url"
	"os"
	"path"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/net/http/httpguts"
)

// Errors used by the HTTP server.
var (
	// ErrBodyNotAllowed is returned by ResponseWriter.Write calls
	// when the HTTP method or response code does not permit a
	// body.
	ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body")

	// ErrHijacked is returned by ResponseWriter.Write calls when
	// the underlying connection has been hijacked using the
	// Hijacker interface. A zero-byte write on a hijacked
	// connection will return ErrHijacked without any other side
	// effects.
	ErrHijacked = errors.New("http: connection has been hijacked")

	// ErrContentLength is returned by ResponseWriter.Write calls
	// when a Handler set a Content-Length response header with a
	// declared size and then attempted to write more bytes than
	// declared.
	ErrContentLength = errors.New("http: wrote more than the declared Content-Length")

	// Deprecated: ErrWriteAfterFlush is no longer returned by
	// anything in the net/http package. Callers should not
	// compare errors against this variable.
	ErrWriteAfterFlush = errors.New("unused")
)

// A Handler responds to an HTTP request.
//
// ServeHTTP should write reply headers and data to the ResponseWriter
// and then return. Returning signals that the request is finished; it
// is not valid to use the ResponseWriter or read from the
// Request.Body after or concurrently with the completion of the
// ServeHTTP call.
//
// Depending on the HTTP client software, HTTP protocol version, and
// any intermediaries between the client and the Go server, it may not
// be possible to read from the Request.Body after writing to the
// ResponseWriter. Cautious handlers should read the Request.Body
// first, and then reply.
//
// Except for reading the body, handlers should not modify the
// provided Request.
//
// If ServeHTTP panics, the server (the caller of ServeHTTP) assumes
// that the effect of the panic was isolated to the active request.
// It recovers the panic, logs a stack trace to the server error log,
// and either closes the network connection or sends an HTTP/2
// RST_STREAM, depending on the HTTP protocol. To abort a handler so
// the client sees an interrupted response but the server doesn't log
// an error, panic with the value ErrAbortHandler.
type Handler interface {
	ServeHTTP(ResponseWriter, *Request)
}

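// An illustrative sketch (not part of the original file): a minimal Handler
// implementation of the interface above. exampleGreeter is a hypothetical
// name, and identifiers are unqualified only because this sketch sits inside
// package http; external code would write http.ResponseWriter, etc.
type exampleGreeter struct {
	greeting string
}

func (g exampleGreeter) ServeHTTP(w ResponseWriter, r *Request) {
	// Read whatever is needed from r first, then reply and return.
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	fmt.Fprintf(w, "%s, %s\n", g.greeting, r.URL.Path)
}

// A server would use it as, for example:
//
//	srv := &Server{Addr: ":8080", Handler: exampleGreeter{greeting: "hello"}}
//	log.Fatal(srv.ListenAndServe())
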
// A ResponseWriter interface is used by an HTTP handler to
// construct an HTTP response.
//
// A ResponseWriter may not be used after the Handler.ServeHTTP method
// has returned.
type ResponseWriter interface {
	// Header returns the header map that will be sent by
	// WriteHeader. The Header map also is the mechanism with which
	// Handlers can set HTTP trailers.
	//
	// Changing the header map after a call to WriteHeader (or
	// Write) has no effect unless the modified headers are
	// trailers.
	//
	// There are two ways to set Trailers. The preferred way is to
	// predeclare in the headers which trailers you will later
	// send by setting the "Trailer" header to the names of the
	// trailer keys which will come later. In this case, those
	// keys of the Header map are treated as if they were
	// trailers. See the example. The second way, for trailer
	// keys not known to the Handler until after the first Write,
	// is to prefix the Header map keys with the TrailerPrefix
	// constant value. See TrailerPrefix.
	//
	// To suppress automatic response headers (such as "Date"), set
	// their value to nil.
	Header() Header

	// Write writes the data to the connection as part of an HTTP reply.
	//
	// If WriteHeader has not yet been called, Write calls
	// WriteHeader(http.StatusOK) before writing the data. If the Header
	// does not contain a Content-Type line, Write adds a Content-Type set
	// to the result of passing the initial 512 bytes of written data to
	// DetectContentType. Additionally, if the total size of all written
	// data is under a few KB and there are no Flush calls, the
	// Content-Length header is added automatically.
	//
	// Depending on the HTTP protocol version and the client, calling
	// Write or WriteHeader may prevent future reads on the
	// Request.Body. For HTTP/1.x requests, handlers should read any
	// needed request body data before writing the response. Once the
	// headers have been flushed (due to either an explicit Flusher.Flush
	// call or writing enough data to trigger a flush), the request body
	// may be unavailable. For HTTP/2 requests, the Go HTTP server permits
	// handlers to continue to read the request body while concurrently
	// writing the response. However, such behavior may not be supported
	// by all HTTP/2 clients. Handlers should read before writing if
	// possible to maximize compatibility.
	Write([]byte) (int, error)

	// WriteHeader sends an HTTP response header with the provided
	// status code.
	//
	// If WriteHeader is not called explicitly, the first call to Write
	// will trigger an implicit WriteHeader(http.StatusOK).
	// Thus explicit calls to WriteHeader are mainly used to
	// send error codes.
	//
	// The provided code must be a valid HTTP 1xx-5xx status code.
	// Only one header may be written. Go does not currently
	// support sending user-defined 1xx informational headers,
	// with the exception of the 100-continue response header that the
	// Server sends automatically when the Request.Body is read.
	WriteHeader(statusCode int)
}

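// An illustrative sketch (not part of the original file) of the preferred
// trailer mechanism described in the Header comment above: the trailer name
// is predeclared via the "Trailer" header before the body is written, and
// its value is filled in afterwards. exampleTrailerHandler and the header
// names are hypothetical.
func exampleTrailerHandler(w ResponseWriter, r *Request) {
	w.Header().Set("Trailer", "X-Checksum") // predeclare the trailer key
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	w.WriteHeader(StatusOK) // logically writes the header
	io.WriteString(w, "body bytes\n")
	// Because "X-Checksum" was predeclared, this assignment is sent as a
	// trailer after the (chunked) body, not as a header.
	w.Header().Set("X-Checksum", "abc123")
}
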
// The Flusher interface is implemented by ResponseWriters that allow
// an HTTP handler to flush buffered data to the client.
//
// The default HTTP/1.x and HTTP/2 ResponseWriter implementations
// support Flusher, but ResponseWriter wrappers may not. Handlers
// should always test for this ability at runtime.
//
// Note that even for ResponseWriters that support Flush,
// if the client is connected through an HTTP proxy,
// the buffered data may not reach the client until the response
// completes.
type Flusher interface {
	// Flush sends any buffered data to the client.
	Flush()
}

// The Hijacker interface is implemented by ResponseWriters that allow
// an HTTP handler to take over the connection.
//
// The default ResponseWriter for HTTP/1.x connections supports
// Hijacker, but HTTP/2 connections intentionally do not.
// ResponseWriter wrappers may also not support Hijacker. Handlers
// should always test for this ability at runtime.
type Hijacker interface {
	// Hijack lets the caller take over the connection.
	// After a call to Hijack the HTTP server library
	// will not do anything else with the connection.
	//
	// It becomes the caller's responsibility to manage
	// and close the connection.
	//
	// The returned net.Conn may have read or write deadlines
	// already set, depending on the configuration of the
	// Server. It is the caller's responsibility to set
	// or clear those deadlines as needed.
	//
	// The returned bufio.Reader may contain unprocessed buffered
	// data from the client.
	//
	// After a call to Hijack, the original Request.Body must not
	// be used. The original Request's Context remains valid and
	// is not canceled until the Request's ServeHTTP method
	// returns.
	Hijack() (net.Conn, *bufio.ReadWriter, error)
}

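// An illustrative sketch (not part of the original file) of the Hijacker
// contract described above. The type assertion guards against ResponseWriter
// wrappers and HTTP/2, where Hijacker is unavailable. exampleHijack is a
// hypothetical name.
func exampleHijack(w ResponseWriter, r *Request) {
	hj, ok := w.(Hijacker)
	if !ok {
		Error(w, "hijacking not supported", StatusInternalServerError)
		return
	}
	netConn, buf, err := hj.Hijack()
	if err != nil {
		Error(w, err.Error(), StatusInternalServerError)
		return
	}
	// From here on the server leaves the connection alone: the caller owns
	// it, including any deadlines already set, and must close it.
	defer netConn.Close()
	buf.WriteString("HTTP/1.1 200 OK\r\nConnection: close\r\n\r\nraw bytes\r\n")
	buf.Flush()
}
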
// The CloseNotifier interface is implemented by ResponseWriters which
// allow detecting when the underlying connection has gone away.
//
// This mechanism can be used to cancel long operations on the server
// if the client has disconnected before the response is ready.
//
// Deprecated: the CloseNotifier interface predates Go's context package.
// New code should use Request.Context instead.
type CloseNotifier interface {
	// CloseNotify returns a channel that receives at most a
	// single value (true) when the client connection has gone
	// away.
	//
	// CloseNotify may wait to notify until Request.Body has been
	// fully read.
	//
	// After the Handler has returned, there is no guarantee
	// that the channel receives a value.
	//
	// If the protocol is HTTP/1.1 and CloseNotify is called while
	// processing an idempotent request (such as a GET) while
	// HTTP/1.1 pipelining is in use, the arrival of a subsequent
	// pipelined request may cause a value to be sent on the
	// returned channel. In practice HTTP/1.1 pipelining is not
	// enabled in browsers and not seen often in the wild. If this
	// is a problem, use HTTP/2 or only use CloseNotify on methods
	// such as POST.
	CloseNotify() <-chan bool
}

var (
	// ServerContextKey is a context key. It can be used in HTTP
	// handlers with Context.Value to access the server that
	// started the handler. The associated value will be of
	// type *Server.
	ServerContextKey = &contextKey{"http-server"}

	// LocalAddrContextKey is a context key. It can be used in
	// HTTP handlers with Context.Value to access the local
	// address the connection arrived on.
	// The associated value will be of type net.Addr.
	LocalAddrContextKey = &contextKey{"local-addr"}
)

// A conn represents the server side of an HTTP connection.
type conn struct {
	// server is the server on which the connection arrived.
	// Immutable; never nil.
	server *Server

	// cancelCtx cancels the connection-level context.
	cancelCtx context.CancelFunc

	// rwc is the underlying network connection.
	// This is never wrapped by other types and is the value given out
	// to CloseNotifier callers. It is usually of type *net.TCPConn or
	// *tls.Conn.
	rwc net.Conn

	// remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously
	// inside the Listener's Accept goroutine, as some implementations block.
	// It is populated immediately inside the (*conn).serve goroutine.
	// This is the value of a Handler's (*Request).RemoteAddr.
	remoteAddr string

	// tlsState is the TLS connection state when using TLS.
	// nil means not TLS.
	tlsState *tls.ConnectionState

	// werr is set to the first write error to rwc.
	// It is set via checkConnErrorWriter{w}, where bufw writes.
	werr error

	// r is bufr's read source. It's a wrapper around rwc that provides
	// io.LimitedReader-style limiting (while reading request headers)
	// and functionality to support CloseNotifier. See *connReader docs.
	r *connReader

	// bufr reads from r.
	bufr *bufio.Reader

	// bufw writes to checkConnErrorWriter{c}, which populates werr on error.
	bufw *bufio.Writer

	// lastMethod is the method of the most recent request
	// on this connection, if any.
	lastMethod string

	curReq atomic.Value // of *response (which has a Request in it)

	curState struct{ atomic uint64 } // packed (unixtime<<8|uint8(ConnState))

	// mu guards hijackedv
	mu sync.Mutex

	// hijackedv is whether this connection has been hijacked
	// by a Handler with the Hijacker interface.
	// It is guarded by mu.
	hijackedv bool
}

func (c *conn) hijacked() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.hijackedv
}

// c.mu must be held.
func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
	if c.hijackedv {
		return nil, nil, ErrHijacked
	}
	c.r.abortPendingRead()

	c.hijackedv = true
	rwc = c.rwc
	rwc.SetDeadline(time.Time{})

	buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc))
	if c.r.hasByte {
		if _, err := c.bufr.Peek(c.bufr.Buffered() + 1); err != nil {
			return nil, nil, fmt.Errorf("unexpected Peek failure reading buffered byte: %v", err)
		}
	}
	c.setState(rwc, StateHijacked)
	return
}

// This should be >= 512 bytes for DetectContentType,
// but otherwise it's somewhat arbitrary.
const bufferBeforeChunkingSize = 2048

// chunkWriter writes to a response's conn buffer, and is the writer
// wrapped by the response.bufw buffered writer.
//
// chunkWriter also is responsible for finalizing the Header, including
// conditionally setting the Content-Type and setting a Content-Length
// in cases where the handler's final output is smaller than the buffer
// size. It also conditionally adds chunk headers, when in chunking mode.
//
// See the comment above (*response).Write for the entire write flow.
type chunkWriter struct {
	res *response

	// header is either nil or a deep clone of res.handlerHeader
	// at the time of res.writeHeader, if res.writeHeader is
	// called and extra buffering is being done to calculate
	// Content-Type and/or Content-Length.
	header Header

	// wroteHeader tells whether the header's been written to "the
	// wire" (or rather: w.conn.buf). this is unlike
	// (*response).wroteHeader, which tells only whether it was
	// logically written.
	wroteHeader bool

	// set by the writeHeader method:
	chunking bool // using chunked transfer encoding for reply body
}

var (
	crlf       = []byte("\r\n")
	colonSpace = []byte(": ")
)

func (cw *chunkWriter) Write(p []byte) (n int, err error) {
	if !cw.wroteHeader {
		cw.writeHeader(p)
	}
	if cw.res.req.Method == "HEAD" {
		// Eat writes.
		return len(p), nil
	}
	if cw.chunking {
		_, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p))
		if err != nil {
			cw.res.conn.rwc.Close()
			return
		}
	}
	n, err = cw.res.conn.bufw.Write(p)
	if cw.chunking && err == nil {
		_, err = cw.res.conn.bufw.Write(crlf)
	}
	if err != nil {
		cw.res.conn.rwc.Close()
	}
	return
}

func (cw *chunkWriter) flush() {
	if !cw.wroteHeader {
		cw.writeHeader(nil)
	}
	cw.res.conn.bufw.Flush()
}

func (cw *chunkWriter) close() {
	if !cw.wroteHeader {
		cw.writeHeader(nil)
	}
	if cw.chunking {
		bw := cw.res.conn.bufw // conn's bufio writer
		// zero chunk to mark EOF
		bw.WriteString("0\r\n")
		if trailers := cw.res.finalTrailers(); trailers != nil {
			trailers.Write(bw) // the writer handles noting errors
		}
		// final blank line after the trailers (whether
		// present or not)
		bw.WriteString("\r\n")
	}
}

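// An illustrative sketch (not part of the original file) of the wire framing
// produced by chunkWriter above: each chunk is a hex length, CRLF, the data,
// CRLF; a zero-length chunk, any trailers, and a final CRLF terminate the
// body (RFC 7230 section 4.1). exampleWriteChunked is a hypothetical helper,
// not used by the server itself.
func exampleWriteChunked(w io.Writer, chunks ...[]byte) error {
	for _, p := range chunks {
		if len(p) == 0 {
			continue // a zero-length chunk would terminate the body early
		}
		if _, err := fmt.Fprintf(w, "%x\r\n", len(p)); err != nil {
			return err
		}
		if _, err := w.Write(p); err != nil {
			return err
		}
		if _, err := io.WriteString(w, "\r\n"); err != nil {
			return err
		}
	}
	// Terminating zero chunk with no trailers, then the final blank line.
	_, err := io.WriteString(w, "0\r\n\r\n")
	return err
}
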
// A response represents the server side of an HTTP response.
type response struct {
	conn             *conn
	req              *Request // request for this response
	reqBody          io.ReadCloser
	cancelCtx        context.CancelFunc // when ServeHTTP exits
	wroteHeader      bool               // reply header has been (logically) written
	wroteContinue    bool               // 100 Continue response was written
	wants10KeepAlive bool               // HTTP/1.0 w/ Connection "keep-alive"
	wantsClose       bool               // HTTP request has Connection "close"

	w  *bufio.Writer // buffers output in chunks to chunkWriter
	cw chunkWriter

	// handlerHeader is the Header that Handlers get access to,
	// which may be retained and mutated even after WriteHeader.
	// handlerHeader is copied into cw.header at WriteHeader
	// time, and privately mutated thereafter.
	handlerHeader Header
	calledHeader  bool // handler accessed handlerHeader via Header

	written       int64 // number of bytes written in body
	contentLength int64 // explicitly-declared Content-Length; or -1
	status        int   // status code passed to WriteHeader

	// close connection after this reply. set on request and
	// updated after response from handler if there's a
	// "Connection: keep-alive" response header and a
	// Content-Length.
	closeAfterReply bool

	// requestBodyLimitHit is set by requestTooLarge when
	// maxBytesReader hits its max size. It is checked in
	// WriteHeader, to make sure we don't consume the
	// remaining request body to try to advance to the next HTTP
	// request. Instead, when this is set, we stop reading
	// subsequent requests on this connection and stop reading
	// input from it.
	requestBodyLimitHit bool

	// trailers are the headers to be sent after the handler
	// finishes writing the body. This field is initialized from
	// the Trailer response header when the response header is
	// written.
	trailers []string

	handlerDone atomicBool // set true when the handler exits

	// Buffers for Date, Content-Length, and status code
	dateBuf   [len(TimeFormat)]byte
	clenBuf   [10]byte
	statusBuf [3]byte

	// closeNotifyCh is the channel returned by CloseNotify.
	// TODO(bradfitz): this is currently (for Go 1.8) always
	// non-nil. Make this lazily-created again as it used to be?
	closeNotifyCh  chan bool
	didCloseNotify int32 // atomic (only 0->1 winner should send)
}

// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
// that, if present, signals that the map entry is actually for
// the response trailers, and not the response headers. The prefix
// is stripped after the ServeHTTP call finishes and the values are
// sent in the trailers.
//
// This mechanism is intended only for trailers that are not known
// prior to the headers being written. If the set of trailers is fixed
// or known before the header is written, the normal Go trailers mechanism
// is preferred:
//    https://golang.org/pkg/net/http/#ResponseWriter
//    https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
const TrailerPrefix = "Trailer:"

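// An illustrative sketch (not part of the original file) of the TrailerPrefix
// mechanism for a trailer whose name is only known after the body has started.
// exampleLateTrailer and the header name are hypothetical.
func exampleLateTrailer(w ResponseWriter, r *Request) {
	io.WriteString(w, "streaming body...\n") // headers go out with the first write
	// The trailer key was not predeclared via the "Trailer" header, so the
	// magic prefix is used; it is stripped and "X-Late-Checksum" is sent as
	// a trailer after the handler returns.
	w.Header().Set(TrailerPrefix+"X-Late-Checksum", "abc123")
}
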
// finalTrailers is called after the Handler exits and returns a non-nil
// value if the Handler set any trailers.
func (w *response) finalTrailers() Header {
	var t Header
	for k, vv := range w.handlerHeader {
		if strings.HasPrefix(k, TrailerPrefix) {
			if t == nil {
				t = make(Header)
			}
			t[strings.TrimPrefix(k, TrailerPrefix)] = vv
		}
	}
	for _, k := range w.trailers {
		if t == nil {
			t = make(Header)
		}
		for _, v := range w.handlerHeader[k] {
			t.Add(k, v)
		}
	}
	return t
}

type atomicBool int32

func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
func (b *atomicBool) setTrue()    { atomic.StoreInt32((*int32)(b), 1) }

// declareTrailer is called for each Trailer header when the
// response header is written. It notes that a header will need to be
// written in the trailers at the end of the response.
func (w *response) declareTrailer(k string) {
	k = CanonicalHeaderKey(k)
	if !httpguts.ValidTrailerHeader(k) {
		// Forbidden by RFC 7230, section 4.1.2
		return
	}
	w.trailers = append(w.trailers, k)
}

// requestTooLarge is called by maxBytesReader when too much input has
// been read from the client.
func (w *response) requestTooLarge() {
	w.closeAfterReply = true
	w.requestBodyLimitHit = true
	if !w.wroteHeader {
		w.Header().Set("Connection", "close")
	}
}

// needsSniff reports whether a Content-Type still needs to be sniffed.
func (w *response) needsSniff() bool {
	_, haveType := w.handlerHeader["Content-Type"]
	return !w.cw.wroteHeader && !haveType && w.written < sniffLen
}

// writerOnly hides an io.Writer value's optional ReadFrom method
// from io.Copy.
type writerOnly struct {
	io.Writer
}

func srcIsRegularFile(src io.Reader) (isRegular bool, err error) {
	switch v := src.(type) {
	case *os.File:
		fi, err := v.Stat()
		if err != nil {
			return false, err
		}
		return fi.Mode().IsRegular(), nil
	case *io.LimitedReader:
		return srcIsRegularFile(v.R)
	default:
		return
	}
}

// ReadFrom is here to optimize copying from an *os.File regular file
// to a *net.TCPConn with sendfile.
func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
	// Our underlying w.conn.rwc is usually a *TCPConn (with its
	// own ReadFrom method). If not, or if our src isn't a regular
	// file, just fall back to the normal copy method.
	rf, ok := w.conn.rwc.(io.ReaderFrom)
	regFile, err := srcIsRegularFile(src)
	if err != nil {
		return 0, err
	}
	if !ok || !regFile {
		bufp := copyBufPool.Get().(*[]byte)
		defer copyBufPool.Put(bufp)
		return io.CopyBuffer(writerOnly{w}, src, *bufp)
	}

	// sendfile path:

	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}

	if w.needsSniff() {
		n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen))
		n += n0
		if err != nil {
			return n, err
		}
	}

	w.w.Flush()  // get rid of any previous writes
	w.cw.flush() // make sure Header is written; flush data to rwc

	// Now that cw has been flushed, its chunking field is guaranteed initialized.
	if !w.cw.chunking && w.bodyAllowed() {
		n0, err := rf.ReadFrom(src)
		n += n0
		w.written += n0
		return n, err
	}

	n0, err := io.Copy(writerOnly{w}, src)
	n += n0
	return n, err
}

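// An illustrative sketch (not part of the original file) of a handler that
// lets the ReadFrom fast path above kick in: copying straight from an
// *os.File can fall through to sendfile on a *net.TCPConn once the header is
// out and no chunking is needed. exampleSendFile and the file path are
// hypothetical.
func exampleSendFile(w ResponseWriter, r *Request) {
	f, err := os.Open("/var/www/large.bin") // hypothetical path
	if err != nil {
		NotFound(w, r)
		return
	}
	defer f.Close()
	if fi, err := f.Stat(); err == nil {
		// An explicit Content-Length avoids chunked encoding, which would
		// otherwise disable the sendfile path.
		w.Header().Set("Content-Length", strconv.FormatInt(fi.Size(), 10))
	}
	io.Copy(w, f) // io.Copy can use (*response).ReadFrom, defined above
}
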
// debugServerConnections controls whether all server connections are wrapped
// with a verbose logging wrapper.
const debugServerConnections = false

// Create new connection from rwc.
func (srv *Server) newConn(rwc net.Conn) *conn {
	c := &conn{
		server: srv,
		rwc:    rwc,
	}
	if debugServerConnections {
		c.rwc = newLoggingConn("server", c.rwc)
	}
	return c
}

type readResult struct {
	n   int
	err error
	b   byte // byte read, if n == 1
}

// connReader is the io.Reader wrapper used by *conn. It combines a
// selectively-activated io.LimitedReader (to bound request header
// read sizes) with support for selectively keeping an io.Reader.Read
// call blocked in a background goroutine to wait for activity and
// trigger a CloseNotifier channel.
type connReader struct {
	conn *conn

	mu      sync.Mutex // guards following
	hasByte bool
	byteBuf [1]byte
	cond    *sync.Cond
	inRead  bool
	aborted bool  // set true before conn.rwc deadline is set to past
	remain  int64 // bytes remaining
}

func (cr *connReader) lock() {
	cr.mu.Lock()
	if cr.cond == nil {
		cr.cond = sync.NewCond(&cr.mu)
	}
}

func (cr *connReader) unlock() { cr.mu.Unlock() }

func (cr *connReader) startBackgroundRead() {
	cr.lock()
	defer cr.unlock()
	if cr.inRead {
		panic("invalid concurrent Body.Read call")
	}
	if cr.hasByte {
		return
	}
	cr.inRead = true
	cr.conn.rwc.SetReadDeadline(time.Time{})
	go cr.backgroundRead()
}

func (cr *connReader) backgroundRead() {
	n, err := cr.conn.rwc.Read(cr.byteBuf[:])
	cr.lock()
	if n == 1 {
		cr.hasByte = true
		// We were past the end of the previous request's body already
		// (since we wouldn't be in a background read otherwise), so
		// this is a pipelined HTTP request. Prior to Go 1.11 we used to
		// send on the CloseNotify channel and cancel the context here,
		// but the behavior was documented as only "may", and we only
		// did that because that's how CloseNotify accidentally behaved
		// in very early Go releases prior to context support. Once we
		// added context support, people used a Handler's
		// Request.Context() and passed it along. Having that context
		// cancel on pipelined HTTP requests caused problems.
		// Fortunately, almost nothing uses HTTP/1.x pipelining.
		// Unfortunately, apt-get does, or sometimes does.
		// New Go 1.11 behavior: don't fire CloseNotify or cancel
		// contexts on pipelined requests. Shouldn't affect people, but
		// fixes cases like Issue 23921. This does mean that a client
		// closing their TCP connection after sending a pipelined
		// request won't cancel the context, but we'll catch that on any
		// write failure (in checkConnErrorWriter.Write).
		// If the server never writes, yes, there are still contrived
		// server & client behaviors where this fails to ever cancel the
		// context, but that's kinda why HTTP/1.x pipelining died
		// anyway.
	}
	if ne, ok := err.(net.Error); ok && cr.aborted && ne.Timeout() {
		// Ignore this error. It's the expected error from
		// another goroutine calling abortPendingRead.
	} else if err != nil {
		cr.handleReadError(err)
	}
	cr.aborted = false
	cr.inRead = false
	cr.unlock()
	cr.cond.Broadcast()
}

func (cr *connReader) abortPendingRead() {
	cr.lock()
	defer cr.unlock()
	if !cr.inRead {
		return
	}
	cr.aborted = true
	cr.conn.rwc.SetReadDeadline(aLongTimeAgo)
	for cr.inRead {
		cr.cond.Wait()
	}
	cr.conn.rwc.SetReadDeadline(time.Time{})
}

func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain }
func (cr *connReader) setInfiniteReadLimit()     { cr.remain = maxInt64 }
func (cr *connReader) hitReadLimit() bool        { return cr.remain <= 0 }

// handleReadError is called whenever a Read from the client returns a
// non-nil error.
//
// The provided non-nil err is almost always io.EOF or a "use of
// closed network connection". In any case, the error is not
// particularly interesting, except perhaps for debugging during
// development. Any error means the connection is dead and we should
// down its context.
//
// It may be called from multiple goroutines.
func (cr *connReader) handleReadError(_ error) {
	cr.conn.cancelCtx()
	cr.closeNotify()
}

// may be called from multiple goroutines.
func (cr *connReader) closeNotify() {
	res, _ := cr.conn.curReq.Load().(*response)
	if res != nil && atomic.CompareAndSwapInt32(&res.didCloseNotify, 0, 1) {
		res.closeNotifyCh <- true
	}
}

func (cr *connReader) Read(p []byte) (n int, err error) {
	cr.lock()
	if cr.inRead {
		cr.unlock()
		if cr.conn.hijacked() {
			panic("invalid Body.Read call. After hijacked, the original Request must not be used")
		}
		panic("invalid concurrent Body.Read call")
	}
	if cr.hitReadLimit() {
		cr.unlock()
		return 0, io.EOF
	}
	if len(p) == 0 {
		cr.unlock()
		return 0, nil
	}
	if int64(len(p)) > cr.remain {
		p = p[:cr.remain]
	}
	if cr.hasByte {
		p[0] = cr.byteBuf[0]
		cr.hasByte = false
		cr.unlock()
		return 1, nil
	}
	cr.inRead = true
	cr.unlock()
	n, err = cr.conn.rwc.Read(p)

	cr.lock()
	cr.inRead = false
	if err != nil {
		cr.handleReadError(err)
	}
	cr.remain -= int64(n)
	cr.unlock()

	cr.cond.Broadcast()
	return n, err
}

var (
	bufioReaderPool   sync.Pool
	bufioWriter2kPool sync.Pool
	bufioWriter4kPool sync.Pool
)

var copyBufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 32*1024)
		return &b
	},
}

func bufioWriterPool(size int) *sync.Pool {
	switch size {
	case 2 << 10:
		return &bufioWriter2kPool
	case 4 << 10:
		return &bufioWriter4kPool
	}
	return nil
}

func newBufioReader(r io.Reader) *bufio.Reader {
	if v := bufioReaderPool.Get(); v != nil {
		br := v.(*bufio.Reader)
		br.Reset(r)
		return br
	}
	// Note: if this reader size is ever changed, update
	// TestHandlerBodyClose's assumptions.
	return bufio.NewReader(r)
}

func putBufioReader(br *bufio.Reader) {
	br.Reset(nil)
	bufioReaderPool.Put(br)
}

func newBufioWriterSize(w io.Writer, size int) *bufio.Writer {
	pool := bufioWriterPool(size)
	if pool != nil {
		if v := pool.Get(); v != nil {
			bw := v.(*bufio.Writer)
			bw.Reset(w)
			return bw
		}
	}
	return bufio.NewWriterSize(w, size)
}

func putBufioWriter(bw *bufio.Writer) {
	bw.Reset(nil)
	if pool := bufioWriterPool(bw.Available()); pool != nil {
		pool.Put(bw)
	}
}

// DefaultMaxHeaderBytes is the maximum permitted size of the headers
// in an HTTP request.
// This can be overridden by setting Server.MaxHeaderBytes.
const DefaultMaxHeaderBytes = 1 << 20 // 1 MB

func (srv *Server) maxHeaderBytes() int {
	if srv.MaxHeaderBytes > 0 {
		return srv.MaxHeaderBytes
	}
	return DefaultMaxHeaderBytes
}

func (srv *Server) initialReadLimitSize() int64 {
	return int64(srv.maxHeaderBytes()) + 4096 // bufio slop
}

// wrapper around io.ReadCloser which, on first read, sends an
// HTTP/1.1 100 Continue header
type expectContinueReader struct {
	resp       *response
	readCloser io.ReadCloser
	closed     bool
	sawEOF     bool
}

func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
	if ecr.closed {
		return 0, ErrBodyReadAfterClose
	}
	if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() {
		ecr.resp.wroteContinue = true
		ecr.resp.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
		ecr.resp.conn.bufw.Flush()
	}
	n, err = ecr.readCloser.Read(p)
	if err == io.EOF {
		ecr.sawEOF = true
	}
	return
}

func (ecr *expectContinueReader) Close() error {
	ecr.closed = true
	return ecr.readCloser.Close()
}

// TimeFormat is the time format to use when generating times in HTTP
// headers. It is like time.RFC1123 but hard-codes GMT as the time
// zone. The time being formatted must be in UTC for Format to
// generate the correct format.
//
// For parsing this time format, see ParseTime.
const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"

// appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat))
func appendTime(b []byte, t time.Time) []byte {
	const days = "SunMonTueWedThuFriSat"
	const months = "JanFebMarAprMayJunJulAugSepOctNovDec"

	t = t.UTC()
	yy, mm, dd := t.Date()
	hh, mn, ss := t.Clock()
	day := days[3*t.Weekday():]
	mon := months[3*(mm-1):]

	return append(b,
		day[0], day[1], day[2], ',', ' ',
		byte('0'+dd/10), byte('0'+dd%10), ' ',
		mon[0], mon[1], mon[2], ' ',
		byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ',
		byte('0'+hh/10), byte('0'+hh%10), ':',
		byte('0'+mn/10), byte('0'+mn%10), ':',
		byte('0'+ss/10), byte('0'+ss%10), ' ',
		'G', 'M', 'T')
}

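// An illustrative sketch (not part of the original file) of TimeFormat as
// described above: the time must be converted to UTC before formatting, and
// ParseTime handles the inverse direction. exampleHTTPDate is a hypothetical
// helper; appendTime above produces the same bytes without allocating.
func exampleHTTPDate(t time.Time) string {
	return t.UTC().Format(TimeFormat) // e.g. "Mon, 02 Jan 2006 15:04:05 GMT"
}
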
var errTooLarge = errors.New("http: request too large")

// Read next request from connection.
func (c *conn) readRequest(ctx context.Context) (w *response, err error) {
	if c.hijacked() {
		return nil, ErrHijacked
	}

	var (
		wholeReqDeadline time.Time // or zero if none
		hdrDeadline      time.Time // or zero if none
	)
	t0 := time.Now()
	if d := c.server.readHeaderTimeout(); d != 0 {
		hdrDeadline = t0.Add(d)
	}
	if d := c.server.ReadTimeout; d != 0 {
		wholeReqDeadline = t0.Add(d)
	}
	c.rwc.SetReadDeadline(hdrDeadline)
	if d := c.server.WriteTimeout; d != 0 {
		defer func() {
			c.rwc.SetWriteDeadline(time.Now().Add(d))
		}()
	}

	c.r.setReadLimit(c.server.initialReadLimitSize())
	if c.lastMethod == "POST" {
		// RFC 7230 section 3 tolerance for old buggy clients.
		peek, _ := c.bufr.Peek(4) // ReadRequest will get err below
		c.bufr.Discard(numLeadingCRorLF(peek))
	}
	req, err := readRequest(c.bufr, keepHostHeader)
	if err != nil {
		if c.r.hitReadLimit() {
			return nil, errTooLarge
		}
		return nil, err
	}

	if !http1ServerSupportsRequest(req) {
		return nil, badRequestError("unsupported protocol version")
	}

	c.lastMethod = req.Method
	c.r.setInfiniteReadLimit()

	hosts, haveHost := req.Header["Host"]
	isH2Upgrade := req.isH2Upgrade()
	if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade && req.Method != "CONNECT" {
		return nil, badRequestError("missing required Host header")
	}
	if len(hosts) > 1 {
		return nil, badRequestError("too many Host headers")
	}
	if len(hosts) == 1 && !httpguts.ValidHostHeader(hosts[0]) {
		return nil, badRequestError("malformed Host header")
	}
	for k, vv := range req.Header {
		if !httpguts.ValidHeaderFieldName(k) {
			return nil, badRequestError("invalid header name")
		}
		for _, v := range vv {
			if !httpguts.ValidHeaderFieldValue(v) {
				return nil, badRequestError("invalid header value")
			}
		}
	}
	delete(req.Header, "Host")

	ctx, cancelCtx := context.WithCancel(ctx)
	req.ctx = ctx
	req.RemoteAddr = c.remoteAddr
	req.TLS = c.tlsState
	if body, ok := req.Body.(*body); ok {
		body.doEarlyClose = true
	}

	// Adjust the read deadline if necessary.
	if !hdrDeadline.Equal(wholeReqDeadline) {
		c.rwc.SetReadDeadline(wholeReqDeadline)
	}

	w = &response{
		conn:          c,
		cancelCtx:     cancelCtx,
		req:           req,
		reqBody:       req.Body,
		handlerHeader: make(Header),
		contentLength: -1,
		closeNotifyCh: make(chan bool, 1),

		// We populate these ahead of time so we're not
		// reading from req.Header after their Handler starts
		// and maybe mutates it (Issue 14940)
		wants10KeepAlive: req.wantsHttp10KeepAlive(),
		wantsClose:       req.wantsClose(),
	}
	if isH2Upgrade {
		w.closeAfterReply = true
	}
	w.cw.res = w
	w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize)
	return w, nil
}

// http1ServerSupportsRequest reports whether Go's HTTP/1.x server
// supports the given request.
func http1ServerSupportsRequest(req *Request) bool {
	if req.ProtoMajor == 1 {
		return true
	}
	// Accept "PRI * HTTP/2.0" upgrade requests, so Handlers can
	// wire up their own HTTP/2 upgrades.
	if req.ProtoMajor == 2 && req.ProtoMinor == 0 &&
		req.Method == "PRI" && req.RequestURI == "*" {
		return true
	}
	// Reject HTTP/0.x, and all other HTTP/2+ requests (which
	// aren't encoded in ASCII anyway).
	return false
}

func (w *response) Header() Header {
	if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader {
		// Accessing the header between logically writing it
		// and physically writing it means we need to allocate
		// a clone to snapshot the logically written state.
		w.cw.header = w.handlerHeader.Clone()
	}
	w.calledHeader = true
	return w.handlerHeader
}

// maxPostHandlerReadBytes is the max number of Request.Body bytes not
// consumed by a handler that the server will read from the client
// in order to keep a connection alive. If there are more bytes than
// this, the server, to be paranoid, instead sends a "Connection:
// close" response.
//
// This number is approximately what a typical machine's TCP buffer
// size is anyway. (if we have the bytes on the machine, we might as
// well read them)
const maxPostHandlerReadBytes = 256 << 10

func checkWriteHeaderCode(code int) {
	// Issue 22880: require valid WriteHeader status codes.
	// For now we only enforce that it's three digits.
	// In the future we might block things over 599 (600 and above aren't defined
	// at https://httpwg.org/specs/rfc7231.html#status.codes)
	// and we might block under 200 (once we have more mature 1xx support).
	// But for now any three digits.
	//
	// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
	// no equivalent bogus thing we can realistically send in HTTP/2,
	// so we'll consistently panic instead and help people find their bugs
	// early. (We can't return an error from WriteHeader even if we wanted to.)
	if code < 100 || code > 999 {
		panic(fmt.Sprintf("invalid WriteHeader code %v", code))
	}
}

// relevantCaller searches the call stack for the first function outside of net/http.
// The purpose of this function is to provide more helpful error messages.
func relevantCaller() runtime.Frame {
	pc := make([]uintptr, 16)
	n := runtime.Callers(1, pc)
	frames := runtime.CallersFrames(pc[:n])
	prefix1 := "net/http."
	prefix2 := "net/http."
	if runtime.Compiler == "gccgo" {
		prefix2 = "http."
	}
	var frame runtime.Frame
	for {
		frame, more := frames.Next()
		if !strings.HasPrefix(frame.Function, prefix1) && !strings.HasPrefix(frame.Function, prefix2) {
			return frame
		}
		if !more {
			break
		}
	}
	return frame
}

func (w *response) WriteHeader(code int) {
	if w.conn.hijacked() {
		caller := relevantCaller()
		w.conn.server.logf("http: response.WriteHeader on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
		return
	}
	if w.wroteHeader {
		caller := relevantCaller()
		w.conn.server.logf("http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
		return
	}
	checkWriteHeaderCode(code)
	w.wroteHeader = true
	w.status = code

	if w.calledHeader && w.cw.header == nil {
		w.cw.header = w.handlerHeader.Clone()
	}

	if cl := w.handlerHeader.get("Content-Length"); cl != "" {
		v, err := strconv.ParseInt(cl, 10, 64)
		if err == nil && v >= 0 {
			w.contentLength = v
		} else {
			w.conn.server.logf("http: invalid Content-Length of %q", cl)
			w.handlerHeader.Del("Content-Length")
		}
	}
}

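// An illustrative sketch (not part of the original file) of the
// Content-Length handling in WriteHeader above: a valid declared length is
// recorded, and writing more than that many body bytes later fails with
// ErrContentLength. exampleDeclaredLength is a hypothetical name.
func exampleDeclaredLength(w ResponseWriter, r *Request) {
	body := []byte("exactly this\n")
	w.Header().Set("Content-Length", strconv.Itoa(len(body)))
	w.WriteHeader(StatusOK) // parses and records the declared length
	w.Write(body)           // matches the declaration
	_, err := w.Write([]byte("extra"))
	// err == ErrContentLength here: the over-long write is rejected rather
	// than silently corrupting the message framing.
	_ = err
}
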
// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader.
// This type is used to avoid extra allocations from cloning and/or populating
// the response Header map and all its 1-element slices.
type extraHeader struct {
	contentType      string
	connection       string
	transferEncoding string
	date             []byte // written if not nil
	contentLength    []byte // written if not nil
}

// Sorted the same as extraHeader.Write's loop.
var extraHeaderKeys = [][]byte{
	[]byte("Content-Type"),
	[]byte("Connection"),
	[]byte("Transfer-Encoding"),
}

var (
	headerContentLength = []byte("Content-Length: ")
	headerDate          = []byte("Date: ")
)

// Write writes the headers described in h to w.
//
// This method has a value receiver, despite the somewhat large size
// of h, because it prevents an allocation. The escape analysis isn't
// smart enough to realize this function doesn't mutate h.
func (h extraHeader) Write(w *bufio.Writer) {
	if h.date != nil {
		w.Write(headerDate)
		w.Write(h.date)
		w.Write(crlf)
	}
	if h.contentLength != nil {
		w.Write(headerContentLength)
		w.Write(h.contentLength)
		w.Write(crlf)
	}
	for i, v := range []string{h.contentType, h.connection, h.transferEncoding} {
		if v != "" {
			w.Write(extraHeaderKeys[i])
			w.Write(colonSpace)
			w.WriteString(v)
			w.Write(crlf)
		}
	}
}

// writeHeader finalizes the header sent to the client and writes it
// to cw.res.conn.bufw.
//
// p is not written by writeHeader, but is the first chunk of the body
// that will be written. It is sniffed for a Content-Type if none is
// set explicitly. It's also used to set the Content-Length, if the
// total body size was small and the handler has already finished
// running.
func (cw *chunkWriter) writeHeader(p []byte) {
	if cw.wroteHeader {
		return
	}
	cw.wroteHeader = true

	w := cw.res
	keepAlivesEnabled := w.conn.server.doKeepAlives()
	isHEAD := w.req.Method == "HEAD"

	// header is written out to w.conn.buf below. Depending on the
	// state of the handler, we either own the map or not. If we
	// don't own it, the exclude map is created lazily for
	// WriteSubset to remove headers. The setHeader struct holds
	// headers we need to add.
	header := cw.header
	owned := header != nil
	if !owned {
		header = w.handlerHeader
	}
	var excludeHeader map[string]bool
	delHeader := func(key string) {
		if owned {
			header.Del(key)
			return
		}
		if _, ok := header[key]; !ok {
			return
		}
		if excludeHeader == nil {
			excludeHeader = make(map[string]bool)
		}
		excludeHeader[key] = true
	}
	var setHeader extraHeader

	// Don't write out the fake "Trailer:foo" keys. See TrailerPrefix.
	trailers := false
	for k := range cw.header {
		if strings.HasPrefix(k, TrailerPrefix) {
			if excludeHeader == nil {
				excludeHeader = make(map[string]bool)
			}
			excludeHeader[k] = true
			trailers = true
		}
	}
	for _, v := range cw.header["Trailer"] {
		trailers = true
		foreachHeaderElement(v, cw.res.declareTrailer)
	}

	te := header.get("Transfer-Encoding")
	hasTE := te != ""

	// If the handler is done but never sent a Content-Length
	// response header and this is our first (and last) write, set
	// it, even to zero. This helps HTTP/1.0 clients keep their
	// "keep-alive" connections alive.
	// Exceptions: 304/204/1xx responses never get Content-Length, and if
	// it was a HEAD request, we don't know the difference between
	// 0 actual bytes and 0 bytes because the handler noticed it
	// was a HEAD request and chose not to write anything. So for
	// HEAD, the handler should either write the Content-Length or
	// write non-zero bytes. If it's actually 0 bytes and the
	// handler never looked at the Request.Method, we just don't
	// send a Content-Length header.
	// Further, we don't send an automatic Content-Length if they
	// set a Transfer-Encoding, because they're generally incompatible.
	if w.handlerDone.isSet() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
		w.contentLength = int64(len(p))
		setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
	}

	// If this was an HTTP/1.0 request with keep-alive and we sent a
	// Content-Length back, we can make this a keep-alive response ...
	if w.wants10KeepAlive && keepAlivesEnabled {
		sentLength := header.get("Content-Length") != ""
		if sentLength && header.get("Connection") == "keep-alive" {
			w.closeAfterReply = false
		}
	}

	// Check for an explicit (and valid) Content-Length header.
	hasCL := w.contentLength != -1

	if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) {
		_, connectionHeaderSet := header["Connection"]
		if !connectionHeaderSet {
			setHeader.connection = "keep-alive"
		}
	} else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose {
		w.closeAfterReply = true
	}

	if header.get("Connection") == "close" || !keepAlivesEnabled {
		w.closeAfterReply = true
	}

	// If the client wanted a 100-continue but we never sent it to
	// them (or, more strictly: we never finished reading their
	// request body), don't reuse this connection because it's now
	// in an unknown state: we might be sending this response at
	// the same time the client is now sending its request body
	// after a timeout. (Some HTTP clients send Expect:
	// 100-continue but knowing that some servers don't support
	// it, the clients set a timer and send the body later anyway)
	// If we haven't seen EOF, we can't skip over the unread body
	// because we don't know if the next bytes on the wire will be
	// the body-following-the-timer or the subsequent request.
	// See Issue 11549.
	if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF {
		w.closeAfterReply = true
	}

	// Per RFC 2616, we should consume the request body before
	// replying, if the handler hasn't already done so. But we
	// don't want to do an unbounded amount of reading here for
	// DoS reasons, so we only try up to a threshold.
	// TODO(bradfitz): where does RFC 2616 say that? See Issue 15527
	// about HTTP/1.x Handlers concurrently reading and writing, like
	// HTTP/2 handlers can do. Maybe this code should be relaxed?
	if w.req.ContentLength != 0 && !w.closeAfterReply {
		var discard, tooBig bool

		switch bdy := w.req.Body.(type) {
		case *expectContinueReader:
			if bdy.resp.wroteContinue {
				discard = true
			}
		case *body:
			bdy.mu.Lock()
			switch {
			case bdy.closed:
				if !bdy.sawEOF {
					// Body was closed in handler with non-EOF error.
					w.closeAfterReply = true
				}
			case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes:
				tooBig = true
			default:
				discard = true
			}
			bdy.mu.Unlock()
		default:
			discard = true
		}

		if discard {
			_, err := io.CopyN(ioutil.Discard, w.reqBody, maxPostHandlerReadBytes+1)
			switch err {
			case nil:
				// There must be even more data left over.
				tooBig = true
			case ErrBodyReadAfterClose:
				// Body was already consumed and closed.
			case io.EOF:
				// The remaining body was just consumed, close it.
				err = w.reqBody.Close()
				if err != nil {
					w.closeAfterReply = true
				}
			default:
				// Some other kind of error occurred, like a read timeout, or
				// corrupt chunked encoding. In any case, whatever remains
				// on the wire must not be parsed as another HTTP request.
				w.closeAfterReply = true
			}
		}

		if tooBig {
			w.requestTooLarge()
			delHeader("Connection")
			setHeader.connection = "close"
		}
	}

	code := w.status
	if bodyAllowedForStatus(code) {
		// If no content type, apply sniffing algorithm to body.
		_, haveType := header["Content-Type"]

		// If the Content-Encoding was set and is non-blank,
		// we shouldn't sniff the body. See Issue 31753.
		ce := header.Get("Content-Encoding")
		hasCE := len(ce) > 0
		if !hasCE && !haveType && !hasTE && len(p) > 0 {
			setHeader.contentType = DetectContentType(p)
		}
	} else {
		for _, k := range suppressedHeaders(code) {
			delHeader(k)
		}
	}

	if !header.has("Date") {
		setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
	}

	if hasCL && hasTE && te != "identity" {
		// TODO: return an error if WriteHeader gets a return parameter
		// For now just ignore the Content-Length.
		w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
			te, w.contentLength)
		delHeader("Content-Length")
		hasCL = false
	}

	if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) {
		// do nothing
	} else if code == StatusNoContent {
		delHeader("Transfer-Encoding")
	} else if hasCL {
		delHeader("Transfer-Encoding")
	} else if w.req.ProtoAtLeast(1, 1) {
		// HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no
		// content-length has been provided. The connection must be closed after the
		// reply is written, and no chunking is to be done. This is the setup
		// recommended in the Server-Sent Events candidate recommendation 11,
		// section 8.
		if hasTE && te == "identity" {
			cw.chunking = false
			w.closeAfterReply = true
		} else {
			// HTTP/1.1 or greater: use chunked transfer encoding
			// to avoid closing the connection at EOF.
			cw.chunking = true
			setHeader.transferEncoding = "chunked"
			if hasTE && te == "chunked" {
				// We will send the chunked Transfer-Encoding header later.
				delHeader("Transfer-Encoding")
			}
		}
	} else {
		// HTTP version < 1.1: cannot do chunked transfer
		// encoding and we don't know the Content-Length so
		// signal EOF by closing connection.
		w.closeAfterReply = true
		delHeader("Transfer-Encoding") // in case already set
	}

	// Cannot use Content-Length with non-identity Transfer-Encoding.
	if cw.chunking {
		delHeader("Content-Length")
	}
	if !w.req.ProtoAtLeast(1, 0) {
		return
	}

	if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) {
		delHeader("Connection")
		if w.req.ProtoAtLeast(1, 1) {
			setHeader.connection = "close"
		}
	}

	writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:])
	cw.header.WriteSubset(w.conn.bufw, excludeHeader)
	setHeader.Write(w.conn.bufw)
	w.conn.bufw.Write(crlf)
}

// foreachHeaderElement splits v according to the "#rule" construction
// in RFC 7230 section 7 and calls fn for each non-empty element.
func foreachHeaderElement(v string, fn func(string)) {
	v = textproto.TrimString(v)
	if v == "" {
		return
	}
	if !strings.Contains(v, ",") {
		fn(v)
		return
	}
	for _, f := range strings.Split(v, ",") {
		if f = textproto.TrimString(f); f != "" {
			fn(f)
		}
	}
}

// writeStatusLine writes an HTTP/1.x Status-Line (RFC 7230 Section 3.1.2)
// to bw. is11 is whether the HTTP request is HTTP/1.1. false means HTTP/1.0.
// code is the response status code.
// scratch is an optional scratch buffer. If it has at least capacity 3, it's used.
func writeStatusLine(bw *bufio.Writer, is11 bool, code int, scratch []byte) {
	if is11 {
		bw.WriteString("HTTP/1.1 ")
	} else {
		bw.WriteString("HTTP/1.0 ")
	}
	if text, ok := statusText[code]; ok {
		bw.Write(strconv.AppendInt(scratch[:0], int64(code), 10))
		bw.WriteByte(' ')
		bw.WriteString(text)
		bw.WriteString("\r\n")
	} else {
		// don't worry about performance
		fmt.Fprintf(bw, "%03d status code %d\r\n", code, code)
	}
}

// bodyAllowed reports whether a Write is allowed for this response type.
// It's illegal to call this before the header has been flushed.
func (w *response) bodyAllowed() bool {
	if !w.wroteHeader {
		panic("")
	}
	return bodyAllowedForStatus(w.status)
}

// The Life Of A Write is like this:
//
// Handler starts. No header has been sent. The handler can either
// write a header, or just start writing. Writing before sending a header
// sends an implicitly empty 200 OK header.
//
// If the handler didn't declare a Content-Length up front, we either
// go into chunking mode or, if the handler finishes running before
// the chunking buffer size, we compute a Content-Length and send that
// in the header instead.
//
// Likewise, if the handler didn't set a Content-Type, we sniff that
// from the initial chunk of output.
//
// The Writers are wired together like:
//
// 1. *response (the ResponseWriter) ->
// 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes
// 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type)
//    and which writes the chunk headers, if needed.
// 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to ->
// 5. checkConnErrorWriter{c}, which notes any non-nil error on Write
//    and populates c.werr with it if so. but otherwise writes to:
// 6. the rwc, the net.Conn.
//
// TODO(bradfitz): short-circuit some of the buffering when the
// initial header contains both a Content-Type and Content-Length.
// Also short-circuit in (1) when the header's been sent and not in
// chunking mode, writing directly to (4) instead, if (2) has no
// buffered data. More generally, we could short-circuit from (1) to
// (3) even in chunking mode if the write size from (1) is over some
// threshold and nothing is in (2). The answer might be mostly making
// bufferBeforeChunkingSize smaller and having bufio's fast-paths deal
// with this instead.
func (w *response) Write(data []byte) (n int, err error) {
	return w.write(len(data), data, "")
}

func (w *response) WriteString(data string) (n int, err error) {
	return w.write(len(data), nil, data)
}

// either dataB or dataS is non-zero.
func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) {
	if w.conn.hijacked() {
		if lenData > 0 {
			caller := relevantCaller()
			w.conn.server.logf("http: response.Write on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
		}
		return 0, ErrHijacked
	}
	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}
	if lenData == 0 {
		return 0, nil
	}
	if !w.bodyAllowed() {
		return 0, ErrBodyNotAllowed
	}

	w.written += int64(lenData) // ignoring errors, for errorKludge
	if w.contentLength != -1 && w.written > w.contentLength {
		return 0, ErrContentLength
	}
	if dataB != nil {
		return w.w.Write(dataB)
	} else {
		return w.w.WriteString(dataS)
	}
}

func (w *response) finishRequest() {
	w.handlerDone.setTrue()

	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}

	w.w.Flush()
	putBufioWriter(w.w)
	w.cw.close()
	w.conn.bufw.Flush()

	w.conn.r.abortPendingRead()

	// Close the body (regardless of w.closeAfterReply) so we can
	// re-use its bufio.Reader later safely.
	w.reqBody.Close()

	if w.req.MultipartForm != nil {
		w.req.MultipartForm.RemoveAll()
	}
}

// shouldReuseConnection reports whether the underlying TCP connection can be reused.
// It must only be called after the handler is done executing.
func (w *response) shouldReuseConnection() bool {
	if w.closeAfterReply {
		// The request or something set while executing the
		// handler indicated we shouldn't reuse this
		// connection.
		return false
	}

	if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written {
		// Did not write enough. Avoid getting out of sync.
		return false
	}

	// There was some error writing to the underlying connection
	// during the request, so don't re-use this conn.
	if w.conn.werr != nil {
		return false
	}

	if w.closedRequestBodyEarly() {
		return false
	}

	return true
}

func (w *response) closedRequestBodyEarly() bool {
	body, ok := w.req.Body.(*body)
	return ok && body.didEarlyClose()
}

func (w *response) Flush() {
	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}
	w.w.Flush()
	w.cw.flush()
}

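// An illustrative sketch (not part of the original file) of the Flusher path
// implemented by (*response).Flush above: each Flush drains the buffered
// layers through the chunkWriter, so the response switches to chunked
// encoding and the client sees data before the handler returns. exampleStream
// is a hypothetical name.
func exampleStream(w ResponseWriter, r *Request) {
	fl, ok := w.(Flusher)
	if !ok {
		Error(w, "streaming unsupported", StatusInternalServerError)
		return
	}
	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "tick %d\n", i)
		fl.Flush() // push buffered data to the client now
		time.Sleep(100 * time.Millisecond)
	}
}
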
func (c *conn) finalFlush() {
	if c.bufr != nil {
		// Steal the bufio.Reader (~4KB worth of memory) and its associated
		// reader for a future connection.
		putBufioReader(c.bufr)
		c.bufr = nil
	}

	if c.bufw != nil {
		c.bufw.Flush()
		// Steal the bufio.Writer (~4KB worth of memory) and its associated
		// writer for a future connection.
		putBufioWriter(c.bufw)
		c.bufw = nil
	}
}

// Close the connection.
func (c *conn) close() {
	c.finalFlush()
	c.rwc.Close()
}

// rstAvoidanceDelay is the amount of time we sleep after closing the
// write side of a TCP connection before closing the entire socket.
// By sleeping, we increase the chances that the client sees our FIN
// and processes its final data before they process the subsequent RST
// from closing a connection with known unread data.
// This RST seems to occur mostly on BSD systems. (And Windows?)
// This timeout is somewhat arbitrary (~latency around the planet).
const rstAvoidanceDelay = 500 * time.Millisecond

type closeWriter interface {
	CloseWrite() error
}

var _ closeWriter = (*net.TCPConn)(nil)

// closeWrite flushes any outstanding data and sends a FIN packet (if
// client is connected via TCP), signalling that we're done. We then
// pause for a bit, hoping the client processes it before any
// subsequent RST.
//
// See https://golang.org/issue/3595
func (c *conn) closeWriteAndWait() {
	c.finalFlush()
	if tcp, ok := c.rwc.(closeWriter); ok {
		tcp.CloseWrite()
	}
	time.Sleep(rstAvoidanceDelay)
}

// validNextProto reports whether the proto is not a blacklisted ALPN
// protocol name. Empty and built-in protocol types are blacklisted
// and can't be overridden with alternate implementations.
func validNextProto(proto string) bool {
	switch proto {
	case "", "http/1.1", "http/1.0":
		return false
	}
	return true
}

func (c *conn) setState(nc net.Conn, state ConnState) {
	srv := c.server
	switch state {
	case StateNew:
		srv.trackConn(c, true)
	case StateHijacked, StateClosed:
		srv.trackConn(c, false)
	}
	if state > 0xff || state < 0 {
		panic("internal error")
	}
	packedState := uint64(time.Now().Unix()<<8) | uint64(state)
	atomic.StoreUint64(&c.curState.atomic, packedState)
	if hook := srv.ConnState; hook != nil {
		hook(nc, state)
	}
}

func (c *conn) getState() (state ConnState, unixSec int64) {
	packedState := atomic.LoadUint64(&c.curState.atomic)
	return ConnState(packedState & 0xff), int64(packedState >> 8)
}

// badRequestError is a literal string (used by the server in HTML,
// unescaped) to tell the user why their request was bad. It should
// be plain text without user info or other embedded errors.
type badRequestError string

func (e badRequestError) Error() string { return "Bad Request: " + string(e) }

// ErrAbortHandler is a sentinel panic value to abort a handler.
// While any panic from ServeHTTP aborts the response to the client,
// panicking with ErrAbortHandler also suppresses logging of a stack
// trace to the server's error log.
var ErrAbortHandler = errors.New("net/http: abort Handler")

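// An illustrative sketch (not part of the original file) of ErrAbortHandler
// as documented above: panicking with it aborts the response without the
// server logging a stack trace. exampleAbortable and the trigger header are
// hypothetical.
func exampleAbortable(w ResponseWriter, r *Request) {
	if r.Header.Get("X-Abort") != "" { // hypothetical trigger
		panic(ErrAbortHandler) // client sees an interrupted response; nothing is logged
	}
	io.WriteString(w, "ok\n")
}
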
This is used to 1755 // determine which logs are interesting enough to log about. 1756 func isCommonNetReadError(err error) bool { 1757 if err == io.EOF { 1758 return true 1759 } 1760 if neterr, ok := err.(net.Error); ok && neterr.Timeout() { 1761 return true 1762 } 1763 if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { 1764 return true 1765 } 1766 return false 1767 } 1768 1769 // Serve a new connection. 1770 func (c *conn) serve(ctx context.Context) { 1771 c.remoteAddr = c.rwc.RemoteAddr().String() 1772 ctx = context.WithValue(ctx, LocalAddrContextKey, c.rwc.LocalAddr()) 1773 defer func() { 1774 if err := recover(); err != nil && err != ErrAbortHandler { 1775 const size = 64 << 10 1776 buf := make([]byte, size) 1777 buf = buf[:runtime.Stack(buf, false)] 1778 c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf) 1779 } 1780 if !c.hijacked() { 1781 c.close() 1782 c.setState(c.rwc, StateClosed) 1783 } 1784 }() 1785 1786 if tlsConn, ok := c.rwc.(*tls.Conn); ok { 1787 if d := c.server.ReadTimeout; d != 0 { 1788 c.rwc.SetReadDeadline(time.Now().Add(d)) 1789 } 1790 if d := c.server.WriteTimeout; d != 0 { 1791 c.rwc.SetWriteDeadline(time.Now().Add(d)) 1792 } 1793 if err := tlsConn.Handshake(); err != nil { 1794 // If the handshake failed due to the client not speaking 1795 // TLS, assume they're speaking plaintext HTTP and write a 1796 // 400 response on the TLS conn's underlying net.Conn. 1797 if re, ok := err.(tls.RecordHeaderError); ok && re.Conn != nil && tlsRecordHeaderLooksLikeHTTP(re.RecordHeader) { 1798 io.WriteString(re.Conn, "HTTP/1.0 400 Bad Request\r\n\r\nClient sent an HTTP request to an HTTPS server.\n") 1799 re.Conn.Close() 1800 return 1801 } 1802 c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err) 1803 return 1804 } 1805 c.tlsState = new(tls.ConnectionState) 1806 *c.tlsState = tlsConn.ConnectionState() 1807 if proto := c.tlsState.NegotiatedProtocol; validNextProto(proto) { 1808 if fn := c.server.TLSNextProto[proto]; fn != nil { 1809 h := initALPNRequest{ctx, tlsConn, serverHandler{c.server}} 1810 fn(c.server, tlsConn, h) 1811 } 1812 return 1813 } 1814 } 1815 1816 // HTTP/1.x from here on. 1817 1818 ctx, cancelCtx := context.WithCancel(ctx) 1819 c.cancelCtx = cancelCtx 1820 defer cancelCtx() 1821 1822 c.r = &connReader{conn: c} 1823 c.bufr = newBufioReader(c.r) 1824 c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10) 1825 1826 for { 1827 w, err := c.readRequest(ctx) 1828 if c.r.remain != c.server.initialReadLimitSize() { 1829 // If we read any bytes off the wire, we're active. 1830 c.setState(c.rwc, StateActive) 1831 } 1832 if err != nil { 1833 const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n" 1834 1835 switch { 1836 case err == errTooLarge: 1837 // Their HTTP client may or may not be 1838 // able to read this if we're 1839 // responding to them and hanging up 1840 // while they're still writing their 1841 // request. Undefined behavior. 1842 const publicErr = "431 Request Header Fields Too Large" 1843 fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr) 1844 c.closeWriteAndWait() 1845 return 1846 1847 case isUnsupportedTEError(err): 1848 // Respond as per RFC 7230 Section 3.3.1 which says, 1849 // A server that receives a request message with a 1850 // transfer coding it does not understand SHOULD 1851 // respond with 501 (Unimplemented). 
1852 code := StatusNotImplemented 1853 1854 // We purposefully aren't echoing back the transfer-encoding's value, 1855 // so as to mitigate the risk of cross side scripting by an attacker. 1856 fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s%sUnsupported transfer encoding", code, StatusText(code), errorHeaders) 1857 return 1858 1859 case isCommonNetReadError(err): 1860 return // don't reply 1861 1862 default: 1863 publicErr := "400 Bad Request" 1864 if v, ok := err.(badRequestError); ok { 1865 publicErr = publicErr + ": " + string(v) 1866 } 1867 1868 fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr) 1869 return 1870 } 1871 } 1872 1873 // Expect 100 Continue support 1874 req := w.req 1875 if req.expectsContinue() { 1876 if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 { 1877 // Wrap the Body reader with one that replies on the connection 1878 req.Body = &expectContinueReader{readCloser: req.Body, resp: w} 1879 } 1880 } else if req.Header.get("Expect") != "" { 1881 w.sendExpectationFailed() 1882 return 1883 } 1884 1885 c.curReq.Store(w) 1886 1887 if requestBodyRemains(req.Body) { 1888 registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead) 1889 } else { 1890 w.conn.r.startBackgroundRead() 1891 } 1892 1893 // HTTP cannot have multiple simultaneous active requests.[*] 1894 // Until the server replies to this request, it can't read another, 1895 // so we might as well run the handler in this goroutine. 1896 // [*] Not strictly true: HTTP pipelining. We could let them all process 1897 // in parallel even if their responses need to be serialized. 1898 // But we're not going to implement HTTP pipelining because it 1899 // was never deployed in the wild and the answer is HTTP/2. 1900 serverHandler{c.server}.ServeHTTP(w, w.req) 1901 w.cancelCtx() 1902 if c.hijacked() { 1903 return 1904 } 1905 w.finishRequest() 1906 if !w.shouldReuseConnection() { 1907 if w.requestBodyLimitHit || w.closedRequestBodyEarly() { 1908 c.closeWriteAndWait() 1909 } 1910 return 1911 } 1912 c.setState(c.rwc, StateIdle) 1913 c.curReq.Store((*response)(nil)) 1914 1915 if !w.conn.server.doKeepAlives() { 1916 // We're in shutdown mode. We might've replied 1917 // to the user without "Connection: close" and 1918 // they might think they can send another 1919 // request, but such is life with HTTP/1.1. 1920 return 1921 } 1922 1923 if d := c.server.idleTimeout(); d != 0 { 1924 c.rwc.SetReadDeadline(time.Now().Add(d)) 1925 if _, err := c.bufr.Peek(4); err != nil { 1926 return 1927 } 1928 } 1929 c.rwc.SetReadDeadline(time.Time{}) 1930 } 1931 } 1932 1933 func (w *response) sendExpectationFailed() { 1934 // TODO(bradfitz): let ServeHTTP handlers handle 1935 // requests with non-standard expectation[s]? Seems 1936 // theoretical at best, and doesn't fit into the 1937 // current ServeHTTP model anyway. We'd need to 1938 // make the ResponseWriter an optional 1939 // "ExpectReplier" interface or something. 1940 // 1941 // For now we'll just obey RFC 7231 5.1.1 which says 1942 // "A server that receives an Expect field-value other 1943 // than 100-continue MAY respond with a 417 (Expectation 1944 // Failed) status code to indicate that the unexpected 1945 // expectation cannot be met." 1946 w.Header().Set("Connection", "close") 1947 w.WriteHeader(StatusExpectationFailed) 1948 w.finishRequest() 1949 } 1950 1951 // Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter 1952 // and a Hijacker. 
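// Editor's sketch (added commentary, not part of the original source): the
// Hijack method documented above is reached through the exported Hijacker
// interface. A minimal handler that takes over the connection might look
// like the following; the route and listen address are illustrative
// assumptions.
//
//	package main
//
//	import (
//		"log"
//		"net/http"
//	)
//
//	func main() {
//		http.HandleFunc("/raw", func(w http.ResponseWriter, r *http.Request) {
//			hj, ok := w.(http.Hijacker) // ResponseWriter wrappers may not implement Hijacker
//			if !ok {
//				http.Error(w, "hijacking not supported", http.StatusInternalServerError)
//				return
//			}
//			conn, bufrw, err := hj.Hijack()
//			if err != nil {
//				http.Error(w, err.Error(), http.StatusInternalServerError)
//				return
//			}
//			defer conn.Close()
//			// After Hijack the handler owns the connection and must speak raw HTTP/1.x itself.
//			bufrw.WriteString("HTTP/1.1 200 OK\r\nContent-Length: 2\r\nConnection: close\r\n\r\nok")
//			bufrw.Flush()
//		})
//		log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
//	}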
1953 func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { 1954 if w.handlerDone.isSet() { 1955 panic("net/http: Hijack called after ServeHTTP finished") 1956 } 1957 if w.wroteHeader { 1958 w.cw.flush() 1959 } 1960 1961 c := w.conn 1962 c.mu.Lock() 1963 defer c.mu.Unlock() 1964 1965 // Release the bufioWriter that writes to the chunk writer, it is not 1966 // used after a connection has been hijacked. 1967 rwc, buf, err = c.hijackLocked() 1968 if err == nil { 1969 putBufioWriter(w.w) 1970 w.w = nil 1971 } 1972 return rwc, buf, err 1973 } 1974 1975 func (w *response) CloseNotify() <-chan bool { 1976 if w.handlerDone.isSet() { 1977 panic("net/http: CloseNotify called after ServeHTTP finished") 1978 } 1979 return w.closeNotifyCh 1980 } 1981 1982 func registerOnHitEOF(rc io.ReadCloser, fn func()) { 1983 switch v := rc.(type) { 1984 case *expectContinueReader: 1985 registerOnHitEOF(v.readCloser, fn) 1986 case *body: 1987 v.registerOnHitEOF(fn) 1988 default: 1989 panic("unexpected type " + fmt.Sprintf("%T", rc)) 1990 } 1991 } 1992 1993 // requestBodyRemains reports whether future calls to Read 1994 // on rc might yield more data. 1995 func requestBodyRemains(rc io.ReadCloser) bool { 1996 if rc == NoBody { 1997 return false 1998 } 1999 switch v := rc.(type) { 2000 case *expectContinueReader: 2001 return requestBodyRemains(v.readCloser) 2002 case *body: 2003 return v.bodyRemains() 2004 default: 2005 panic("unexpected type " + fmt.Sprintf("%T", rc)) 2006 } 2007 } 2008 2009 // The HandlerFunc type is an adapter to allow the use of 2010 // ordinary functions as HTTP handlers. If f is a function 2011 // with the appropriate signature, HandlerFunc(f) is a 2012 // Handler that calls f. 2013 type HandlerFunc func(ResponseWriter, *Request) 2014 2015 // ServeHTTP calls f(w, r). 2016 func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) { 2017 f(w, r) 2018 } 2019 2020 // Helper handlers 2021 2022 // Error replies to the request with the specified error message and HTTP code. 2023 // It does not otherwise end the request; the caller should ensure no further 2024 // writes are done to w. 2025 // The error message should be plain text. 2026 func Error(w ResponseWriter, error string, code int) { 2027 w.Header().Set("Content-Type", "text/plain; charset=utf-8") 2028 w.Header().Set("X-Content-Type-Options", "nosniff") 2029 w.WriteHeader(code) 2030 fmt.Fprintln(w, error) 2031 } 2032 2033 // NotFound replies to the request with an HTTP 404 not found error. 2034 func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) } 2035 2036 // NotFoundHandler returns a simple request handler 2037 // that replies to each request with a ``404 page not found'' reply. 2038 func NotFoundHandler() Handler { return HandlerFunc(NotFound) } 2039 2040 // StripPrefix returns a handler that serves HTTP requests 2041 // by removing the given prefix from the request URL's Path 2042 // and invoking the handler h. StripPrefix handles a 2043 // request for a path that doesn't begin with prefix by 2044 // replying with an HTTP 404 not found error. 
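// Editor's sketch (added commentary, not part of the original source): a
// minimal use of StripPrefix as documented above, mounting a FileServer
// under a URL prefix. The directory, prefix, and address are assumptions.
//
//	package main
//
//	import (
//		"log"
//		"net/http"
//	)
//
//	func main() {
//		mux := http.NewServeMux()
//		// StripPrefix removes "/assets/" before the FileServer sees the path,
//		// so /assets/css/app.css is served from ./static/css/app.css. A request
//		// reaching it without that prefix gets the 404 reply described above.
//		fs := http.FileServer(http.Dir("./static"))
//		mux.Handle("/assets/", http.StripPrefix("/assets/", fs))
//		log.Fatal(http.ListenAndServe("127.0.0.1:8080", mux))
//	}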
2045 func StripPrefix(prefix string, h Handler) Handler { 2046 if prefix == "" { 2047 return h 2048 } 2049 return HandlerFunc(func(w ResponseWriter, r *Request) { 2050 if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) { 2051 r2 := new(Request) 2052 *r2 = *r 2053 r2.URL = new(url.URL) 2054 *r2.URL = *r.URL 2055 r2.URL.Path = p 2056 h.ServeHTTP(w, r2) 2057 } else { 2058 NotFound(w, r) 2059 } 2060 }) 2061 } 2062 2063 // Redirect replies to the request with a redirect to url, 2064 // which may be a path relative to the request path. 2065 // 2066 // The provided code should be in the 3xx range and is usually 2067 // StatusMovedPermanently, StatusFound or StatusSeeOther. 2068 // 2069 // If the Content-Type header has not been set, Redirect sets it 2070 // to "text/html; charset=utf-8" and writes a small HTML body. 2071 // Setting the Content-Type header to any value, including nil, 2072 // disables that behavior. 2073 func Redirect(w ResponseWriter, r *Request, url string, code int) { 2074 if u, err := urlpkg.Parse(url); err == nil { 2075 // If url was relative, make its path absolute by 2076 // combining with request path. 2077 // The client would probably do this for us, 2078 // but doing it ourselves is more reliable. 2079 // See RFC 7231, section 7.1.2 2080 if u.Scheme == "" && u.Host == "" { 2081 oldpath := r.URL.Path 2082 if oldpath == "" { // should not happen, but avoid a crash if it does 2083 oldpath = "/" 2084 } 2085 2086 // no leading http://server 2087 if url == "" || url[0] != '/' { 2088 // make relative path absolute 2089 olddir, _ := path.Split(oldpath) 2090 url = olddir + url 2091 } 2092 2093 var query string 2094 if i := strings.Index(url, "?"); i != -1 { 2095 url, query = url[:i], url[i:] 2096 } 2097 2098 // clean up but preserve trailing slash 2099 trailing := strings.HasSuffix(url, "/") 2100 url = path.Clean(url) 2101 if trailing && !strings.HasSuffix(url, "/") { 2102 url += "/" 2103 } 2104 url += query 2105 } 2106 } 2107 2108 h := w.Header() 2109 2110 // RFC 7231 notes that a short HTML body is usually included in 2111 // the response because older user agents may not understand 301/307. 2112 // Do it only if the request didn't already have a Content-Type header. 2113 _, hadCT := h["Content-Type"] 2114 2115 h.Set("Location", hexEscapeNonASCII(url)) 2116 if !hadCT && (r.Method == "GET" || r.Method == "HEAD") { 2117 h.Set("Content-Type", "text/html; charset=utf-8") 2118 } 2119 w.WriteHeader(code) 2120 2121 // Shouldn't send the body for POST or HEAD; that leaves GET. 2122 if !hadCT && r.Method == "GET" { 2123 body := "<a href=\"" + htmlEscape(url) + "\">" + statusText[code] + "</a>.\n" 2124 fmt.Fprintln(w, body) 2125 } 2126 } 2127 2128 var htmlReplacer = strings.NewReplacer( 2129 "&", "&amp;", 2130 "<", "&lt;", 2131 ">", "&gt;", 2132 // "&#34;" is shorter than "&quot;". 2133 `"`, "&#34;", 2134 // "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5. 2135 "'", "&#39;", 2136 ) 2137 2138 func htmlEscape(s string) string { 2139 return htmlReplacer.Replace(s) 2140 } 2141 2142 // Redirect to a fixed URL 2143 type redirectHandler struct { 2144 url string 2145 code int 2146 } 2147 2148 func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) { 2149 Redirect(w, r, rh.url, rh.code) 2150 } 2151 2152 // RedirectHandler returns a request handler that redirects 2153 // each request it receives to the given url using the given 2154 // status code.
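// Editor's sketch (added commentary, not part of the original source): the
// two redirect helpers above in typical use. Paths and status codes below
// are illustrative assumptions.
//
//	package main
//
//	import (
//		"log"
//		"net/http"
//	)
//
//	func main() {
//		mux := http.NewServeMux()
//		// Fixed redirect for a page that moved permanently.
//		mux.Handle("/old", http.RedirectHandler("/new", http.StatusMovedPermanently))
//		// Computed redirect; the target may also be a path relative to the request path.
//		mux.HandleFunc("/docs", func(w http.ResponseWriter, r *http.Request) {
//			http.Redirect(w, r, "/docs/latest/", http.StatusSeeOther)
//		})
//		mux.HandleFunc("/new", func(w http.ResponseWriter, r *http.Request) {
//			w.Write([]byte("moved here\n"))
//		})
//		log.Fatal(http.ListenAndServe("127.0.0.1:8080", mux))
//	}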
2155 // 2156 // The provided code should be in the 3xx range and is usually 2157 // StatusMovedPermanently, StatusFound or StatusSeeOther. 2158 func RedirectHandler(url string, code int) Handler { 2159 return &redirectHandler{url, code} 2160 } 2161 2162 // ServeMux is an HTTP request multiplexer. 2163 // It matches the URL of each incoming request against a list of registered 2164 // patterns and calls the handler for the pattern that 2165 // most closely matches the URL. 2166 // 2167 // Patterns name fixed, rooted paths, like "/favicon.ico", 2168 // or rooted subtrees, like "/images/" (note the trailing slash). 2169 // Longer patterns take precedence over shorter ones, so that 2170 // if there are handlers registered for both "/images/" 2171 // and "/images/thumbnails/", the latter handler will be 2172 // called for paths beginning "/images/thumbnails/" and the 2173 // former will receive requests for any other paths in the 2174 // "/images/" subtree. 2175 // 2176 // Note that since a pattern ending in a slash names a rooted subtree, 2177 // the pattern "/" matches all paths not matched by other registered 2178 // patterns, not just the URL with Path == "/". 2179 // 2180 // If a subtree has been registered and a request is received naming the 2181 // subtree root without its trailing slash, ServeMux redirects that 2182 // request to the subtree root (adding the trailing slash). This behavior can 2183 // be overridden with a separate registration for the path without 2184 // the trailing slash. For example, registering "/images/" causes ServeMux 2185 // to redirect a request for "/images" to "/images/", unless "/images" has 2186 // been registered separately. 2187 // 2188 // Patterns may optionally begin with a host name, restricting matches to 2189 // URLs on that host only. Host-specific patterns take precedence over 2190 // general patterns, so that a handler might register for the two patterns 2191 // "/codesearch" and "codesearch.google.com/" without also taking over 2192 // requests for "http://www.google.com/". 2193 // 2194 // ServeMux also takes care of sanitizing the URL request path and the Host 2195 // header, stripping the port number and redirecting any request containing . or 2196 // .. elements or repeated slashes to an equivalent, cleaner URL. 2197 type ServeMux struct { 2198 mu sync.RWMutex 2199 m map[string]muxEntry 2200 es []muxEntry // slice of entries sorted from longest to shortest. 2201 hosts bool // whether any patterns contain hostnames 2202 } 2203 2204 type muxEntry struct { 2205 h Handler 2206 pattern string 2207 } 2208 2209 // NewServeMux allocates and returns a new ServeMux. 2210 func NewServeMux() *ServeMux { return new(ServeMux) } 2211 2212 // DefaultServeMux is the default ServeMux used by Serve. 2213 var DefaultServeMux = &defaultServeMux 2214 2215 var defaultServeMux ServeMux 2216 2217 // cleanPath returns the canonical path for p, eliminating . and .. elements. 2218 func cleanPath(p string) string { 2219 if p == "" { 2220 return "/" 2221 } 2222 if p[0] != '/' { 2223 p = "/" + p 2224 } 2225 np := path.Clean(p) 2226 // path.Clean removes trailing slash except for root; 2227 // put the trailing slash back if necessary. 2228 if p[len(p)-1] == '/' && np != "/" { 2229 // Fast path for common case of p being the string we want: 2230 if len(p) == len(np)+1 && strings.HasPrefix(p, np) { 2231 np = p 2232 } else { 2233 np += "/" 2234 } 2235 } 2236 return np 2237 } 2238 2239 // stripHostPort returns h without any trailing ":<port>". 
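// Editor's sketch (added commentary, not part of the original source): the
// ServeMux pattern rules documented above, in miniature. Longer (more
// specific) patterns win, "/" catches everything else, and a request for
// "/images" is redirected to "/images/" because only the subtree is
// registered. Routes and address are illustrative assumptions.
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//		"net/http"
//	)
//
//	func main() {
//		mux := http.NewServeMux()
//		mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
//			fmt.Fprintf(w, "catch-all: %s\n", r.URL.Path)
//		})
//		mux.HandleFunc("/images/", func(w http.ResponseWriter, r *http.Request) {
//			fmt.Fprintln(w, "images subtree")
//		})
//		mux.HandleFunc("/images/thumbnails/", func(w http.ResponseWriter, r *http.Request) {
//			fmt.Fprintln(w, "thumbnails subtree")
//		})
//		log.Fatal(http.ListenAndServe("127.0.0.1:8080", mux))
//	}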
2240 func stripHostPort(h string) string { 2241 // If no port on host, return unchanged 2242 if strings.IndexByte(h, ':') == -1 { 2243 return h 2244 } 2245 host, _, err := net.SplitHostPort(h) 2246 if err != nil { 2247 return h // on error, return unchanged 2248 } 2249 return host 2250 } 2251 2252 // Find a handler on a handler map given a path string. 2253 // Most-specific (longest) pattern wins. 2254 func (mux *ServeMux) match(path string) (h Handler, pattern string) { 2255 // Check for exact match first. 2256 v, ok := mux.m[path] 2257 if ok { 2258 return v.h, v.pattern 2259 } 2260 2261 // Check for longest valid match. mux.es contains all patterns 2262 // that end in / sorted from longest to shortest. 2263 for _, e := range mux.es { 2264 if strings.HasPrefix(path, e.pattern) { 2265 return e.h, e.pattern 2266 } 2267 } 2268 return nil, "" 2269 } 2270 2271 // redirectToPathSlash determines if the given path needs appending "/" to it. 2272 // This occurs when a handler for path + "/" was already registered, but 2273 // not for path itself. If the path needs appending to, it creates a new 2274 // URL, setting the path to u.Path + "/" and returning true to indicate so. 2275 func (mux *ServeMux) redirectToPathSlash(host, path string, u *url.URL) (*url.URL, bool) { 2276 mux.mu.RLock() 2277 shouldRedirect := mux.shouldRedirectRLocked(host, path) 2278 mux.mu.RUnlock() 2279 if !shouldRedirect { 2280 return u, false 2281 } 2282 path = path + "/" 2283 u = &url.URL{Path: path, RawQuery: u.RawQuery} 2284 return u, true 2285 } 2286 2287 // shouldRedirectRLocked reports whether the given path and host should be redirected to 2288 // path+"/". This should happen if a handler is registered for path+"/" but 2289 // not path -- see comments at ServeMux. 2290 func (mux *ServeMux) shouldRedirectRLocked(host, path string) bool { 2291 p := []string{path, host + path} 2292 2293 for _, c := range p { 2294 if _, exist := mux.m[c]; exist { 2295 return false 2296 } 2297 } 2298 2299 n := len(path) 2300 if n == 0 { 2301 return false 2302 } 2303 for _, c := range p { 2304 if _, exist := mux.m[c+"/"]; exist { 2305 return path[n-1] != '/' 2306 } 2307 } 2308 2309 return false 2310 } 2311 2312 // Handler returns the handler to use for the given request, 2313 // consulting r.Method, r.Host, and r.URL.Path. It always returns 2314 // a non-nil handler. If the path is not in its canonical form, the 2315 // handler will be an internally-generated handler that redirects 2316 // to the canonical path. If the host contains a port, it is ignored 2317 // when matching handlers. 2318 // 2319 // The path and host are used unchanged for CONNECT requests. 2320 // 2321 // Handler also returns the registered pattern that matches the 2322 // request or, in the case of internally-generated redirects, 2323 // the pattern that will match after following the redirect. 2324 // 2325 // If there is no registered handler that applies to the request, 2326 // Handler returns a ``page not found'' handler and an empty pattern. 2327 func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) { 2328 2329 // CONNECT requests are not canonicalized. 2330 if r.Method == "CONNECT" { 2331 // If r.URL.Path is /tree and its handler is not registered, 2332 // the /tree -> /tree/ redirect applies to CONNECT requests 2333 // but the path canonicalization does not. 
2334 if u, ok := mux.redirectToPathSlash(r.URL.Host, r.URL.Path, r.URL); ok { 2335 return RedirectHandler(u.String(), StatusMovedPermanently), u.Path 2336 } 2337 2338 return mux.handler(r.Host, r.URL.Path) 2339 } 2340 2341 // All other requests have any port stripped and path cleaned 2342 // before passing to mux.handler. 2343 host := stripHostPort(r.Host) 2344 path := cleanPath(r.URL.Path) 2345 2346 // If the given path is /tree and its handler is not registered, 2347 // redirect for /tree/. 2348 if u, ok := mux.redirectToPathSlash(host, path, r.URL); ok { 2349 return RedirectHandler(u.String(), StatusMovedPermanently), u.Path 2350 } 2351 2352 if path != r.URL.Path { 2353 _, pattern = mux.handler(host, path) 2354 url := *r.URL 2355 url.Path = path 2356 return RedirectHandler(url.String(), StatusMovedPermanently), pattern 2357 } 2358 2359 return mux.handler(host, r.URL.Path) 2360 } 2361 2362 // handler is the main implementation of Handler. 2363 // The path is known to be in canonical form, except for CONNECT methods. 2364 func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) { 2365 mux.mu.RLock() 2366 defer mux.mu.RUnlock() 2367 2368 // Host-specific pattern takes precedence over generic ones 2369 if mux.hosts { 2370 h, pattern = mux.match(host + path) 2371 } 2372 if h == nil { 2373 h, pattern = mux.match(path) 2374 } 2375 if h == nil { 2376 h, pattern = NotFoundHandler(), "" 2377 } 2378 return 2379 } 2380 2381 // ServeHTTP dispatches the request to the handler whose 2382 // pattern most closely matches the request URL. 2383 func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) { 2384 if r.RequestURI == "*" { 2385 if r.ProtoAtLeast(1, 1) { 2386 w.Header().Set("Connection", "close") 2387 } 2388 w.WriteHeader(StatusBadRequest) 2389 return 2390 } 2391 h, _ := mux.Handler(r) 2392 h.ServeHTTP(w, r) 2393 } 2394 2395 // Handle registers the handler for the given pattern. 2396 // If a handler already exists for pattern, Handle panics. 2397 func (mux *ServeMux) Handle(pattern string, handler Handler) { 2398 mux.mu.Lock() 2399 defer mux.mu.Unlock() 2400 2401 if pattern == "" { 2402 panic("http: invalid pattern") 2403 } 2404 if handler == nil { 2405 panic("http: nil handler") 2406 } 2407 if _, exist := mux.m[pattern]; exist { 2408 panic("http: multiple registrations for " + pattern) 2409 } 2410 2411 if mux.m == nil { 2412 mux.m = make(map[string]muxEntry) 2413 } 2414 e := muxEntry{h: handler, pattern: pattern} 2415 mux.m[pattern] = e 2416 if pattern[len(pattern)-1] == '/' { 2417 mux.es = appendSorted(mux.es, e) 2418 } 2419 2420 if pattern[0] != '/' { 2421 mux.hosts = true 2422 } 2423 } 2424 2425 func appendSorted(es []muxEntry, e muxEntry) []muxEntry { 2426 n := len(es) 2427 i := sort.Search(n, func(i int) bool { 2428 return len(es[i].pattern) < len(e.pattern) 2429 }) 2430 if i == n { 2431 return append(es, e) 2432 } 2433 // we now know that i points at where we want to insert 2434 es = append(es, muxEntry{}) // try to grow the slice in place, any entry works. 2435 copy(es[i+1:], es[i:]) // Move shorter entries down 2436 es[i] = e 2437 return es 2438 } 2439 2440 // HandleFunc registers the handler function for the given pattern. 2441 func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { 2442 if handler == nil { 2443 panic("http: nil handler") 2444 } 2445 mux.Handle(pattern, HandlerFunc(handler)) 2446 } 2447 2448 // Handle registers the handler for the given pattern 2449 // in the DefaultServeMux. 
2450 // The documentation for ServeMux explains how patterns are matched. 2451 func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } 2452 2453 // HandleFunc registers the handler function for the given pattern 2454 // in the DefaultServeMux. 2455 // The documentation for ServeMux explains how patterns are matched. 2456 func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { 2457 DefaultServeMux.HandleFunc(pattern, handler) 2458 } 2459 2460 // Serve accepts incoming HTTP connections on the listener l, 2461 // creating a new service goroutine for each. The service goroutines 2462 // read requests and then call handler to reply to them. 2463 // 2464 // The handler is typically nil, in which case the DefaultServeMux is used. 2465 // 2466 // HTTP/2 support is only enabled if the Listener returns *tls.Conn 2467 // connections and they were configured with "h2" in the TLS 2468 // Config.NextProtos. 2469 // 2470 // Serve always returns a non-nil error. 2471 func Serve(l net.Listener, handler Handler) error { 2472 srv := &Server{Handler: handler} 2473 return srv.Serve(l) 2474 } 2475 2476 // ServeTLS accepts incoming HTTPS connections on the listener l, 2477 // creating a new service goroutine for each. The service goroutines 2478 // read requests and then call handler to reply to them. 2479 // 2480 // The handler is typically nil, in which case the DefaultServeMux is used. 2481 // 2482 // Additionally, files containing a certificate and matching private key 2483 // for the server must be provided. If the certificate is signed by a 2484 // certificate authority, the certFile should be the concatenation 2485 // of the server's certificate, any intermediates, and the CA's certificate. 2486 // 2487 // ServeTLS always returns a non-nil error. 2488 func ServeTLS(l net.Listener, handler Handler, certFile, keyFile string) error { 2489 srv := &Server{Handler: handler} 2490 return srv.ServeTLS(l, certFile, keyFile) 2491 } 2492 2493 // A Server defines parameters for running an HTTP server. 2494 // The zero value for Server is a valid configuration. 2495 type Server struct { 2496 // Addr optionally specifies the TCP address for the server to listen on, 2497 // in the form "host:port". If empty, ":http" (port 80) is used. 2498 // The service names are defined in RFC 6335 and assigned by IANA. 2499 // See net.Dial for details of the address format. 2500 Addr string 2501 2502 Handler Handler // handler to invoke, http.DefaultServeMux if nil 2503 2504 // TLSConfig optionally provides a TLS configuration for use 2505 // by ServeTLS and ListenAndServeTLS. Note that this value is 2506 // cloned by ServeTLS and ListenAndServeTLS, so it's not 2507 // possible to modify the configuration with methods like 2508 // tls.Config.SetSessionTicketKeys. To use 2509 // SetSessionTicketKeys, use Server.Serve with a TLS Listener 2510 // instead. 2511 TLSConfig *tls.Config 2512 2513 // ReadTimeout is the maximum duration for reading the entire 2514 // request, including the body. 2515 // 2516 // Because ReadTimeout does not let Handlers make per-request 2517 // decisions on each request body's acceptable deadline or 2518 // upload rate, most users will prefer to use 2519 // ReadHeaderTimeout. It is valid to use them both. 2520 ReadTimeout time.Duration 2521 2522 // ReadHeaderTimeout is the amount of time allowed to read 2523 // request headers. 
The connection's read deadline is reset 2524 // after reading the headers and the Handler can decide what 2525 // is considered too slow for the body. If ReadHeaderTimeout 2526 // is zero, the value of ReadTimeout is used. If both are 2527 // zero, there is no timeout. 2528 ReadHeaderTimeout time.Duration 2529 2530 // WriteTimeout is the maximum duration before timing out 2531 // writes of the response. It is reset whenever a new 2532 // request's header is read. Like ReadTimeout, it does not 2533 // let Handlers make decisions on a per-request basis. 2534 WriteTimeout time.Duration 2535 2536 // IdleTimeout is the maximum amount of time to wait for the 2537 // next request when keep-alives are enabled. If IdleTimeout 2538 // is zero, the value of ReadTimeout is used. If both are 2539 // zero, there is no timeout. 2540 IdleTimeout time.Duration 2541 2542 // MaxHeaderBytes controls the maximum number of bytes the 2543 // server will read parsing the request header's keys and 2544 // values, including the request line. It does not limit the 2545 // size of the request body. 2546 // If zero, DefaultMaxHeaderBytes is used. 2547 MaxHeaderBytes int 2548 2549 // TLSNextProto optionally specifies a function to take over 2550 // ownership of the provided TLS connection when an ALPN 2551 // protocol upgrade has occurred. The map key is the protocol 2552 // name negotiated. The Handler argument should be used to 2553 // handle HTTP requests and will initialize the Request's TLS 2554 // and RemoteAddr if not already set. The connection is 2555 // automatically closed when the function returns. 2556 // If TLSNextProto is not nil, HTTP/2 support is not enabled 2557 // automatically. 2558 TLSNextProto map[string]func(*Server, *tls.Conn, Handler) 2559 2560 // ConnState specifies an optional callback function that is 2561 // called when a client connection changes state. See the 2562 // ConnState type and associated constants for details. 2563 ConnState func(net.Conn, ConnState) 2564 2565 // ErrorLog specifies an optional logger for errors accepting 2566 // connections, unexpected behavior from handlers, and 2567 // underlying FileSystem errors. 2568 // If nil, logging is done via the log package's standard logger. 2569 ErrorLog *log.Logger 2570 2571 // BaseContext optionally specifies a function that returns 2572 // the base context for incoming requests on this server. 2573 // The provided Listener is the specific Listener that's 2574 // about to start accepting requests. 2575 // If BaseContext is nil, the default is context.Background(). 2576 // If non-nil, it must return a non-nil context. 2577 BaseContext func(net.Listener) context.Context 2578 2579 // ConnContext optionally specifies a function that modifies 2580 // the context used for a new connection c. The provided ctx 2581 // is derived from the base context and has a ServerContextKey 2582 // value. 2583 ConnContext func(ctx context.Context, c net.Conn) context.Context 2584 2585 disableKeepAlives int32 // accessed atomically. 
2586 inShutdown int32 // accessed atomically (non-zero means we're in Shutdown) 2587 nextProtoOnce sync.Once // guards setupHTTP2_* init 2588 nextProtoErr error // result of http2.ConfigureServer if used 2589 2590 mu sync.Mutex 2591 listeners map[*net.Listener]struct{} 2592 activeConn map[*conn]struct{} 2593 doneChan chan struct{} 2594 onShutdown []func() 2595 } 2596 2597 func (s *Server) getDoneChan() <-chan struct{} { 2598 s.mu.Lock() 2599 defer s.mu.Unlock() 2600 return s.getDoneChanLocked() 2601 } 2602 2603 func (s *Server) getDoneChanLocked() chan struct{} { 2604 if s.doneChan == nil { 2605 s.doneChan = make(chan struct{}) 2606 } 2607 return s.doneChan 2608 } 2609 2610 func (s *Server) closeDoneChanLocked() { 2611 ch := s.getDoneChanLocked() 2612 select { 2613 case <-ch: 2614 // Already closed. Don't close again. 2615 default: 2616 // Safe to close here. We're the only closer, guarded 2617 // by s.mu. 2618 close(ch) 2619 } 2620 } 2621 2622 // Close immediately closes all active net.Listeners and any 2623 // connections in state StateNew, StateActive, or StateIdle. For a 2624 // graceful shutdown, use Shutdown. 2625 // 2626 // Close does not attempt to close (and does not even know about) 2627 // any hijacked connections, such as WebSockets. 2628 // 2629 // Close returns any error returned from closing the Server's 2630 // underlying Listener(s). 2631 func (srv *Server) Close() error { 2632 atomic.StoreInt32(&srv.inShutdown, 1) 2633 srv.mu.Lock() 2634 defer srv.mu.Unlock() 2635 srv.closeDoneChanLocked() 2636 err := srv.closeListenersLocked() 2637 for c := range srv.activeConn { 2638 c.rwc.Close() 2639 delete(srv.activeConn, c) 2640 } 2641 return err 2642 } 2643 2644 // shutdownPollInterval is how often we poll for quiescence 2645 // during Server.Shutdown. This is lower during tests, to 2646 // speed up tests. 2647 // Ideally we could find a solution that doesn't involve polling, 2648 // but which also doesn't have a high runtime cost (and doesn't 2649 // involve any contentious mutexes), but that is left as an 2650 // exercise for the reader. 2651 var shutdownPollInterval = 500 * time.Millisecond 2652 2653 // Shutdown gracefully shuts down the server without interrupting any 2654 // active connections. Shutdown works by first closing all open 2655 // listeners, then closing all idle connections, and then waiting 2656 // indefinitely for connections to return to idle and then shut down. 2657 // If the provided context expires before the shutdown is complete, 2658 // Shutdown returns the context's error, otherwise it returns any 2659 // error returned from closing the Server's underlying Listener(s). 2660 // 2661 // When Shutdown is called, Serve, ListenAndServe, and 2662 // ListenAndServeTLS immediately return ErrServerClosed. Make sure the 2663 // program doesn't exit and waits instead for Shutdown to return. 2664 // 2665 // Shutdown does not attempt to close nor wait for hijacked 2666 // connections such as WebSockets. The caller of Shutdown should 2667 // separately notify such long-lived connections of shutdown and wait 2668 // for them to close, if desired. See RegisterOnShutdown for a way to 2669 // register shutdown notification functions. 2670 // 2671 // Once Shutdown has been called on a server, it may not be reused; 2672 // future calls to methods such as Serve will return ErrServerClosed. 
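// Editor's sketch (added commentary, not part of the original source): a
// typical graceful-shutdown arrangement for the Shutdown method documented
// above, combined with a few of the Server timeouts described earlier. The
// address, timeout values, and use of os.Interrupt are assumptions.
//
//	package main
//
//	import (
//		"context"
//		"log"
//		"net/http"
//		"os"
//		"os/signal"
//		"time"
//	)
//
//	func main() {
//		srv := &http.Server{
//			Addr:              "127.0.0.1:8080",
//			ReadHeaderTimeout: 5 * time.Second,
//			WriteTimeout:      10 * time.Second,
//			IdleTimeout:       60 * time.Second,
//		}
//
//		done := make(chan struct{})
//		go func() {
//			sig := make(chan os.Signal, 1)
//			signal.Notify(sig, os.Interrupt)
//			<-sig
//			// Give in-flight requests a bounded amount of time to finish.
//			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//			defer cancel()
//			if err := srv.Shutdown(ctx); err != nil {
//				log.Printf("shutdown: %v", err)
//			}
//			close(done)
//		}()
//
//		// ListenAndServe returns ErrServerClosed as soon as Shutdown begins,
//		// so wait for Shutdown itself before exiting.
//		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
//			log.Fatalf("listen: %v", err)
//		}
//		<-done
//	}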
2673 func (srv *Server) Shutdown(ctx context.Context) error { 2674 atomic.StoreInt32(&srv.inShutdown, 1) 2675 2676 srv.mu.Lock() 2677 lnerr := srv.closeListenersLocked() 2678 srv.closeDoneChanLocked() 2679 for _, f := range srv.onShutdown { 2680 go f() 2681 } 2682 srv.mu.Unlock() 2683 2684 ticker := time.NewTicker(shutdownPollInterval) 2685 defer ticker.Stop() 2686 for { 2687 if srv.closeIdleConns() { 2688 return lnerr 2689 } 2690 select { 2691 case <-ctx.Done(): 2692 return ctx.Err() 2693 case <-ticker.C: 2694 } 2695 } 2696 } 2697 2698 // RegisterOnShutdown registers a function to call on Shutdown. 2699 // This can be used to gracefully shutdown connections that have 2700 // undergone ALPN protocol upgrade or that have been hijacked. 2701 // This function should start protocol-specific graceful shutdown, 2702 // but should not wait for shutdown to complete. 2703 func (srv *Server) RegisterOnShutdown(f func()) { 2704 srv.mu.Lock() 2705 srv.onShutdown = append(srv.onShutdown, f) 2706 srv.mu.Unlock() 2707 } 2708 2709 // closeIdleConns closes all idle connections and reports whether the 2710 // server is quiescent. 2711 func (s *Server) closeIdleConns() bool { 2712 s.mu.Lock() 2713 defer s.mu.Unlock() 2714 quiescent := true 2715 for c := range s.activeConn { 2716 st, unixSec := c.getState() 2717 // Issue 22682: treat StateNew connections as if 2718 // they're idle if we haven't read the first request's 2719 // header in over 5 seconds. 2720 if st == StateNew && unixSec < time.Now().Unix()-5 { 2721 st = StateIdle 2722 } 2723 if st != StateIdle || unixSec == 0 { 2724 // Assume unixSec == 0 means it's a very new 2725 // connection, without state set yet. 2726 quiescent = false 2727 continue 2728 } 2729 c.rwc.Close() 2730 delete(s.activeConn, c) 2731 } 2732 return quiescent 2733 } 2734 2735 func (s *Server) closeListenersLocked() error { 2736 var err error 2737 for ln := range s.listeners { 2738 if cerr := (*ln).Close(); cerr != nil && err == nil { 2739 err = cerr 2740 } 2741 delete(s.listeners, ln) 2742 } 2743 return err 2744 } 2745 2746 // A ConnState represents the state of a client connection to a server. 2747 // It's used by the optional Server.ConnState hook. 2748 type ConnState int 2749 2750 const ( 2751 // StateNew represents a new connection that is expected to 2752 // send a request immediately. Connections begin at this 2753 // state and then transition to either StateActive or 2754 // StateClosed. 2755 StateNew ConnState = iota 2756 2757 // StateActive represents a connection that has read 1 or more 2758 // bytes of a request. The Server.ConnState hook for 2759 // StateActive fires before the request has entered a handler 2760 // and doesn't fire again until the request has been 2761 // handled. After the request is handled, the state 2762 // transitions to StateClosed, StateHijacked, or StateIdle. 2763 // For HTTP/2, StateActive fires on the transition from zero 2764 // to one active request, and only transitions away once all 2765 // active requests are complete. That means that ConnState 2766 // cannot be used to do per-request work; ConnState only notes 2767 // the overall state of the connection. 2768 StateActive 2769 2770 // StateIdle represents a connection that has finished 2771 // handling a request and is in the keep-alive state, waiting 2772 // for a new request. Connections transition from StateIdle 2773 // to either StateActive or StateClosed. 2774 StateIdle 2775 2776 // StateHijacked represents a hijacked connection. 2777 // This is a terminal state. 
It does not transition to StateClosed. 2778 StateHijacked 2779 2780 // StateClosed represents a closed connection. 2781 // This is a terminal state. Hijacked connections do not 2782 // transition to StateClosed. 2783 StateClosed 2784 ) 2785 2786 var stateName = map[ConnState]string{ 2787 StateNew: "new", 2788 StateActive: "active", 2789 StateIdle: "idle", 2790 StateHijacked: "hijacked", 2791 StateClosed: "closed", 2792 } 2793 2794 func (c ConnState) String() string { 2795 return stateName[c] 2796 } 2797 2798 // serverHandler delegates to either the server's Handler or 2799 // DefaultServeMux and also handles "OPTIONS *" requests. 2800 type serverHandler struct { 2801 srv *Server 2802 } 2803 2804 func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) { 2805 handler := sh.srv.Handler 2806 if handler == nil { 2807 handler = DefaultServeMux 2808 } 2809 if req.RequestURI == "*" && req.Method == "OPTIONS" { 2810 handler = globalOptionsHandler{} 2811 } 2812 handler.ServeHTTP(rw, req) 2813 } 2814 2815 // ListenAndServe listens on the TCP network address srv.Addr and then 2816 // calls Serve to handle requests on incoming connections. 2817 // Accepted connections are configured to enable TCP keep-alives. 2818 // 2819 // If srv.Addr is blank, ":http" is used. 2820 // 2821 // ListenAndServe always returns a non-nil error. After Shutdown or Close, 2822 // the returned error is ErrServerClosed. 2823 func (srv *Server) ListenAndServe() error { 2824 if srv.shuttingDown() { 2825 return ErrServerClosed 2826 } 2827 addr := srv.Addr 2828 if addr == "" { 2829 addr = ":http" 2830 } 2831 ln, err := net.Listen("tcp", addr) 2832 if err != nil { 2833 return err 2834 } 2835 return srv.Serve(ln) 2836 } 2837 2838 var testHookServerServe func(*Server, net.Listener) // used if non-nil 2839 2840 // shouldDoServeHTTP2 reports whether Server.Serve should configure 2841 // automatic HTTP/2. (which sets up the srv.TLSNextProto map) 2842 func (srv *Server) shouldConfigureHTTP2ForServe() bool { 2843 if srv.TLSConfig == nil { 2844 // Compatibility with Go 1.6: 2845 // If there's no TLSConfig, it's possible that the user just 2846 // didn't set it on the http.Server, but did pass it to 2847 // tls.NewListener and passed that listener to Serve. 2848 // So we should configure HTTP/2 (to set up srv.TLSNextProto) 2849 // in case the listener returns an "h2" *tls.Conn. 2850 return true 2851 } 2852 // The user specified a TLSConfig on their http.Server. 2853 // In this, case, only configure HTTP/2 if their tls.Config 2854 // explicitly mentions "h2". Otherwise http2.ConfigureServer 2855 // would modify the tls.Config to add it, but they probably already 2856 // passed this tls.Config to tls.NewListener. And if they did, 2857 // it's too late anyway to fix it. It would only be potentially racy. 2858 // See Issue 15908. 2859 return strSliceContains(srv.TLSConfig.NextProtos, http2NextProtoTLS) 2860 } 2861 2862 // ErrServerClosed is returned by the Server's Serve, ServeTLS, ListenAndServe, 2863 // and ListenAndServeTLS methods after a call to Shutdown or Close. 2864 var ErrServerClosed = errors.New("http: Server closed") 2865 2866 // Serve accepts incoming connections on the Listener l, creating a 2867 // new service goroutine for each. The service goroutines read requests and 2868 // then call srv.Handler to reply to them. 2869 // 2870 // HTTP/2 support is only enabled if the Listener returns *tls.Conn 2871 // connections and they were configured with "h2" in the TLS 2872 // Config.NextProtos. 
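// Editor's sketch (added commentary, not part of the original source): a
// Server using the ConnState hook to observe the state transitions listed
// above. The hook fires per connection, not per request; the address is an
// illustrative assumption.
//
//	package main
//
//	import (
//		"log"
//		"net"
//		"net/http"
//	)
//
//	func main() {
//		srv := &http.Server{
//			Addr: "127.0.0.1:8080",
//			ConnState: func(c net.Conn, state http.ConnState) {
//				log.Printf("%s -> %s", c.RemoteAddr(), state)
//			},
//		}
//		log.Fatal(srv.ListenAndServe())
//	}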
2873 // 2874 // Serve always returns a non-nil error and closes l. 2875 // After Shutdown or Close, the returned error is ErrServerClosed. 2876 func (srv *Server) Serve(l net.Listener) error { 2877 if fn := testHookServerServe; fn != nil { 2878 fn(srv, l) // call hook with unwrapped listener 2879 } 2880 2881 origListener := l 2882 l = &onceCloseListener{Listener: l} 2883 defer l.Close() 2884 2885 if err := srv.setupHTTP2_Serve(); err != nil { 2886 return err 2887 } 2888 2889 if !srv.trackListener(&l, true) { 2890 return ErrServerClosed 2891 } 2892 defer srv.trackListener(&l, false) 2893 2894 baseCtx := context.Background() 2895 if srv.BaseContext != nil { 2896 baseCtx = srv.BaseContext(origListener) 2897 if baseCtx == nil { 2898 panic("BaseContext returned a nil context") 2899 } 2900 } 2901 2902 var tempDelay time.Duration // how long to sleep on accept failure 2903 2904 ctx := context.WithValue(baseCtx, ServerContextKey, srv) 2905 for { 2906 rw, err := l.Accept() 2907 if err != nil { 2908 select { 2909 case <-srv.getDoneChan(): 2910 return ErrServerClosed 2911 default: 2912 } 2913 if ne, ok := err.(net.Error); ok && ne.Temporary() { 2914 if tempDelay == 0 { 2915 tempDelay = 5 * time.Millisecond 2916 } else { 2917 tempDelay *= 2 2918 } 2919 if max := 1 * time.Second; tempDelay > max { 2920 tempDelay = max 2921 } 2922 srv.logf("http: Accept error: %v; retrying in %v", err, tempDelay) 2923 time.Sleep(tempDelay) 2924 continue 2925 } 2926 return err 2927 } 2928 connCtx := ctx 2929 if cc := srv.ConnContext; cc != nil { 2930 connCtx = cc(connCtx, rw) 2931 if connCtx == nil { 2932 panic("ConnContext returned nil") 2933 } 2934 } 2935 tempDelay = 0 2936 c := srv.newConn(rw) 2937 c.setState(c.rwc, StateNew) // before Serve can return 2938 go c.serve(connCtx) 2939 } 2940 } 2941 2942 // ServeTLS accepts incoming connections on the Listener l, creating a 2943 // new service goroutine for each. The service goroutines perform TLS 2944 // setup and then read requests, calling srv.Handler to reply to them. 2945 // 2946 // Files containing a certificate and matching private key for the 2947 // server must be provided if neither the Server's 2948 // TLSConfig.Certificates nor TLSConfig.GetCertificate are populated. 2949 // If the certificate is signed by a certificate authority, the 2950 // certFile should be the concatenation of the server's certificate, 2951 // any intermediates, and the CA's certificate. 2952 // 2953 // ServeTLS always returns a non-nil error. After Shutdown or Close, the 2954 // returned error is ErrServerClosed. 2955 func (srv *Server) ServeTLS(l net.Listener, certFile, keyFile string) error { 2956 // Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig 2957 // before we clone it and create the TLS Listener. 
2958 if err := srv.setupHTTP2_ServeTLS(); err != nil { 2959 return err 2960 } 2961 2962 config := cloneTLSConfig(srv.TLSConfig) 2963 if !strSliceContains(config.NextProtos, "http/1.1") { 2964 config.NextProtos = append(config.NextProtos, "http/1.1") 2965 } 2966 2967 configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil 2968 if !configHasCert || certFile != "" || keyFile != "" { 2969 var err error 2970 config.Certificates = make([]tls.Certificate, 1) 2971 config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) 2972 if err != nil { 2973 return err 2974 } 2975 } 2976 2977 tlsListener := tls.NewListener(l, config) 2978 return srv.Serve(tlsListener) 2979 } 2980 2981 // trackListener adds or removes a net.Listener to the set of tracked 2982 // listeners. 2983 // 2984 // We store a pointer to interface in the map set, in case the 2985 // net.Listener is not comparable. This is safe because we only call 2986 // trackListener via Serve and can track+defer untrack the same 2987 // pointer to local variable there. We never need to compare a 2988 // Listener from another caller. 2989 // 2990 // It reports whether the server is still up (not Shutdown or Closed). 2991 func (s *Server) trackListener(ln *net.Listener, add bool) bool { 2992 s.mu.Lock() 2993 defer s.mu.Unlock() 2994 if s.listeners == nil { 2995 s.listeners = make(map[*net.Listener]struct{}) 2996 } 2997 if add { 2998 if s.shuttingDown() { 2999 return false 3000 } 3001 s.listeners[ln] = struct{}{} 3002 } else { 3003 delete(s.listeners, ln) 3004 } 3005 return true 3006 } 3007 3008 func (s *Server) trackConn(c *conn, add bool) { 3009 s.mu.Lock() 3010 defer s.mu.Unlock() 3011 if s.activeConn == nil { 3012 s.activeConn = make(map[*conn]struct{}) 3013 } 3014 if add { 3015 s.activeConn[c] = struct{}{} 3016 } else { 3017 delete(s.activeConn, c) 3018 } 3019 } 3020 3021 func (s *Server) idleTimeout() time.Duration { 3022 if s.IdleTimeout != 0 { 3023 return s.IdleTimeout 3024 } 3025 return s.ReadTimeout 3026 } 3027 3028 func (s *Server) readHeaderTimeout() time.Duration { 3029 if s.ReadHeaderTimeout != 0 { 3030 return s.ReadHeaderTimeout 3031 } 3032 return s.ReadTimeout 3033 } 3034 3035 func (s *Server) doKeepAlives() bool { 3036 return atomic.LoadInt32(&s.disableKeepAlives) == 0 && !s.shuttingDown() 3037 } 3038 3039 func (s *Server) shuttingDown() bool { 3040 // TODO: replace inShutdown with the existing atomicBool type; 3041 // see https://github.com/golang/go/issues/20239#issuecomment-381434582 3042 return atomic.LoadInt32(&s.inShutdown) != 0 3043 } 3044 3045 // SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled. 3046 // By default, keep-alives are always enabled. Only very 3047 // resource-constrained environments or servers in the process of 3048 // shutting down should disable them. 3049 func (srv *Server) SetKeepAlivesEnabled(v bool) { 3050 if v { 3051 atomic.StoreInt32(&srv.disableKeepAlives, 0) 3052 return 3053 } 3054 atomic.StoreInt32(&srv.disableKeepAlives, 1) 3055 3056 // Close idle HTTP/1 conns: 3057 srv.closeIdleConns() 3058 3059 // TODO: Issue 26303: close HTTP/2 conns as soon as they become idle. 3060 } 3061 3062 func (s *Server) logf(format string, args ...interface{}) { 3063 if s.ErrorLog != nil { 3064 s.ErrorLog.Printf(format, args...) 3065 } else { 3066 log.Printf(format, args...) 3067 } 3068 } 3069 3070 // logf prints to the ErrorLog of the *Server associated with request r 3071 // via ServerContextKey. 
If there's no associated server, or if ErrorLog 3072 // is nil, logging is done via the log package's standard logger. 3073 func logf(r *Request, format string, args ...interface{}) { 3074 s, _ := r.Context().Value(ServerContextKey).(*Server) 3075 if s != nil && s.ErrorLog != nil { 3076 s.ErrorLog.Printf(format, args...) 3077 } else { 3078 log.Printf(format, args...) 3079 } 3080 } 3081 3082 // ListenAndServe listens on the TCP network address addr and then calls 3083 // Serve with handler to handle requests on incoming connections. 3084 // Accepted connections are configured to enable TCP keep-alives. 3085 // 3086 // The handler is typically nil, in which case the DefaultServeMux is used. 3087 // 3088 // ListenAndServe always returns a non-nil error. 3089 func ListenAndServe(addr string, handler Handler) error { 3090 server := &Server{Addr: addr, Handler: handler} 3091 return server.ListenAndServe() 3092 } 3093 3094 // ListenAndServeTLS acts identically to ListenAndServe, except that it 3095 // expects HTTPS connections. Additionally, files containing a certificate and 3096 // matching private key for the server must be provided. If the certificate 3097 // is signed by a certificate authority, the certFile should be the concatenation 3098 // of the server's certificate, any intermediates, and the CA's certificate. 3099 func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { 3100 server := &Server{Addr: addr, Handler: handler} 3101 return server.ListenAndServeTLS(certFile, keyFile) 3102 } 3103 3104 // ListenAndServeTLS listens on the TCP network address srv.Addr and 3105 // then calls ServeTLS to handle requests on incoming TLS connections. 3106 // Accepted connections are configured to enable TCP keep-alives. 3107 // 3108 // Filenames containing a certificate and matching private key for the 3109 // server must be provided if neither the Server's TLSConfig.Certificates 3110 // nor TLSConfig.GetCertificate are populated. If the certificate is 3111 // signed by a certificate authority, the certFile should be the 3112 // concatenation of the server's certificate, any intermediates, and 3113 // the CA's certificate. 3114 // 3115 // If srv.Addr is blank, ":https" is used. 3116 // 3117 // ListenAndServeTLS always returns a non-nil error. After Shutdown or 3118 // Close, the returned error is ErrServerClosed. 3119 func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error { 3120 if srv.shuttingDown() { 3121 return ErrServerClosed 3122 } 3123 addr := srv.Addr 3124 if addr == "" { 3125 addr = ":https" 3126 } 3127 3128 ln, err := net.Listen("tcp", addr) 3129 if err != nil { 3130 return err 3131 } 3132 3133 defer ln.Close() 3134 3135 return srv.ServeTLS(ln, certFile, keyFile) 3136 } 3137 3138 // setupHTTP2_ServeTLS conditionally configures HTTP/2 on 3139 // srv and reports whether there was an error setting it up. If it is 3140 // not configured for policy reasons, nil is returned. 3141 func (srv *Server) setupHTTP2_ServeTLS() error { 3142 srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults) 3143 return srv.nextProtoErr 3144 } 3145 3146 // setupHTTP2_Serve is called from (*Server).Serve and conditionally 3147 // configures HTTP/2 on srv using a more conservative policy than 3148 // setupHTTP2_ServeTLS because Serve is called after tls.Listen, 3149 // and may be called concurrently. See shouldConfigureHTTP2ForServe. 
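// Editor's sketch (added commentary, not part of the original source): the
// package-level ListenAndServeTLS described above in its simplest form. The
// file names cert.pem and key.pem and the port are assumptions; with a TLS
// listener, HTTP/2 is negotiated automatically unless TLSNextProto was set.
//
//	package main
//
//	import (
//		"log"
//		"net/http"
//	)
//
//	func main() {
//		http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
//			w.Write([]byte("hello over TLS\n"))
//		})
//		// cert.pem should hold the leaf certificate followed by any
//		// intermediates; key.pem holds the matching private key.
//		log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
//	}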
3150 // 3151 // The tests named TestTransportAutomaticHTTP2* and 3152 // TestConcurrentServerServe in server_test.go demonstrate some 3153 // of the supported use cases and motivations. 3154 func (srv *Server) setupHTTP2_Serve() error { 3155 srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults_Serve) 3156 return srv.nextProtoErr 3157 } 3158 3159 func (srv *Server) onceSetNextProtoDefaults_Serve() { 3160 if srv.shouldConfigureHTTP2ForServe() { 3161 srv.onceSetNextProtoDefaults() 3162 } 3163 } 3164 3165 // onceSetNextProtoDefaults configures HTTP/2, if the user hasn't 3166 // configured otherwise. (by setting srv.TLSNextProto non-nil) 3167 // It must only be called via srv.nextProtoOnce (use srv.setupHTTP2_*). 3168 func (srv *Server) onceSetNextProtoDefaults() { 3169 if omitBundledHTTP2 || strings.Contains(os.Getenv("GODEBUG"), "http2server=0") { 3170 return 3171 } 3172 // Enable HTTP/2 by default if the user hasn't otherwise 3173 // configured their TLSNextProto map. 3174 if srv.TLSNextProto == nil { 3175 conf := &http2Server{ 3176 NewWriteScheduler: func() http2WriteScheduler { return http2NewPriorityWriteScheduler(nil) }, 3177 } 3178 srv.nextProtoErr = http2ConfigureServer(srv, conf) 3179 } 3180 } 3181 3182 // TimeoutHandler returns a Handler that runs h with the given time limit. 3183 // 3184 // The new Handler calls h.ServeHTTP to handle each request, but if a 3185 // call runs for longer than its time limit, the handler responds with 3186 // a 503 Service Unavailable error and the given message in its body. 3187 // (If msg is empty, a suitable default message will be sent.) 3188 // After such a timeout, writes by h to its ResponseWriter will return 3189 // ErrHandlerTimeout. 3190 // 3191 // TimeoutHandler supports the Pusher interface but does not support 3192 // the Hijacker or Flusher interfaces. 3193 func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler { 3194 return &timeoutHandler{ 3195 handler: h, 3196 body: msg, 3197 dt: dt, 3198 } 3199 } 3200 3201 // ErrHandlerTimeout is returned on ResponseWriter Write calls 3202 // in handlers which have timed out. 3203 var ErrHandlerTimeout = errors.New("http: Handler timeout") 3204 3205 type timeoutHandler struct { 3206 handler Handler 3207 body string 3208 dt time.Duration 3209 3210 // When set, no context will be created and this context will 3211 // be used instead. 
3212 testContext context.Context 3213 } 3214 3215 func (h *timeoutHandler) errorBody() string { 3216 if h.body != "" { 3217 return h.body 3218 } 3219 return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>" 3220 } 3221 3222 func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) { 3223 ctx := h.testContext 3224 if ctx == nil { 3225 var cancelCtx context.CancelFunc 3226 ctx, cancelCtx = context.WithTimeout(r.Context(), h.dt) 3227 defer cancelCtx() 3228 } 3229 r = r.WithContext(ctx) 3230 done := make(chan struct{}) 3231 tw := &timeoutWriter{ 3232 w: w, 3233 h: make(Header), 3234 req: r, 3235 } 3236 panicChan := make(chan interface{}, 1) 3237 go func() { 3238 defer func() { 3239 if p := recover(); p != nil { 3240 panicChan <- p 3241 } 3242 }() 3243 h.handler.ServeHTTP(tw, r) 3244 close(done) 3245 }() 3246 select { 3247 case p := <-panicChan: 3248 panic(p) 3249 case <-done: 3250 tw.mu.Lock() 3251 defer tw.mu.Unlock() 3252 dst := w.Header() 3253 for k, vv := range tw.h { 3254 dst[k] = vv 3255 } 3256 if !tw.wroteHeader { 3257 tw.code = StatusOK 3258 } 3259 w.WriteHeader(tw.code) 3260 w.Write(tw.wbuf.Bytes()) 3261 case <-ctx.Done(): 3262 tw.mu.Lock() 3263 defer tw.mu.Unlock() 3264 w.WriteHeader(StatusServiceUnavailable) 3265 io.WriteString(w, h.errorBody()) 3266 tw.timedOut = true 3267 } 3268 } 3269 3270 type timeoutWriter struct { 3271 w ResponseWriter 3272 h Header 3273 wbuf bytes.Buffer 3274 req *Request 3275 3276 mu sync.Mutex 3277 timedOut bool 3278 wroteHeader bool 3279 code int 3280 } 3281 3282 var _ Pusher = (*timeoutWriter)(nil) 3283 3284 // Push implements the Pusher interface. 3285 func (tw *timeoutWriter) Push(target string, opts *PushOptions) error { 3286 if pusher, ok := tw.w.(Pusher); ok { 3287 return pusher.Push(target, opts) 3288 } 3289 return ErrNotSupported 3290 } 3291 3292 func (tw *timeoutWriter) Header() Header { return tw.h } 3293 3294 func (tw *timeoutWriter) Write(p []byte) (int, error) { 3295 tw.mu.Lock() 3296 defer tw.mu.Unlock() 3297 if tw.timedOut { 3298 return 0, ErrHandlerTimeout 3299 } 3300 if !tw.wroteHeader { 3301 tw.writeHeaderLocked(StatusOK) 3302 } 3303 return tw.wbuf.Write(p) 3304 } 3305 3306 func (tw *timeoutWriter) writeHeaderLocked(code int) { 3307 checkWriteHeaderCode(code) 3308 3309 switch { 3310 case tw.timedOut: 3311 return 3312 case tw.wroteHeader: 3313 if tw.req != nil { 3314 caller := relevantCaller() 3315 logf(tw.req, "http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line) 3316 } 3317 default: 3318 tw.wroteHeader = true 3319 tw.code = code 3320 } 3321 } 3322 3323 func (tw *timeoutWriter) WriteHeader(code int) { 3324 tw.mu.Lock() 3325 defer tw.mu.Unlock() 3326 tw.writeHeaderLocked(code) 3327 } 3328 3329 // onceCloseListener wraps a net.Listener, protecting it from 3330 // multiple Close calls. 3331 type onceCloseListener struct { 3332 net.Listener 3333 once sync.Once 3334 closeErr error 3335 } 3336 3337 func (oc *onceCloseListener) Close() error { 3338 oc.once.Do(oc.close) 3339 return oc.closeErr 3340 } 3341 3342 func (oc *onceCloseListener) close() { oc.closeErr = oc.Listener.Close() } 3343 3344 // globalOptionsHandler responds to "OPTIONS *" requests. 
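// Editor's sketch (added commentary, not part of the original source): the
// timeoutHandler implemented above, used through the exported TimeoutHandler
// constructor. The route, durations, and message are assumptions.
//
//	package main
//
//	import (
//		"log"
//		"net/http"
//		"time"
//	)
//
//	func main() {
//		slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//			time.Sleep(2 * time.Second) // pretend to do slow work
//			w.Write([]byte("done\n"))   // after the deadline this write returns ErrHandlerTimeout
//		})
//		// After one second the client receives a 503 with the given body instead.
//		http.Handle("/slow", http.TimeoutHandler(slow, 1*time.Second, "request timed out"))
//		log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
//	}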
3345 type globalOptionsHandler struct{} 3346 3347 func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) { 3348 w.Header().Set("Content-Length", "0") 3349 if r.ContentLength != 0 { 3350 // Read up to 4KB of OPTIONS body (as mentioned in the 3351 // spec as being reserved for future use), but anything 3352 // over that is considered a waste of server resources 3353 // (or an attack) and we abort and close the connection, 3354 // courtesy of MaxBytesReader's EOF behavior. 3355 mb := MaxBytesReader(w, r.Body, 4<<10) 3356 io.Copy(ioutil.Discard, mb) 3357 } 3358 } 3359 3360 // initALPNRequest is an HTTP handler that initializes certain 3361 // uninitialized fields in its *Request. Such partially-initialized 3362 // Requests come from ALPN protocol handlers. 3363 type initALPNRequest struct { 3364 ctx context.Context 3365 c *tls.Conn 3366 h serverHandler 3367 } 3368 3369 // BaseContext is an exported but unadvertised http.Handler method 3370 // recognized by x/net/http2 to pass down a context; the TLSNextProto 3371 // API predates context support so we shoehorn through the only 3372 // interface we have available. 3373 func (h initALPNRequest) BaseContext() context.Context { return h.ctx } 3374 3375 func (h initALPNRequest) ServeHTTP(rw ResponseWriter, req *Request) { 3376 if req.TLS == nil { 3377 req.TLS = &tls.ConnectionState{} 3378 *req.TLS = h.c.ConnectionState() 3379 } 3380 if req.Body == nil { 3381 req.Body = NoBody 3382 } 3383 if req.RemoteAddr == "" { 3384 req.RemoteAddr = h.c.RemoteAddr().String() 3385 } 3386 h.h.ServeHTTP(rw, req) 3387 } 3388 3389 // loggingConn is used for debugging. 3390 type loggingConn struct { 3391 name string 3392 net.Conn 3393 } 3394 3395 var ( 3396 uniqNameMu sync.Mutex 3397 uniqNameNext = make(map[string]int) 3398 ) 3399 3400 func newLoggingConn(baseName string, c net.Conn) net.Conn { 3401 uniqNameMu.Lock() 3402 defer uniqNameMu.Unlock() 3403 uniqNameNext[baseName]++ 3404 return &loggingConn{ 3405 name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]), 3406 Conn: c, 3407 } 3408 } 3409 3410 func (c *loggingConn) Write(p []byte) (n int, err error) { 3411 log.Printf("%s.Write(%d) = ....", c.name, len(p)) 3412 n, err = c.Conn.Write(p) 3413 log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err) 3414 return 3415 } 3416 3417 func (c *loggingConn) Read(p []byte) (n int, err error) { 3418 log.Printf("%s.Read(%d) = ....", c.name, len(p)) 3419 n, err = c.Conn.Read(p) 3420 log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err) 3421 return 3422 } 3423 3424 func (c *loggingConn) Close() (err error) { 3425 log.Printf("%s.Close() = ...", c.name) 3426 err = c.Conn.Close() 3427 log.Printf("%s.Close() = %v", c.name, err) 3428 return 3429 } 3430 3431 // checkConnErrorWriter writes to c.rwc and records any write errors to c.werr. 3432 // It only contains one field (and a pointer field at that), so it 3433 // fits in an interface value without an extra allocation. 
3434 type checkConnErrorWriter struct { 3435 c *conn 3436 } 3437 3438 func (w checkConnErrorWriter) Write(p []byte) (n int, err error) { 3439 n, err = w.c.rwc.Write(p) 3440 if err != nil && w.c.werr == nil { 3441 w.c.werr = err 3442 w.c.cancelCtx() 3443 } 3444 return 3445 } 3446 3447 func numLeadingCRorLF(v []byte) (n int) { 3448 for _, b := range v { 3449 if b == '\r' || b == '\n' { 3450 n++ 3451 continue 3452 } 3453 break 3454 } 3455 return 3456 3457 } 3458 3459 func strSliceContains(ss []string, s string) bool { 3460 for _, v := range ss { 3461 if v == s { 3462 return true 3463 } 3464 } 3465 return false 3466 } 3467 3468 // tlsRecordHeaderLooksLikeHTTP reports whether a TLS record header 3469 // looks like it might've been a misdirected plaintext HTTP request. 3470 func tlsRecordHeaderLooksLikeHTTP(hdr [5]byte) bool { 3471 switch string(hdr[:]) { 3472 case "GET /", "HEAD ", "POST ", "PUT /", "OPTIO": 3473 return true 3474 } 3475 return false 3476 }
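// Editor's sketch (added commentary, not part of the original source):
// tlsRecordHeaderLooksLikeHTTP is unexported, so the helper below is a
// hypothetical standalone copy of the same check, shown only to illustrate
// which 5-byte prefixes trigger the plaintext-HTTP-on-an-HTTPS-port reply
// produced in conn.serve above.
//
//	package main
//
//	import "fmt"
//
//	// looksLikeHTTP mirrors the check above: these prefixes are the start of
//	// common HTTP/1.x request lines rather than of a TLS record.
//	func looksLikeHTTP(hdr [5]byte) bool {
//		switch string(hdr[:]) {
//		case "GET /", "HEAD ", "POST ", "PUT /", "OPTIO":
//			return true
//		}
//		return false
//	}
//
//	func main() {
//		var hdr [5]byte
//		copy(hdr[:], "GET /")
//		fmt.Println(looksLikeHTTP(hdr)) // true: the server replies with a plain 400 Bad Request
//		copy(hdr[:], "\x16\x03\x01\x00\xc4") // a plausible TLS ClientHello record header
//		fmt.Println(looksLikeHTTP(hdr))      // false: not mistaken for plaintext HTTP
//	}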