github.com/hlts2/go@v0.0.0-20170904000733-812b34efaed8/src/net/http/server.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // HTTP server. See RFC 2616. 6 7 package http 8 9 import ( 10 "bufio" 11 "bytes" 12 "context" 13 "crypto/tls" 14 "errors" 15 "fmt" 16 "io" 17 "io/ioutil" 18 "log" 19 "net" 20 "net/textproto" 21 "net/url" 22 "os" 23 "path" 24 "runtime" 25 "strconv" 26 "strings" 27 "sync" 28 "sync/atomic" 29 "time" 30 31 "golang_org/x/net/lex/httplex" 32 ) 33 34 // Errors used by the HTTP server. 35 var ( 36 // ErrBodyNotAllowed is returned by ResponseWriter.Write calls 37 // when the HTTP method or response code does not permit a 38 // body. 39 ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body") 40 41 // ErrHijacked is returned by ResponseWriter.Write calls when 42 // the underlying connection has been hijacked using the 43 // Hijacker interface. A zero-byte write on a hijacked 44 // connection will return ErrHijacked without any other side 45 // effects. 46 ErrHijacked = errors.New("http: connection has been hijacked") 47 48 // ErrContentLength is returned by ResponseWriter.Write calls 49 // when a Handler set a Content-Length response header with a 50 // declared size and then attempted to write more bytes than 51 // declared. 52 ErrContentLength = errors.New("http: wrote more than the declared Content-Length") 53 54 // Deprecated: ErrWriteAfterFlush is no longer used. 55 ErrWriteAfterFlush = errors.New("unused") 56 ) 57 58 // A Handler responds to an HTTP request. 59 // 60 // ServeHTTP should write reply headers and data to the ResponseWriter 61 // and then return. Returning signals that the request is finished; it 62 // is not valid to use the ResponseWriter or read from the 63 // Request.Body after or concurrently with the completion of the 64 // ServeHTTP call. 65 // 66 // Depending on the HTTP client software, HTTP protocol version, and 67 // any intermediaries between the client and the Go server, it may not 68 // be possible to read from the Request.Body after writing to the 69 // ResponseWriter. Cautious handlers should read the Request.Body 70 // first, and then reply. 71 // 72 // Except for reading the body, handlers should not modify the 73 // provided Request. 74 // 75 // If ServeHTTP panics, the server (the caller of ServeHTTP) assumes 76 // that the effect of the panic was isolated to the active request. 77 // It recovers the panic, logs a stack trace to the server error log, 78 // and either closes the network connection or sends an HTTP/2 79 // RST_STREAM, depending on the HTTP protocol. To abort a handler so 80 // the client sees an interrupted response but the server doesn't log 81 // an error, panic with the value ErrAbortHandler. 82 type Handler interface { 83 ServeHTTP(ResponseWriter, *Request) 84 } 85 86 // A ResponseWriter interface is used by an HTTP handler to 87 // construct an HTTP response. 88 // 89 // A ResponseWriter may not be used after the Handler.ServeHTTP method 90 // has returned. 91 type ResponseWriter interface { 92 // Header returns the header map that will be sent by 93 // WriteHeader. The Header map also is the mechanism with which 94 // Handlers can set HTTP trailers. 95 // 96 // Changing the header map after a call to WriteHeader (or 97 // Write) has no effect unless the modified headers are 98 // trailers. 99 // 100 // There are two ways to set Trailers. 
The preferred way is to 101 // predeclare in the headers which trailers you will later 102 // send by setting the "Trailer" header to the names of the 103 // trailer keys which will come later. In this case, those 104 // keys of the Header map are treated as if they were 105 // trailers. See the example. The second way, for trailer 106 // keys not known to the Handler until after the first Write, 107 // is to prefix the Header map keys with the TrailerPrefix 108 // constant value. See TrailerPrefix. 109 // 110 // To suppress implicit response headers (such as "Date"), set 111 // their value to nil. 112 Header() Header 113 114 // Write writes the data to the connection as part of an HTTP reply. 115 // 116 // If WriteHeader has not yet been called, Write calls 117 // WriteHeader(http.StatusOK) before writing the data. If the Header 118 // does not contain a Content-Type line, Write adds a Content-Type set 119 // to the result of passing the initial 512 bytes of written data to 120 // DetectContentType. 121 // 122 // Depending on the HTTP protocol version and the client, calling 123 // Write or WriteHeader may prevent future reads on the 124 // Request.Body. For HTTP/1.x requests, handlers should read any 125 // needed request body data before writing the response. Once the 126 // headers have been flushed (due to either an explicit Flusher.Flush 127 // call or writing enough data to trigger a flush), the request body 128 // may be unavailable. For HTTP/2 requests, the Go HTTP server permits 129 // handlers to continue to read the request body while concurrently 130 // writing the response. However, such behavior may not be supported 131 // by all HTTP/2 clients. Handlers should read before writing if 132 // possible to maximize compatibility. 133 Write([]byte) (int, error) 134 135 // WriteHeader sends an HTTP response header with status code. 136 // If WriteHeader is not called explicitly, the first call to Write 137 // will trigger an implicit WriteHeader(http.StatusOK). 138 // Thus explicit calls to WriteHeader are mainly used to 139 // send error codes. 140 WriteHeader(int) 141 } 142 143 // The Flusher interface is implemented by ResponseWriters that allow 144 // an HTTP handler to flush buffered data to the client. 145 // 146 // The default HTTP/1.x and HTTP/2 ResponseWriter implementations 147 // support Flusher, but ResponseWriter wrappers may not. Handlers 148 // should always test for this ability at runtime. 149 // 150 // Note that even for ResponseWriters that support Flush, 151 // if the client is connected through an HTTP proxy, 152 // the buffered data may not reach the client until the response 153 // completes. 154 type Flusher interface { 155 // Flush sends any buffered data to the client. 156 Flush() 157 } 158 159 // The Hijacker interface is implemented by ResponseWriters that allow 160 // an HTTP handler to take over the connection. 161 // 162 // The default ResponseWriter for HTTP/1.x connections supports 163 // Hijacker, but HTTP/2 connections intentionally do not. 164 // ResponseWriter wrappers may also not support Hijacker. Handlers 165 // should always test for this ability at runtime. 166 type Hijacker interface { 167 // Hijack lets the caller take over the connection. 168 // After a call to Hijack the HTTP server library 169 // will not do anything else with the connection. 170 // 171 // It becomes the caller's responsibility to manage 172 // and close the connection. 
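// A minimal sketch of the predeclared-trailer flow and the Flusher interface
// described above, assuming a separate package that imports "fmt" and
// "net/http"; the handler and trailer names are illustrative:
//
//	func trailerHandler(w http.ResponseWriter, r *http.Request) {
//		// Predeclare the trailer key before the header is written
//		// (the preferred way to set trailers).
//		w.Header().Set("Trailer", "X-Line-Count")
//		w.WriteHeader(http.StatusOK)
//		fmt.Fprintln(w, "line 1")
//		if f, ok := w.(http.Flusher); ok {
//			f.Flush() // push the buffered data to the client now
//		}
//		fmt.Fprintln(w, "line 2")
//		// Because the key was predeclared, this value is sent as a trailer.
//		w.Header().Set("X-Line-Count", "2")
//	}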
173 // 174 // The returned net.Conn may have read or write deadlines 175 // already set, depending on the configuration of the 176 // Server. It is the caller's responsibility to set 177 // or clear those deadlines as needed. 178 // 179 // The returned bufio.Reader may contain unprocessed buffered 180 // data from the client. 181 // 182 // After a call to Hijack, the original Request.Body should 183 // not be used. 184 Hijack() (net.Conn, *bufio.ReadWriter, error) 185 } 186 187 // The CloseNotifier interface is implemented by ResponseWriters which 188 // allow detecting when the underlying connection has gone away. 189 // 190 // This mechanism can be used to cancel long operations on the server 191 // if the client has disconnected before the response is ready. 192 type CloseNotifier interface { 193 // CloseNotify returns a channel that receives at most a 194 // single value (true) when the client connection has gone 195 // away. 196 // 197 // CloseNotify may wait to notify until Request.Body has been 198 // fully read. 199 // 200 // After the Handler has returned, there is no guarantee 201 // that the channel receives a value. 202 // 203 // If the protocol is HTTP/1.1 and CloseNotify is called while 204 // processing an idempotent request (such as a GET) and 205 // HTTP/1.1 pipelining is in use, the arrival of a subsequent 206 // pipelined request may cause a value to be sent on the 207 // returned channel. In practice HTTP/1.1 pipelining is not 208 // enabled in browsers and not seen often in the wild. If this 209 // is a problem, use HTTP/2 or only use CloseNotify on methods 210 // such as POST. 211 CloseNotify() <-chan bool 212 } 213 214 var ( 215 // ServerContextKey is a context key. It can be used in HTTP 216 // handlers with context.WithValue to access the server that 217 // started the handler. The associated value will be of 218 // type *Server. 219 ServerContextKey = &contextKey{"http-server"} 220 221 // LocalAddrContextKey is a context key. It can be used in 222 // HTTP handlers with context.WithValue to access the local 223 // address the connection arrived on. 224 // The associated value will be of type net.Addr. 225 LocalAddrContextKey = &contextKey{"local-addr"} 226 ) 227 228 // A conn represents the server side of an HTTP connection. 229 type conn struct { 230 // server is the server on which the connection arrived. 231 // Immutable; never nil. 232 server *Server 233 234 // cancelCtx cancels the connection-level context. 235 cancelCtx context.CancelFunc 236 237 // rwc is the underlying network connection. 238 // This is never wrapped by other types and is the value given out 239 // to CloseNotifier callers. It is usually of type *net.TCPConn or 240 // *tls.Conn. 241 rwc net.Conn 242 243 // remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously 244 // inside the Listener's Accept goroutine, as some implementations block. 245 // It is populated immediately inside the (*conn).serve goroutine. 246 // This is the value of a Handler's (*Request).RemoteAddr. 247 remoteAddr string 248 249 // tlsState is the TLS connection state when using TLS. 250 // nil means not TLS. 251 tlsState *tls.ConnectionState 252 253 // werr is set to the first write error to rwc. 254 // It is set via checkConnErrorWriter{w}, where bufw writes. 255 werr error 256 257 // r is bufr's read source. It's a wrapper around rwc that provides 258 // io.LimitedReader-style limiting (while reading request headers) 259 // and functionality to support CloseNotifier. See *connReader docs.
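// A minimal sketch of taking over the connection via the Hijacker interface
// described above, assuming a separate package that imports "net/http"; the
// handler name and the raw response bytes are illustrative:
//
//	func hijackHandler(w http.ResponseWriter, r *http.Request) {
//		hj, ok := w.(http.Hijacker)
//		if !ok {
//			http.Error(w, "hijacking not supported", http.StatusInternalServerError)
//			return
//		}
//		conn, bufrw, err := hj.Hijack()
//		if err != nil {
//			http.Error(w, err.Error(), http.StatusInternalServerError)
//			return
//		}
//		defer conn.Close() // the caller now owns and must close the connection
//		bufrw.WriteString("HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nhi")
//		bufrw.Flush()
//	}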
260 r *connReader 261 262 // bufr reads from r. 263 bufr *bufio.Reader 264 265 // bufw writes to checkConnErrorWriter{c}, which populates werr on error. 266 bufw *bufio.Writer 267 268 // lastMethod is the method of the most recent request 269 // on this connection, if any. 270 lastMethod string 271 272 curReq atomic.Value // of *response (which has a Request in it) 273 274 curState atomic.Value // of ConnState 275 276 // mu guards hijackedv 277 mu sync.Mutex 278 279 // hijackedv is whether this connection has been hijacked 280 // by a Handler with the Hijacker interface. 281 // It is guarded by mu. 282 hijackedv bool 283 } 284 285 func (c *conn) hijacked() bool { 286 c.mu.Lock() 287 defer c.mu.Unlock() 288 return c.hijackedv 289 } 290 291 // c.mu must be held. 292 func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) { 293 if c.hijackedv { 294 return nil, nil, ErrHijacked 295 } 296 c.r.abortPendingRead() 297 298 c.hijackedv = true 299 rwc = c.rwc 300 rwc.SetDeadline(time.Time{}) 301 302 buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc)) 303 if c.r.hasByte { 304 if _, err := c.bufr.Peek(c.bufr.Buffered() + 1); err != nil { 305 return nil, nil, fmt.Errorf("unexpected Peek failure reading buffered byte: %v", err) 306 } 307 } 308 c.setState(rwc, StateHijacked) 309 return 310 } 311 312 // This should be >= 512 bytes for DetectContentType, 313 // but otherwise it's somewhat arbitrary. 314 const bufferBeforeChunkingSize = 2048 315 316 // chunkWriter writes to a response's conn buffer, and is the writer 317 // wrapped by the response.bufw buffered writer. 318 // 319 // chunkWriter also is responsible for finalizing the Header, including 320 // conditionally setting the Content-Type and setting a Content-Length 321 // in cases where the handler's final output is smaller than the buffer 322 // size. It also conditionally adds chunk headers, when in chunking mode. 323 // 324 // See the comment above (*response).Write for the entire write flow. 325 type chunkWriter struct { 326 res *response 327 328 // header is either nil or a deep clone of res.handlerHeader 329 // at the time of res.WriteHeader, if res.WriteHeader is 330 // called and extra buffering is being done to calculate 331 // Content-Type and/or Content-Length. 332 header Header 333 334 // wroteHeader tells whether the header's been written to "the 335 // wire" (or rather: w.conn.buf). this is unlike 336 // (*response).wroteHeader, which tells only whether it was 337 // logically written. 338 wroteHeader bool 339 340 // set by the writeHeader method: 341 chunking bool // using chunked transfer encoding for reply body 342 } 343 344 var ( 345 crlf = []byte("\r\n") 346 colonSpace = []byte(": ") 347 ) 348 349 func (cw *chunkWriter) Write(p []byte) (n int, err error) { 350 if !cw.wroteHeader { 351 cw.writeHeader(p) 352 } 353 if cw.res.req.Method == "HEAD" { 354 // Eat writes. 
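// For a HEAD request the write is simply consumed below without touching the
// wire; otherwise, when chunking is enabled, each Write is framed as a hex
// size line, the payload, and a trailing CRLF (a 5-byte write goes out as
// "5\r\nhello\r\n"), and close() later appends the terminating "0\r\n" chunk,
// any trailers, and a final blank line.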
355 return len(p), nil 356 } 357 if cw.chunking { 358 _, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p)) 359 if err != nil { 360 cw.res.conn.rwc.Close() 361 return 362 } 363 } 364 n, err = cw.res.conn.bufw.Write(p) 365 if cw.chunking && err == nil { 366 _, err = cw.res.conn.bufw.Write(crlf) 367 } 368 if err != nil { 369 cw.res.conn.rwc.Close() 370 } 371 return 372 } 373 374 func (cw *chunkWriter) flush() { 375 if !cw.wroteHeader { 376 cw.writeHeader(nil) 377 } 378 cw.res.conn.bufw.Flush() 379 } 380 381 func (cw *chunkWriter) close() { 382 if !cw.wroteHeader { 383 cw.writeHeader(nil) 384 } 385 if cw.chunking { 386 bw := cw.res.conn.bufw // conn's bufio writer 387 // zero chunk to mark EOF 388 bw.WriteString("0\r\n") 389 if trailers := cw.res.finalTrailers(); trailers != nil { 390 trailers.Write(bw) // the writer handles noting errors 391 } 392 // final blank line after the trailers (whether 393 // present or not) 394 bw.WriteString("\r\n") 395 } 396 } 397 398 // A response represents the server side of an HTTP response. 399 type response struct { 400 conn *conn 401 req *Request // request for this response 402 reqBody io.ReadCloser 403 cancelCtx context.CancelFunc // when ServeHTTP exits 404 wroteHeader bool // reply header has been (logically) written 405 wroteContinue bool // 100 Continue response was written 406 wants10KeepAlive bool // HTTP/1.0 w/ Connection "keep-alive" 407 wantsClose bool // HTTP request has Connection "close" 408 409 w *bufio.Writer // buffers output in chunks to chunkWriter 410 cw chunkWriter 411 412 // handlerHeader is the Header that Handlers get access to, 413 // which may be retained and mutated even after WriteHeader. 414 // handlerHeader is copied into cw.header at WriteHeader 415 // time, and privately mutated thereafter. 416 handlerHeader Header 417 calledHeader bool // handler accessed handlerHeader via Header 418 419 written int64 // number of bytes written in body 420 contentLength int64 // explicitly-declared Content-Length; or -1 421 status int // status code passed to WriteHeader 422 423 // close connection after this reply. set on request and 424 // updated after response from handler if there's a 425 // "Connection: keep-alive" response header and a 426 // Content-Length. 427 closeAfterReply bool 428 429 // requestBodyLimitHit is set by requestTooLarge when 430 // maxBytesReader hits its max size. It is checked in 431 // WriteHeader, to make sure we don't consume the 432 // remaining request body to try to advance to the next HTTP 433 // request. Instead, when this is set, we stop reading 434 // subsequent requests on this connection and stop reading 435 // input from it. 436 requestBodyLimitHit bool 437 438 // trailers are the headers to be sent after the handler 439 // finishes writing the body. This field is initialized from 440 // the Trailer response header when the response header is 441 // written. 442 trailers []string 443 444 handlerDone atomicBool // set true when the handler exits 445 446 // Buffers for Date, Content-Length, and status code 447 dateBuf [len(TimeFormat)]byte 448 clenBuf [10]byte 449 statusBuf [3]byte 450 451 // closeNotifyCh is the channel returned by CloseNotify. 452 // TODO(bradfitz): this is currently (for Go 1.8) always 453 // non-nil. Make this lazily-created again as it used to be? 
454 closeNotifyCh chan bool 455 didCloseNotify int32 // atomic (only 0->1 winner should send) 456 } 457 458 // TrailerPrefix is a magic prefix for ResponseWriter.Header map keys 459 // that, if present, signals that the map entry is actually for 460 // the response trailers, and not the response headers. The prefix 461 // is stripped after the ServeHTTP call finishes and the values are 462 // sent in the trailers. 463 // 464 // This mechanism is intended only for trailers that are not known 465 // prior to the headers being written. If the set of trailers is fixed 466 // or known before the header is written, the normal Go trailers mechanism 467 // is preferred: 468 // https://golang.org/pkg/net/http/#ResponseWriter 469 // https://golang.org/pkg/net/http/#example_ResponseWriter_trailers 470 const TrailerPrefix = "Trailer:" 471 472 // finalTrailers is called after the Handler exits and returns a non-nil 473 // value if the Handler set any trailers. 474 func (w *response) finalTrailers() Header { 475 var t Header 476 for k, vv := range w.handlerHeader { 477 if strings.HasPrefix(k, TrailerPrefix) { 478 if t == nil { 479 t = make(Header) 480 } 481 t[strings.TrimPrefix(k, TrailerPrefix)] = vv 482 } 483 } 484 for _, k := range w.trailers { 485 if t == nil { 486 t = make(Header) 487 } 488 for _, v := range w.handlerHeader[k] { 489 t.Add(k, v) 490 } 491 } 492 return t 493 } 494 495 type atomicBool int32 496 497 func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } 498 func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } 499 500 // declareTrailer is called for each Trailer header when the 501 // response header is written. It notes that a header will need to be 502 // written in the trailers at the end of the response. 503 func (w *response) declareTrailer(k string) { 504 k = CanonicalHeaderKey(k) 505 switch k { 506 case "Transfer-Encoding", "Content-Length", "Trailer": 507 // Forbidden by RFC 2616 14.40. 508 return 509 } 510 w.trailers = append(w.trailers, k) 511 } 512 513 // requestTooLarge is called by maxBytesReader when too much input has 514 // been read from the client. 515 func (w *response) requestTooLarge() { 516 w.closeAfterReply = true 517 w.requestBodyLimitHit = true 518 if !w.wroteHeader { 519 w.Header().Set("Connection", "close") 520 } 521 } 522 523 // needsSniff reports whether a Content-Type still needs to be sniffed. 524 func (w *response) needsSniff() bool { 525 _, haveType := w.handlerHeader["Content-Type"] 526 return !w.cw.wroteHeader && !haveType && w.written < sniffLen 527 } 528 529 // writerOnly hides an io.Writer value's optional ReadFrom method 530 // from io.Copy. 531 type writerOnly struct { 532 io.Writer 533 } 534 535 func srcIsRegularFile(src io.Reader) (isRegular bool, err error) { 536 switch v := src.(type) { 537 case *os.File: 538 fi, err := v.Stat() 539 if err != nil { 540 return false, err 541 } 542 return fi.Mode().IsRegular(), nil 543 case *io.LimitedReader: 544 return srcIsRegularFile(v.R) 545 default: 546 return 547 } 548 } 549 550 // ReadFrom is here to optimize copying from an *os.File regular file 551 // to a *net.TCPConn with sendfile. 552 func (w *response) ReadFrom(src io.Reader) (n int64, err error) { 553 // Our underlying w.conn.rwc is usually a *TCPConn (with its 554 // own ReadFrom method). If not, or if our src isn't a regular 555 // file, just fall back to the normal copy method. 
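// A minimal sketch of the TrailerPrefix escape hatch defined above, for a
// trailer value that is not known until after the first Write; it assumes a
// separate package that imports "fmt", "strconv", and "net/http", and the
// handler, field, and row data are illustrative. The explicit Flush keeps the
// response chunked so the trailer can still be sent at the end:
//
//	func rowsHandler(w http.ResponseWriter, r *http.Request) {
//		n := 0
//		for _, row := range []string{"a,b", "c,d"} {
//			fmt.Fprintln(w, row)
//			if f, ok := w.(http.Flusher); ok {
//				f.Flush() // stream each row as it is produced
//			}
//			n++
//		}
//		// The prefix is stripped and X-Row-Count is sent as a trailer
//		// after the handler returns.
//		w.Header().Set(http.TrailerPrefix+"X-Row-Count", strconv.Itoa(n))
//	}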
556 rf, ok := w.conn.rwc.(io.ReaderFrom) 557 regFile, err := srcIsRegularFile(src) 558 if err != nil { 559 return 0, err 560 } 561 if !ok || !regFile { 562 bufp := copyBufPool.Get().(*[]byte) 563 defer copyBufPool.Put(bufp) 564 return io.CopyBuffer(writerOnly{w}, src, *bufp) 565 } 566 567 // sendfile path: 568 569 if !w.wroteHeader { 570 w.WriteHeader(StatusOK) 571 } 572 573 if w.needsSniff() { 574 n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen)) 575 n += n0 576 if err != nil { 577 return n, err 578 } 579 } 580 581 w.w.Flush() // get rid of any previous writes 582 w.cw.flush() // make sure Header is written; flush data to rwc 583 584 // Now that cw has been flushed, its chunking field is guaranteed initialized. 585 if !w.cw.chunking && w.bodyAllowed() { 586 n0, err := rf.ReadFrom(src) 587 n += n0 588 w.written += n0 589 return n, err 590 } 591 592 n0, err := io.Copy(writerOnly{w}, src) 593 n += n0 594 return n, err 595 } 596 597 // debugServerConnections controls whether all server connections are wrapped 598 // with a verbose logging wrapper. 599 const debugServerConnections = false 600 601 // Create new connection from rwc. 602 func (srv *Server) newConn(rwc net.Conn) *conn { 603 c := &conn{ 604 server: srv, 605 rwc: rwc, 606 } 607 if debugServerConnections { 608 c.rwc = newLoggingConn("server", c.rwc) 609 } 610 return c 611 } 612 613 type readResult struct { 614 n int 615 err error 616 b byte // byte read, if n == 1 617 } 618 619 // connReader is the io.Reader wrapper used by *conn. It combines a 620 // selectively-activated io.LimitedReader (to bound request header 621 // read sizes) with support for selectively keeping an io.Reader.Read 622 // call blocked in a background goroutine to wait for activity and 623 // trigger a CloseNotifier channel. 624 type connReader struct { 625 conn *conn 626 627 mu sync.Mutex // guards following 628 hasByte bool 629 byteBuf [1]byte 630 cond *sync.Cond 631 inRead bool 632 aborted bool // set true before conn.rwc deadline is set to past 633 remain int64 // bytes remaining 634 } 635 636 func (cr *connReader) lock() { 637 cr.mu.Lock() 638 if cr.cond == nil { 639 cr.cond = sync.NewCond(&cr.mu) 640 } 641 } 642 643 func (cr *connReader) unlock() { cr.mu.Unlock() } 644 645 func (cr *connReader) startBackgroundRead() { 646 cr.lock() 647 defer cr.unlock() 648 if cr.inRead { 649 panic("invalid concurrent Body.Read call") 650 } 651 if cr.hasByte { 652 return 653 } 654 cr.inRead = true 655 cr.conn.rwc.SetReadDeadline(time.Time{}) 656 go cr.backgroundRead() 657 } 658 659 func (cr *connReader) backgroundRead() { 660 n, err := cr.conn.rwc.Read(cr.byteBuf[:]) 661 cr.lock() 662 if n == 1 { 663 cr.hasByte = true 664 // We were at EOF already (since we wouldn't be in a 665 // background read otherwise), so this is a pipelined 666 // HTTP request. 667 cr.closeNotifyFromPipelinedRequest() 668 } 669 if ne, ok := err.(net.Error); ok && cr.aborted && ne.Timeout() { 670 // Ignore this error. It's the expected error from 671 // another goroutine calling abortPendingRead. 
672 } else if err != nil { 673 cr.handleReadError(err) 674 } 675 cr.aborted = false 676 cr.inRead = false 677 cr.unlock() 678 cr.cond.Broadcast() 679 } 680 681 func (cr *connReader) abortPendingRead() { 682 cr.lock() 683 defer cr.unlock() 684 if !cr.inRead { 685 return 686 } 687 cr.aborted = true 688 cr.conn.rwc.SetReadDeadline(aLongTimeAgo) 689 for cr.inRead { 690 cr.cond.Wait() 691 } 692 cr.conn.rwc.SetReadDeadline(time.Time{}) 693 } 694 695 func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain } 696 func (cr *connReader) setInfiniteReadLimit() { cr.remain = maxInt64 } 697 func (cr *connReader) hitReadLimit() bool { return cr.remain <= 0 } 698 699 // may be called from multiple goroutines. 700 func (cr *connReader) handleReadError(err error) { 701 cr.conn.cancelCtx() 702 cr.closeNotify() 703 } 704 705 // closeNotifyFromPipelinedRequest simply calls closeNotify. 706 // 707 // This method wrapper is here for documentation. The callers are the 708 // cases where we send on the closenotify channel because of a 709 // pipelined HTTP request, per the previous Go behavior and 710 // documentation (that this "MAY" happen). 711 // 712 // TODO: consider changing this behavior and making context 713 // cancelation and closenotify work the same. 714 func (cr *connReader) closeNotifyFromPipelinedRequest() { 715 cr.closeNotify() 716 } 717 718 // may be called from multiple goroutines. 719 func (cr *connReader) closeNotify() { 720 res, _ := cr.conn.curReq.Load().(*response) 721 if res != nil { 722 if atomic.CompareAndSwapInt32(&res.didCloseNotify, 0, 1) { 723 res.closeNotifyCh <- true 724 } 725 } 726 } 727 728 func (cr *connReader) Read(p []byte) (n int, err error) { 729 cr.lock() 730 if cr.inRead { 731 cr.unlock() 732 if cr.conn.hijacked() { 733 panic("invalid Body.Read call. After hijacked, the original Request must not be used") 734 } 735 panic("invalid concurrent Body.Read call") 736 } 737 if cr.hitReadLimit() { 738 cr.unlock() 739 return 0, io.EOF 740 } 741 if len(p) == 0 { 742 cr.unlock() 743 return 0, nil 744 } 745 if int64(len(p)) > cr.remain { 746 p = p[:cr.remain] 747 } 748 if cr.hasByte { 749 p[0] = cr.byteBuf[0] 750 cr.hasByte = false 751 cr.unlock() 752 return 1, nil 753 } 754 cr.inRead = true 755 cr.unlock() 756 n, err = cr.conn.rwc.Read(p) 757 758 cr.lock() 759 cr.inRead = false 760 if err != nil { 761 cr.handleReadError(err) 762 } 763 cr.remain -= int64(n) 764 cr.unlock() 765 766 cr.cond.Broadcast() 767 return n, err 768 } 769 770 var ( 771 bufioReaderPool sync.Pool 772 bufioWriter2kPool sync.Pool 773 bufioWriter4kPool sync.Pool 774 ) 775 776 var copyBufPool = sync.Pool{ 777 New: func() interface{} { 778 b := make([]byte, 32*1024) 779 return &b 780 }, 781 } 782 783 func bufioWriterPool(size int) *sync.Pool { 784 switch size { 785 case 2 << 10: 786 return &bufioWriter2kPool 787 case 4 << 10: 788 return &bufioWriter4kPool 789 } 790 return nil 791 } 792 793 func newBufioReader(r io.Reader) *bufio.Reader { 794 if v := bufioReaderPool.Get(); v != nil { 795 br := v.(*bufio.Reader) 796 br.Reset(r) 797 return br 798 } 799 // Note: if this reader size is ever changed, update 800 // TestHandlerBodyClose's assumptions. 
801 return bufio.NewReader(r) 802 } 803 804 func putBufioReader(br *bufio.Reader) { 805 br.Reset(nil) 806 bufioReaderPool.Put(br) 807 } 808 809 func newBufioWriterSize(w io.Writer, size int) *bufio.Writer { 810 pool := bufioWriterPool(size) 811 if pool != nil { 812 if v := pool.Get(); v != nil { 813 bw := v.(*bufio.Writer) 814 bw.Reset(w) 815 return bw 816 } 817 } 818 return bufio.NewWriterSize(w, size) 819 } 820 821 func putBufioWriter(bw *bufio.Writer) { 822 bw.Reset(nil) 823 if pool := bufioWriterPool(bw.Available()); pool != nil { 824 pool.Put(bw) 825 } 826 } 827 828 // DefaultMaxHeaderBytes is the maximum permitted size of the headers 829 // in an HTTP request. 830 // This can be overridden by setting Server.MaxHeaderBytes. 831 const DefaultMaxHeaderBytes = 1 << 20 // 1 MB 832 833 func (srv *Server) maxHeaderBytes() int { 834 if srv.MaxHeaderBytes > 0 { 835 return srv.MaxHeaderBytes 836 } 837 return DefaultMaxHeaderBytes 838 } 839 840 func (srv *Server) initialReadLimitSize() int64 { 841 return int64(srv.maxHeaderBytes()) + 4096 // bufio slop 842 } 843 844 // wrapper around io.ReadCloser which on first read, sends an 845 // HTTP/1.1 100 Continue header 846 type expectContinueReader struct { 847 resp *response 848 readCloser io.ReadCloser 849 closed bool 850 sawEOF bool 851 } 852 853 func (ecr *expectContinueReader) Read(p []byte) (n int, err error) { 854 if ecr.closed { 855 return 0, ErrBodyReadAfterClose 856 } 857 if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() { 858 ecr.resp.wroteContinue = true 859 ecr.resp.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n") 860 ecr.resp.conn.bufw.Flush() 861 } 862 n, err = ecr.readCloser.Read(p) 863 if err == io.EOF { 864 ecr.sawEOF = true 865 } 866 return 867 } 868 869 func (ecr *expectContinueReader) Close() error { 870 ecr.closed = true 871 return ecr.readCloser.Close() 872 } 873 874 // TimeFormat is the time format to use when generating times in HTTP 875 // headers. It is like time.RFC1123 but hard-codes GMT as the time 876 // zone. The time being formatted must be in UTC for Format to 877 // generate the correct format. 878 // 879 // For parsing this time format, see ParseTime. 880 const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" 881 882 // appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat)) 883 func appendTime(b []byte, t time.Time) []byte { 884 const days = "SunMonTueWedThuFriSat" 885 const months = "JanFebMarAprMayJunJulAugSepOctNovDec" 886 887 t = t.UTC() 888 yy, mm, dd := t.Date() 889 hh, mn, ss := t.Clock() 890 day := days[3*t.Weekday():] 891 mon := months[3*(mm-1):] 892 893 return append(b, 894 day[0], day[1], day[2], ',', ' ', 895 byte('0'+dd/10), byte('0'+dd%10), ' ', 896 mon[0], mon[1], mon[2], ' ', 897 byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ', 898 byte('0'+hh/10), byte('0'+hh%10), ':', 899 byte('0'+mn/10), byte('0'+mn%10), ':', 900 byte('0'+ss/10), byte('0'+ss%10), ' ', 901 'G', 'M', 'T') 902 } 903 904 var errTooLarge = errors.New("http: request too large") 905 906 // Read next request from connection. 
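// A minimal sketch of tuning the header and read limits used below and of
// producing a TimeFormat timestamp, assuming a separate package that imports
// "log", "time", and "net/http"; the address and values are illustrative:
//
//	func main() {
//		// The time must be in UTC for TimeFormat to yield a valid value.
//		log.Println(time.Now().UTC().Format(http.TimeFormat))
//
//		srv := &http.Server{
//			Addr:              ":8080",
//			MaxHeaderBytes:    1 << 16,         // overrides DefaultMaxHeaderBytes
//			ReadHeaderTimeout: 5 * time.Second, // bounds the header-read deadline
//			ReadTimeout:       30 * time.Second,
//		}
//		log.Fatal(srv.ListenAndServe())
//	}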
907 func (c *conn) readRequest(ctx context.Context) (w *response, err error) { 908 if c.hijacked() { 909 return nil, ErrHijacked 910 } 911 912 var ( 913 wholeReqDeadline time.Time // or zero if none 914 hdrDeadline time.Time // or zero if none 915 ) 916 t0 := time.Now() 917 if d := c.server.readHeaderTimeout(); d != 0 { 918 hdrDeadline = t0.Add(d) 919 } 920 if d := c.server.ReadTimeout; d != 0 { 921 wholeReqDeadline = t0.Add(d) 922 } 923 c.rwc.SetReadDeadline(hdrDeadline) 924 if d := c.server.WriteTimeout; d != 0 { 925 defer func() { 926 c.rwc.SetWriteDeadline(time.Now().Add(d)) 927 }() 928 } 929 930 c.r.setReadLimit(c.server.initialReadLimitSize()) 931 if c.lastMethod == "POST" { 932 // RFC 2616 section 4.1 tolerance for old buggy clients. 933 peek, _ := c.bufr.Peek(4) // ReadRequest will get err below 934 c.bufr.Discard(numLeadingCRorLF(peek)) 935 } 936 req, err := readRequest(c.bufr, keepHostHeader) 937 if err != nil { 938 if c.r.hitReadLimit() { 939 return nil, errTooLarge 940 } 941 return nil, err 942 } 943 944 if !http1ServerSupportsRequest(req) { 945 return nil, badRequestError("unsupported protocol version") 946 } 947 948 c.lastMethod = req.Method 949 c.r.setInfiniteReadLimit() 950 951 hosts, haveHost := req.Header["Host"] 952 isH2Upgrade := req.isH2Upgrade() 953 if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade && req.Method != "CONNECT" { 954 return nil, badRequestError("missing required Host header") 955 } 956 if len(hosts) > 1 { 957 return nil, badRequestError("too many Host headers") 958 } 959 if len(hosts) == 1 && !httplex.ValidHostHeader(hosts[0]) { 960 return nil, badRequestError("malformed Host header") 961 } 962 for k, vv := range req.Header { 963 if !httplex.ValidHeaderFieldName(k) { 964 return nil, badRequestError("invalid header name") 965 } 966 for _, v := range vv { 967 if !httplex.ValidHeaderFieldValue(v) { 968 return nil, badRequestError("invalid header value") 969 } 970 } 971 } 972 delete(req.Header, "Host") 973 974 ctx, cancelCtx := context.WithCancel(ctx) 975 req.ctx = ctx 976 req.RemoteAddr = c.remoteAddr 977 req.TLS = c.tlsState 978 if body, ok := req.Body.(*body); ok { 979 body.doEarlyClose = true 980 } 981 982 // Adjust the read deadline if necessary. 983 if !hdrDeadline.Equal(wholeReqDeadline) { 984 c.rwc.SetReadDeadline(wholeReqDeadline) 985 } 986 987 w = &response{ 988 conn: c, 989 cancelCtx: cancelCtx, 990 req: req, 991 reqBody: req.Body, 992 handlerHeader: make(Header), 993 contentLength: -1, 994 closeNotifyCh: make(chan bool, 1), 995 996 // We populate these ahead of time so we're not 997 // reading from req.Header after their Handler starts 998 // and maybe mutates it (Issue 14940) 999 wants10KeepAlive: req.wantsHttp10KeepAlive(), 1000 wantsClose: req.wantsClose(), 1001 } 1002 if isH2Upgrade { 1003 w.closeAfterReply = true 1004 } 1005 w.cw.res = w 1006 w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize) 1007 return w, nil 1008 } 1009 1010 // http1ServerSupportsRequest reports whether Go's HTTP/1.x server 1011 // supports the given request. 1012 func http1ServerSupportsRequest(req *Request) bool { 1013 if req.ProtoMajor == 1 { 1014 return true 1015 } 1016 // Accept "PRI * HTTP/2.0" upgrade requests, so Handlers can 1017 // wire up their own HTTP/2 upgrades. 1018 if req.ProtoMajor == 2 && req.ProtoMinor == 0 && 1019 req.Method == "PRI" && req.RequestURI == "*" { 1020 return true 1021 } 1022 // Reject HTTP/0.x, and all other HTTP/2+ requests (which 1023 // aren't encoded in ASCII anyway). 
1024 return false 1025 } 1026 1027 func (w *response) Header() Header { 1028 if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader { 1029 // Accessing the header between logically writing it 1030 // and physically writing it means we need to allocate 1031 // a clone to snapshot the logically written state. 1032 w.cw.header = w.handlerHeader.clone() 1033 } 1034 w.calledHeader = true 1035 return w.handlerHeader 1036 } 1037 1038 // maxPostHandlerReadBytes is the max number of Request.Body bytes not 1039 // consumed by a handler that the server will read from the client 1040 // in order to keep a connection alive. If there are more bytes than 1041 // this then the server to be paranoid instead sends a "Connection: 1042 // close" response. 1043 // 1044 // This number is approximately what a typical machine's TCP buffer 1045 // size is anyway. (if we have the bytes on the machine, we might as 1046 // well read them) 1047 const maxPostHandlerReadBytes = 256 << 10 1048 1049 func (w *response) WriteHeader(code int) { 1050 if w.conn.hijacked() { 1051 w.conn.server.logf("http: response.WriteHeader on hijacked connection") 1052 return 1053 } 1054 if w.wroteHeader { 1055 w.conn.server.logf("http: multiple response.WriteHeader calls") 1056 return 1057 } 1058 w.wroteHeader = true 1059 w.status = code 1060 1061 if w.calledHeader && w.cw.header == nil { 1062 w.cw.header = w.handlerHeader.clone() 1063 } 1064 1065 if cl := w.handlerHeader.get("Content-Length"); cl != "" { 1066 v, err := strconv.ParseInt(cl, 10, 64) 1067 if err == nil && v >= 0 { 1068 w.contentLength = v 1069 } else { 1070 w.conn.server.logf("http: invalid Content-Length of %q", cl) 1071 w.handlerHeader.Del("Content-Length") 1072 } 1073 } 1074 } 1075 1076 // extraHeader is the set of headers sometimes added by chunkWriter.writeHeader. 1077 // This type is used to avoid extra allocations from cloning and/or populating 1078 // the response Header map and all its 1-element slices. 1079 type extraHeader struct { 1080 contentType string 1081 connection string 1082 transferEncoding string 1083 date []byte // written if not nil 1084 contentLength []byte // written if not nil 1085 } 1086 1087 // Sorted the same as extraHeader.Write's loop. 1088 var extraHeaderKeys = [][]byte{ 1089 []byte("Content-Type"), 1090 []byte("Connection"), 1091 []byte("Transfer-Encoding"), 1092 } 1093 1094 var ( 1095 headerContentLength = []byte("Content-Length: ") 1096 headerDate = []byte("Date: ") 1097 ) 1098 1099 // Write writes the headers described in h to w. 1100 // 1101 // This method has a value receiver, despite the somewhat large size 1102 // of h, because it prevents an allocation. The escape analysis isn't 1103 // smart enough to realize this function doesn't mutate h. 1104 func (h extraHeader) Write(w *bufio.Writer) { 1105 if h.date != nil { 1106 w.Write(headerDate) 1107 w.Write(h.date) 1108 w.Write(crlf) 1109 } 1110 if h.contentLength != nil { 1111 w.Write(headerContentLength) 1112 w.Write(h.contentLength) 1113 w.Write(crlf) 1114 } 1115 for i, v := range []string{h.contentType, h.connection, h.transferEncoding} { 1116 if v != "" { 1117 w.Write(extraHeaderKeys[i]) 1118 w.Write(colonSpace) 1119 w.WriteString(v) 1120 w.Write(crlf) 1121 } 1122 } 1123 } 1124 1125 // writeHeader finalizes the header sent to the client and writes it 1126 // to cw.res.conn.bufw. 1127 // 1128 // p is not written by writeHeader, but is the first chunk of the body 1129 // that will be written. It is sniffed for a Content-Type if none is 1130 // set explicitly. 
It's also used to set the Content-Length, if the 1131 // total body size was small and the handler has already finished 1132 // running. 1133 func (cw *chunkWriter) writeHeader(p []byte) { 1134 if cw.wroteHeader { 1135 return 1136 } 1137 cw.wroteHeader = true 1138 1139 w := cw.res 1140 keepAlivesEnabled := w.conn.server.doKeepAlives() 1141 isHEAD := w.req.Method == "HEAD" 1142 1143 // header is written out to w.conn.buf below. Depending on the 1144 // state of the handler, we either own the map or not. If we 1145 // don't own it, the exclude map is created lazily for 1146 // WriteSubset to remove headers. The setHeader struct holds 1147 // headers we need to add. 1148 header := cw.header 1149 owned := header != nil 1150 if !owned { 1151 header = w.handlerHeader 1152 } 1153 var excludeHeader map[string]bool 1154 delHeader := func(key string) { 1155 if owned { 1156 header.Del(key) 1157 return 1158 } 1159 if _, ok := header[key]; !ok { 1160 return 1161 } 1162 if excludeHeader == nil { 1163 excludeHeader = make(map[string]bool) 1164 } 1165 excludeHeader[key] = true 1166 } 1167 var setHeader extraHeader 1168 1169 // Don't write out the fake "Trailer:foo" keys. See TrailerPrefix. 1170 trailers := false 1171 for k := range cw.header { 1172 if strings.HasPrefix(k, TrailerPrefix) { 1173 if excludeHeader == nil { 1174 excludeHeader = make(map[string]bool) 1175 } 1176 excludeHeader[k] = true 1177 trailers = true 1178 } 1179 } 1180 for _, v := range cw.header["Trailer"] { 1181 trailers = true 1182 foreachHeaderElement(v, cw.res.declareTrailer) 1183 } 1184 1185 te := header.get("Transfer-Encoding") 1186 hasTE := te != "" 1187 1188 // If the handler is done but never sent a Content-Length 1189 // response header and this is our first (and last) write, set 1190 // it, even to zero. This helps HTTP/1.0 clients keep their 1191 // "keep-alive" connections alive. 1192 // Exceptions: 304/204/1xx responses never get Content-Length, and if 1193 // it was a HEAD request, we don't know the difference between 1194 // 0 actual bytes and 0 bytes because the handler noticed it 1195 // was a HEAD request and chose not to write anything. So for 1196 // HEAD, the handler should either write the Content-Length or 1197 // write non-zero bytes. If it's actually 0 bytes and the 1198 // handler never looked at the Request.Method, we just don't 1199 // send a Content-Length header. 1200 // Further, we don't send an automatic Content-Length if they 1201 // set a Transfer-Encoding, because they're generally incompatible. 1202 if w.handlerDone.isSet() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) { 1203 w.contentLength = int64(len(p)) 1204 setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10) 1205 } 1206 1207 // If this was an HTTP/1.0 request with keep-alive and we sent a 1208 // Content-Length back, we can make this a keep-alive response ... 1209 if w.wants10KeepAlive && keepAlivesEnabled { 1210 sentLength := header.get("Content-Length") != "" 1211 if sentLength && header.get("Connection") == "keep-alive" { 1212 w.closeAfterReply = false 1213 } 1214 } 1215 1216 // Check for a explicit (and valid) Content-Length header. 
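// A minimal sketch of a handler declaring an explicit Content-Length, which
// WriteHeader above parses and records; it assumes a separate package that
// imports "strconv" and "net/http", and the handler name and body are
// illustrative:
//
//	func fixedBodyHandler(w http.ResponseWriter, r *http.Request) {
//		body := []byte("exactly these bytes")
//		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
//		w.Header().Set("Content-Length", strconv.Itoa(len(body)))
//		w.WriteHeader(http.StatusOK)
//		w.Write(body) // writing more than declared would yield ErrContentLength
//	}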
1217 hasCL := w.contentLength != -1 1218 1219 if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) { 1220 _, connectionHeaderSet := header["Connection"] 1221 if !connectionHeaderSet { 1222 setHeader.connection = "keep-alive" 1223 } 1224 } else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose { 1225 w.closeAfterReply = true 1226 } 1227 1228 if header.get("Connection") == "close" || !keepAlivesEnabled { 1229 w.closeAfterReply = true 1230 } 1231 1232 // If the client wanted a 100-continue but we never sent it to 1233 // them (or, more strictly: we never finished reading their 1234 // request body), don't reuse this connection because it's now 1235 // in an unknown state: we might be sending this response at 1236 // the same time the client is now sending its request body 1237 // after a timeout. (Some HTTP clients send Expect: 1238 // 100-continue but knowing that some servers don't support 1239 // it, the clients set a timer and send the body later anyway) 1240 // If we haven't seen EOF, we can't skip over the unread body 1241 // because we don't know if the next bytes on the wire will be 1242 // the body-following-the-timer or the subsequent request. 1243 // See Issue 11549. 1244 if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF { 1245 w.closeAfterReply = true 1246 } 1247 1248 // Per RFC 2616, we should consume the request body before 1249 // replying, if the handler hasn't already done so. But we 1250 // don't want to do an unbounded amount of reading here for 1251 // DoS reasons, so we only try up to a threshold. 1252 // TODO(bradfitz): where does RFC 2616 say that? See Issue 15527 1253 // about HTTP/1.x Handlers concurrently reading and writing, like 1254 // HTTP/2 handlers can do. Maybe this code should be relaxed? 1255 if w.req.ContentLength != 0 && !w.closeAfterReply { 1256 var discard, tooBig bool 1257 1258 switch bdy := w.req.Body.(type) { 1259 case *expectContinueReader: 1260 if bdy.resp.wroteContinue { 1261 discard = true 1262 } 1263 case *body: 1264 bdy.mu.Lock() 1265 switch { 1266 case bdy.closed: 1267 if !bdy.sawEOF { 1268 // Body was closed in handler with non-EOF error. 1269 w.closeAfterReply = true 1270 } 1271 case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes: 1272 tooBig = true 1273 default: 1274 discard = true 1275 } 1276 bdy.mu.Unlock() 1277 default: 1278 discard = true 1279 } 1280 1281 if discard { 1282 _, err := io.CopyN(ioutil.Discard, w.reqBody, maxPostHandlerReadBytes+1) 1283 switch err { 1284 case nil: 1285 // There must be even more data left over. 1286 tooBig = true 1287 case ErrBodyReadAfterClose: 1288 // Body was already consumed and closed. 1289 case io.EOF: 1290 // The remaining body was just consumed, close it. 1291 err = w.reqBody.Close() 1292 if err != nil { 1293 w.closeAfterReply = true 1294 } 1295 default: 1296 // Some other kind of error occurred, like a read timeout, or 1297 // corrupt chunked encoding. In any case, whatever remains 1298 // on the wire must not be parsed as another HTTP request. 1299 w.closeAfterReply = true 1300 } 1301 } 1302 1303 if tooBig { 1304 w.requestTooLarge() 1305 delHeader("Connection") 1306 setHeader.connection = "close" 1307 } 1308 } 1309 1310 code := w.status 1311 if bodyAllowedForStatus(code) { 1312 // If no content type, apply sniffing algorithm to body. 
1313 _, haveType := header["Content-Type"] 1314 if !haveType && !hasTE { 1315 setHeader.contentType = DetectContentType(p) 1316 } 1317 } else { 1318 for _, k := range suppressedHeaders(code) { 1319 delHeader(k) 1320 } 1321 } 1322 1323 if _, ok := header["Date"]; !ok { 1324 setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now()) 1325 } 1326 1327 if hasCL && hasTE && te != "identity" { 1328 // TODO: return an error if WriteHeader gets a return parameter 1329 // For now just ignore the Content-Length. 1330 w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d", 1331 te, w.contentLength) 1332 delHeader("Content-Length") 1333 hasCL = false 1334 } 1335 1336 if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) { 1337 // do nothing 1338 } else if code == StatusNoContent { 1339 delHeader("Transfer-Encoding") 1340 } else if hasCL { 1341 delHeader("Transfer-Encoding") 1342 } else if w.req.ProtoAtLeast(1, 1) { 1343 // HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no 1344 // content-length has been provided. The connection must be closed after the 1345 // reply is written, and no chunking is to be done. This is the setup 1346 // recommended in the Server-Sent Events candidate recommendation 11, 1347 // section 8. 1348 if hasTE && te == "identity" { 1349 cw.chunking = false 1350 w.closeAfterReply = true 1351 } else { 1352 // HTTP/1.1 or greater: use chunked transfer encoding 1353 // to avoid closing the connection at EOF. 1354 cw.chunking = true 1355 setHeader.transferEncoding = "chunked" 1356 if hasTE && te == "chunked" { 1357 // We will send the chunked Transfer-Encoding header later. 1358 delHeader("Transfer-Encoding") 1359 } 1360 } 1361 } else { 1362 // HTTP version < 1.1: cannot do chunked transfer 1363 // encoding and we don't know the Content-Length so 1364 // signal EOF by closing connection. 1365 w.closeAfterReply = true 1366 delHeader("Transfer-Encoding") // in case already set 1367 } 1368 1369 // Cannot use Content-Length with non-identity Transfer-Encoding. 1370 if cw.chunking { 1371 delHeader("Content-Length") 1372 } 1373 if !w.req.ProtoAtLeast(1, 0) { 1374 return 1375 } 1376 1377 if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) { 1378 delHeader("Connection") 1379 if w.req.ProtoAtLeast(1, 1) { 1380 setHeader.connection = "close" 1381 } 1382 } 1383 1384 writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:]) 1385 cw.header.WriteSubset(w.conn.bufw, excludeHeader) 1386 setHeader.Write(w.conn.bufw) 1387 w.conn.bufw.Write(crlf) 1388 } 1389 1390 // foreachHeaderElement splits v according to the "#rule" construction 1391 // in RFC 2616 section 2.1 and calls fn for each non-empty element. 1392 func foreachHeaderElement(v string, fn func(string)) { 1393 v = textproto.TrimString(v) 1394 if v == "" { 1395 return 1396 } 1397 if !strings.Contains(v, ",") { 1398 fn(v) 1399 return 1400 } 1401 for _, f := range strings.Split(v, ",") { 1402 if f = textproto.TrimString(f); f != "" { 1403 fn(f) 1404 } 1405 } 1406 } 1407 1408 // writeStatusLine writes an HTTP/1.x Status-Line (RFC 2616 Section 6.1) 1409 // to bw. is11 is whether the HTTP request is HTTP/1.1. false means HTTP/1.0. 1410 // code is the response status code. 1411 // scratch is an optional scratch buffer. If it has at least capacity 3, it's used. 
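// For example, writeStatusLine(bw, true, 404, scratch) emits
// "HTTP/1.1 404 Not Found\r\n", while an unregistered code such as 499 falls
// through to the slower fmt path and emits "HTTP/1.1 499 status code 499\r\n".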
1412 func writeStatusLine(bw *bufio.Writer, is11 bool, code int, scratch []byte) { 1413 if is11 { 1414 bw.WriteString("HTTP/1.1 ") 1415 } else { 1416 bw.WriteString("HTTP/1.0 ") 1417 } 1418 if text, ok := statusText[code]; ok { 1419 bw.Write(strconv.AppendInt(scratch[:0], int64(code), 10)) 1420 bw.WriteByte(' ') 1421 bw.WriteString(text) 1422 bw.WriteString("\r\n") 1423 } else { 1424 // don't worry about performance 1425 fmt.Fprintf(bw, "%03d status code %d\r\n", code, code) 1426 } 1427 } 1428 1429 // bodyAllowed reports whether a Write is allowed for this response type. 1430 // It's illegal to call this before the header has been flushed. 1431 func (w *response) bodyAllowed() bool { 1432 if !w.wroteHeader { 1433 panic("") 1434 } 1435 return bodyAllowedForStatus(w.status) 1436 } 1437 1438 // The Life Of A Write is like this: 1439 // 1440 // Handler starts. No header has been sent. The handler can either 1441 // write a header, or just start writing. Writing before sending a header 1442 // sends an implicitly empty 200 OK header. 1443 // 1444 // If the handler didn't declare a Content-Length up front, we either 1445 // go into chunking mode or, if the handler finishes running before 1446 // the chunking buffer size, we compute a Content-Length and send that 1447 // in the header instead. 1448 // 1449 // Likewise, if the handler didn't set a Content-Type, we sniff that 1450 // from the initial chunk of output. 1451 // 1452 // The Writers are wired together like: 1453 // 1454 // 1. *response (the ResponseWriter) -> 1455 // 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes 1456 // 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type) 1457 // and which writes the chunk headers, if needed. 1458 // 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to -> 1459 // 5. checkConnErrorWriter{c}, which notes any non-nil error on Write 1460 // and populates c.werr with it if so. but otherwise writes to: 1461 // 6. the rwc, the net.Conn. 1462 // 1463 // TODO(bradfitz): short-circuit some of the buffering when the 1464 // initial header contains both a Content-Type and Content-Length. 1465 // Also short-circuit in (1) when the header's been sent and not in 1466 // chunking mode, writing directly to (4) instead, if (2) has no 1467 // buffered data. More generally, we could short-circuit from (1) to 1468 // (3) even in chunking mode if the write size from (1) is over some 1469 // threshold and nothing is in (2). The answer might be mostly making 1470 // bufferBeforeChunkingSize smaller and having bufio's fast-paths deal 1471 // with this instead. 1472 func (w *response) Write(data []byte) (n int, err error) { 1473 return w.write(len(data), data, "") 1474 } 1475 1476 func (w *response) WriteString(data string) (n int, err error) { 1477 return w.write(len(data), nil, data) 1478 } 1479 1480 // either dataB or dataS is non-zero. 
1481 func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) { 1482 if w.conn.hijacked() { 1483 if lenData > 0 { 1484 w.conn.server.logf("http: response.Write on hijacked connection") 1485 } 1486 return 0, ErrHijacked 1487 } 1488 if !w.wroteHeader { 1489 w.WriteHeader(StatusOK) 1490 } 1491 if lenData == 0 { 1492 return 0, nil 1493 } 1494 if !w.bodyAllowed() { 1495 return 0, ErrBodyNotAllowed 1496 } 1497 1498 w.written += int64(lenData) // ignoring errors, for errorKludge 1499 if w.contentLength != -1 && w.written > w.contentLength { 1500 return 0, ErrContentLength 1501 } 1502 if dataB != nil { 1503 return w.w.Write(dataB) 1504 } else { 1505 return w.w.WriteString(dataS) 1506 } 1507 } 1508 1509 func (w *response) finishRequest() { 1510 w.handlerDone.setTrue() 1511 1512 if !w.wroteHeader { 1513 w.WriteHeader(StatusOK) 1514 } 1515 1516 w.w.Flush() 1517 putBufioWriter(w.w) 1518 w.cw.close() 1519 w.conn.bufw.Flush() 1520 1521 w.conn.r.abortPendingRead() 1522 1523 // Close the body (regardless of w.closeAfterReply) so we can 1524 // re-use its bufio.Reader later safely. 1525 w.reqBody.Close() 1526 1527 if w.req.MultipartForm != nil { 1528 w.req.MultipartForm.RemoveAll() 1529 } 1530 } 1531 1532 // shouldReuseConnection reports whether the underlying TCP connection can be reused. 1533 // It must only be called after the handler is done executing. 1534 func (w *response) shouldReuseConnection() bool { 1535 if w.closeAfterReply { 1536 // The request or something set while executing the 1537 // handler indicated we shouldn't reuse this 1538 // connection. 1539 return false 1540 } 1541 1542 if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written { 1543 // Did not write enough. Avoid getting out of sync. 1544 return false 1545 } 1546 1547 // There was some error writing to the underlying connection 1548 // during the request, so don't re-use this conn. 1549 if w.conn.werr != nil { 1550 return false 1551 } 1552 1553 if w.closedRequestBodyEarly() { 1554 return false 1555 } 1556 1557 return true 1558 } 1559 1560 func (w *response) closedRequestBodyEarly() bool { 1561 body, ok := w.req.Body.(*body) 1562 return ok && body.didEarlyClose() 1563 } 1564 1565 func (w *response) Flush() { 1566 if !w.wroteHeader { 1567 w.WriteHeader(StatusOK) 1568 } 1569 w.w.Flush() 1570 w.cw.flush() 1571 } 1572 1573 func (c *conn) finalFlush() { 1574 if c.bufr != nil { 1575 // Steal the bufio.Reader (~4KB worth of memory) and its associated 1576 // reader for a future connection. 1577 putBufioReader(c.bufr) 1578 c.bufr = nil 1579 } 1580 1581 if c.bufw != nil { 1582 c.bufw.Flush() 1583 // Steal the bufio.Writer (~4KB worth of memory) and its associated 1584 // writer for a future connection. 1585 putBufioWriter(c.bufw) 1586 c.bufw = nil 1587 } 1588 } 1589 1590 // Close the connection. 1591 func (c *conn) close() { 1592 c.finalFlush() 1593 c.rwc.Close() 1594 } 1595 1596 // rstAvoidanceDelay is the amount of time we sleep after closing the 1597 // write side of a TCP connection before closing the entire socket. 1598 // By sleeping, we increase the chances that the client sees our FIN 1599 // and processes its final data before they process the subsequent RST 1600 // from closing a connection with known unread data. 1601 // This RST seems to occur mostly on BSD systems. (And Windows?) 1602 // This timeout is somewhat arbitrary (~latency around the planet). 
1603 const rstAvoidanceDelay = 500 * time.Millisecond 1604 1605 type closeWriter interface { 1606 CloseWrite() error 1607 } 1608 1609 var _ closeWriter = (*net.TCPConn)(nil) 1610 1611 // closeWrite flushes any outstanding data and sends a FIN packet (if 1612 // client is connected via TCP), signalling that we're done. We then 1613 // pause for a bit, hoping the client processes it before any 1614 // subsequent RST. 1615 // 1616 // See https://golang.org/issue/3595 1617 func (c *conn) closeWriteAndWait() { 1618 c.finalFlush() 1619 if tcp, ok := c.rwc.(closeWriter); ok { 1620 tcp.CloseWrite() 1621 } 1622 time.Sleep(rstAvoidanceDelay) 1623 } 1624 1625 // validNPN reports whether the proto is not a blacklisted Next 1626 // Protocol Negotiation protocol. Empty and built-in protocol types 1627 // are blacklisted and can't be overridden with alternate 1628 // implementations. 1629 func validNPN(proto string) bool { 1630 switch proto { 1631 case "", "http/1.1", "http/1.0": 1632 return false 1633 } 1634 return true 1635 } 1636 1637 func (c *conn) setState(nc net.Conn, state ConnState) { 1638 srv := c.server 1639 switch state { 1640 case StateNew: 1641 srv.trackConn(c, true) 1642 case StateHijacked, StateClosed: 1643 srv.trackConn(c, false) 1644 } 1645 c.curState.Store(connStateInterface[state]) 1646 if hook := srv.ConnState; hook != nil { 1647 hook(nc, state) 1648 } 1649 } 1650 1651 // connStateInterface is an array of the interface{} versions of 1652 // ConnState values, so we can use them in atomic.Values later without 1653 // paying the cost of shoving their integers in an interface{}. 1654 var connStateInterface = [...]interface{}{ 1655 StateNew: StateNew, 1656 StateActive: StateActive, 1657 StateIdle: StateIdle, 1658 StateHijacked: StateHijacked, 1659 StateClosed: StateClosed, 1660 } 1661 1662 // badRequestError is a literal string (used by in the server in HTML, 1663 // unescaped) to tell the user why their request was bad. It should 1664 // be plain text without user info or other embedded errors. 1665 type badRequestError string 1666 1667 func (e badRequestError) Error() string { return "Bad Request: " + string(e) } 1668 1669 // ErrAbortHandler is a sentinel panic value to abort a handler. 1670 // While any panic from ServeHTTP aborts the response to the client, 1671 // panicking with ErrAbortHandler also suppresses logging of a stack 1672 // trace to the server's error log. 1673 var ErrAbortHandler = errors.New("net/http: abort Handler") 1674 1675 // isCommonNetReadError reports whether err is a common error 1676 // encountered during reading a request off the network when the 1677 // client has gone away or had its read fail somehow. This is used to 1678 // determine which logs are interesting enough to log about. 1679 func isCommonNetReadError(err error) bool { 1680 if err == io.EOF { 1681 return true 1682 } 1683 if neterr, ok := err.(net.Error); ok && neterr.Timeout() { 1684 return true 1685 } 1686 if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { 1687 return true 1688 } 1689 return false 1690 } 1691 1692 // Serve a new connection. 
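// A minimal sketch of the ConnState hook and of aborting a handler with
// ErrAbortHandler, both described above; it assumes a separate package that
// imports "io", "log", "net", and "net/http", and the address and path are
// illustrative:
//
//	func main() {
//		srv := &http.Server{
//			Addr: ":8080",
//			ConnState: func(c net.Conn, state http.ConnState) {
//				log.Printf("%v: %v", c.RemoteAddr(), state) // StateNew, StateActive, StateIdle, ...
//			},
//			Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//				if r.URL.Path == "/abort" {
//					panic(http.ErrAbortHandler) // interrupted response, no logged stack trace
//				}
//				io.WriteString(w, "ok")
//			}),
//		}
//		log.Fatal(srv.ListenAndServe())
//	}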
1693 func (c *conn) serve(ctx context.Context) { 1694 c.remoteAddr = c.rwc.RemoteAddr().String() 1695 ctx = context.WithValue(ctx, LocalAddrContextKey, c.rwc.LocalAddr()) 1696 defer func() { 1697 if err := recover(); err != nil && err != ErrAbortHandler { 1698 const size = 64 << 10 1699 buf := make([]byte, size) 1700 buf = buf[:runtime.Stack(buf, false)] 1701 c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf) 1702 } 1703 if !c.hijacked() { 1704 c.close() 1705 c.setState(c.rwc, StateClosed) 1706 } 1707 }() 1708 1709 if tlsConn, ok := c.rwc.(*tls.Conn); ok { 1710 if d := c.server.ReadTimeout; d != 0 { 1711 c.rwc.SetReadDeadline(time.Now().Add(d)) 1712 } 1713 if d := c.server.WriteTimeout; d != 0 { 1714 c.rwc.SetWriteDeadline(time.Now().Add(d)) 1715 } 1716 if err := tlsConn.Handshake(); err != nil { 1717 c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err) 1718 return 1719 } 1720 c.tlsState = new(tls.ConnectionState) 1721 *c.tlsState = tlsConn.ConnectionState() 1722 if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) { 1723 if fn := c.server.TLSNextProto[proto]; fn != nil { 1724 h := initNPNRequest{tlsConn, serverHandler{c.server}} 1725 fn(c.server, tlsConn, h) 1726 } 1727 return 1728 } 1729 } 1730 1731 // HTTP/1.x from here on. 1732 1733 ctx, cancelCtx := context.WithCancel(ctx) 1734 c.cancelCtx = cancelCtx 1735 defer cancelCtx() 1736 1737 c.r = &connReader{conn: c} 1738 c.bufr = newBufioReader(c.r) 1739 c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10) 1740 1741 for { 1742 w, err := c.readRequest(ctx) 1743 if c.r.remain != c.server.initialReadLimitSize() { 1744 // If we read any bytes off the wire, we're active. 1745 c.setState(c.rwc, StateActive) 1746 } 1747 if err != nil { 1748 const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n" 1749 1750 if err == errTooLarge { 1751 // Their HTTP client may or may not be 1752 // able to read this if we're 1753 // responding to them and hanging up 1754 // while they're still writing their 1755 // request. Undefined behavior. 1756 const publicErr = "431 Request Header Fields Too Large" 1757 fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr) 1758 c.closeWriteAndWait() 1759 return 1760 } 1761 if isCommonNetReadError(err) { 1762 return // don't reply 1763 } 1764 1765 publicErr := "400 Bad Request" 1766 if v, ok := err.(badRequestError); ok { 1767 publicErr = publicErr + ": " + string(v) 1768 } 1769 1770 fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr) 1771 return 1772 } 1773 1774 // Expect 100 Continue support 1775 req := w.req 1776 if req.expectsContinue() { 1777 if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 { 1778 // Wrap the Body reader with one that replies on the connection 1779 req.Body = &expectContinueReader{readCloser: req.Body, resp: w} 1780 } 1781 } else if req.Header.get("Expect") != "" { 1782 w.sendExpectationFailed() 1783 return 1784 } 1785 1786 c.curReq.Store(w) 1787 1788 if requestBodyRemains(req.Body) { 1789 registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead) 1790 } else { 1791 if w.conn.bufr.Buffered() > 0 { 1792 w.conn.r.closeNotifyFromPipelinedRequest() 1793 } 1794 w.conn.r.startBackgroundRead() 1795 } 1796 1797 // HTTP cannot have multiple simultaneous active requests.[*] 1798 // Until the server replies to this request, it can't read another, 1799 // so we might as well run the handler in this goroutine. 1800 // [*] Not strictly true: HTTP pipelining. 
We could let them all process 1801 // in parallel even if their responses need to be serialized. 1802 // But we're not going to implement HTTP pipelining because it 1803 // was never deployed in the wild and the answer is HTTP/2. 1804 serverHandler{c.server}.ServeHTTP(w, w.req) 1805 w.cancelCtx() 1806 if c.hijacked() { 1807 return 1808 } 1809 w.finishRequest() 1810 if !w.shouldReuseConnection() { 1811 if w.requestBodyLimitHit || w.closedRequestBodyEarly() { 1812 c.closeWriteAndWait() 1813 } 1814 return 1815 } 1816 c.setState(c.rwc, StateIdle) 1817 c.curReq.Store((*response)(nil)) 1818 1819 if !w.conn.server.doKeepAlives() { 1820 // We're in shutdown mode. We might've replied 1821 // to the user without "Connection: close" and 1822 // they might think they can send another 1823 // request, but such is life with HTTP/1.1. 1824 return 1825 } 1826 1827 if d := c.server.idleTimeout(); d != 0 { 1828 c.rwc.SetReadDeadline(time.Now().Add(d)) 1829 if _, err := c.bufr.Peek(4); err != nil { 1830 return 1831 } 1832 } 1833 c.rwc.SetReadDeadline(time.Time{}) 1834 } 1835 } 1836 1837 func (w *response) sendExpectationFailed() { 1838 // TODO(bradfitz): let ServeHTTP handlers handle 1839 // requests with non-standard expectation[s]? Seems 1840 // theoretical at best, and doesn't fit into the 1841 // current ServeHTTP model anyway. We'd need to 1842 // make the ResponseWriter an optional 1843 // "ExpectReplier" interface or something. 1844 // 1845 // For now we'll just obey RFC 2616 14.20 which says 1846 // "If a server receives a request containing an 1847 // Expect field that includes an expectation- 1848 // extension that it does not support, it MUST 1849 // respond with a 417 (Expectation Failed) status." 1850 w.Header().Set("Connection", "close") 1851 w.WriteHeader(StatusExpectationFailed) 1852 w.finishRequest() 1853 } 1854 1855 // Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter 1856 // and a Hijacker. 1857 func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { 1858 if w.handlerDone.isSet() { 1859 panic("net/http: Hijack called after ServeHTTP finished") 1860 } 1861 if w.wroteHeader { 1862 w.cw.flush() 1863 } 1864 1865 c := w.conn 1866 c.mu.Lock() 1867 defer c.mu.Unlock() 1868 1869 // Release the bufioWriter that writes to the chunk writer, it is not 1870 // used after a connection has been hijacked. 1871 rwc, buf, err = c.hijackLocked() 1872 if err == nil { 1873 putBufioWriter(w.w) 1874 w.w = nil 1875 } 1876 return rwc, buf, err 1877 } 1878 1879 func (w *response) CloseNotify() <-chan bool { 1880 if w.handlerDone.isSet() { 1881 panic("net/http: CloseNotify called after ServeHTTP finished") 1882 } 1883 return w.closeNotifyCh 1884 } 1885 1886 func registerOnHitEOF(rc io.ReadCloser, fn func()) { 1887 switch v := rc.(type) { 1888 case *expectContinueReader: 1889 registerOnHitEOF(v.readCloser, fn) 1890 case *body: 1891 v.registerOnHitEOF(fn) 1892 default: 1893 panic("unexpected type " + fmt.Sprintf("%T", rc)) 1894 } 1895 } 1896 1897 // requestBodyRemains reports whether future calls to Read 1898 // on rc might yield more data. 
1899 func requestBodyRemains(rc io.ReadCloser) bool { 1900 if rc == NoBody { 1901 return false 1902 } 1903 switch v := rc.(type) { 1904 case *expectContinueReader: 1905 return requestBodyRemains(v.readCloser) 1906 case *body: 1907 return v.bodyRemains() 1908 default: 1909 panic("unexpected type " + fmt.Sprintf("%T", rc)) 1910 } 1911 } 1912 1913 // The HandlerFunc type is an adapter to allow the use of 1914 // ordinary functions as HTTP handlers. If f is a function 1915 // with the appropriate signature, HandlerFunc(f) is a 1916 // Handler that calls f. 1917 type HandlerFunc func(ResponseWriter, *Request) 1918 1919 // ServeHTTP calls f(w, r). 1920 func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) { 1921 f(w, r) 1922 } 1923 1924 // Helper handlers 1925 1926 // Error replies to the request with the specified error message and HTTP code. 1927 // It does not otherwise end the request; the caller should ensure no further 1928 // writes are done to w. 1929 // The error message should be plain text. 1930 func Error(w ResponseWriter, error string, code int) { 1931 w.Header().Set("Content-Type", "text/plain; charset=utf-8") 1932 w.Header().Set("X-Content-Type-Options", "nosniff") 1933 w.WriteHeader(code) 1934 fmt.Fprintln(w, error) 1935 } 1936 1937 // NotFound replies to the request with an HTTP 404 not found error. 1938 func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) } 1939 1940 // NotFoundHandler returns a simple request handler 1941 // that replies to each request with a ``404 page not found'' reply. 1942 func NotFoundHandler() Handler { return HandlerFunc(NotFound) } 1943 1944 // StripPrefix returns a handler that serves HTTP requests 1945 // by removing the given prefix from the request URL's Path 1946 // and invoking the handler h. StripPrefix handles a 1947 // request for a path that doesn't begin with prefix by 1948 // replying with an HTTP 404 not found error. 1949 func StripPrefix(prefix string, h Handler) Handler { 1950 if prefix == "" { 1951 return h 1952 } 1953 return HandlerFunc(func(w ResponseWriter, r *Request) { 1954 if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) { 1955 r2 := new(Request) 1956 *r2 = *r 1957 r2.URL = new(url.URL) 1958 *r2.URL = *r.URL 1959 r2.URL.Path = p 1960 h.ServeHTTP(w, r2) 1961 } else { 1962 NotFound(w, r) 1963 } 1964 }) 1965 } 1966 1967 // Redirect replies to the request with a redirect to url, 1968 // which may be a path relative to the request path. 1969 // 1970 // The provided code should be in the 3xx range and is usually 1971 // StatusMovedPermanently, StatusFound or StatusSeeOther. 1972 func Redirect(w ResponseWriter, r *Request, url string, code int) { 1973 // parseURL is just url.Parse (url is shadowed for godoc). 1974 if u, err := parseURL(url); err == nil { 1975 // If url was relative, make absolute by 1976 // combining with request path. 1977 // The browser would probably do this for us, 1978 // but doing it ourselves is more reliable. 1979 1980 // NOTE(rsc): RFC 2616 says that the Location 1981 // line must be an absolute URI, like 1982 // "http://www.google.com/redirect/", 1983 // not a path like "/redirect/". 1984 // Unfortunately, we don't know what to 1985 // put in the host name section to get the 1986 // client to connect to us again, so we can't 1987 // know the right absolute URI to send back. 1988 // Because of this problem, no one pays attention 1989 // to the RFC; they all send back just a new path. 1990 // So do we. 
1991 		if u.Scheme == "" && u.Host == "" {
1992 			oldpath := r.URL.Path
1993 			if oldpath == "" { // should not happen, but avoid a crash if it does
1994 				oldpath = "/"
1995 			}
1996
1997 			// no leading http://server
1998 			if url == "" || url[0] != '/' {
1999 				// make relative path absolute
2000 				olddir, _ := path.Split(oldpath)
2001 				url = olddir + url
2002 			}
2003
2004 			var query string
2005 			if i := strings.Index(url, "?"); i != -1 {
2006 				url, query = url[:i], url[i:]
2007 			}
2008
2009 			// clean up but preserve trailing slash
2010 			trailing := strings.HasSuffix(url, "/")
2011 			url = path.Clean(url)
2012 			if trailing && !strings.HasSuffix(url, "/") {
2013 				url += "/"
2014 			}
2015 			url += query
2016 		}
2017 	}
2018
2019 	// RFC 2616 recommends that a short note "SHOULD" be included in the
2020 	// response because older user agents may not understand 301/307.
2021 	// Shouldn't send the response for POST or HEAD; that leaves GET.
2022 	writeNote := r.Method == "GET"
2023
2024 	w.Header().Set("Location", hexEscapeNonASCII(url))
2025 	if writeNote {
2026 		w.Header().Set("Content-Type", "text/html; charset=utf-8")
2027 	}
2028 	w.WriteHeader(code)
2029 	if writeNote {
2030 		note := "<a href=\"" + htmlEscape(url) + "\">" + statusText[code] + "</a>.\n"
2031 		fmt.Fprintln(w, note)
2032 	}
2033 }
2034
2035 // parseURL is just url.Parse. It exists only so that url.Parse can be called
2036 // in places where url is shadowed for godoc. See https://golang.org/cl/49930.
2037 var parseURL = url.Parse
2038
2039 var htmlReplacer = strings.NewReplacer(
2040 	"&", "&amp;",
2041 	"<", "&lt;",
2042 	">", "&gt;",
2043 	// "&#34;" is shorter than "&quot;".
2044 	`"`, "&#34;",
2045 	// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
2046 	"'", "&#39;",
2047 )
2048
2049 func htmlEscape(s string) string {
2050 	return htmlReplacer.Replace(s)
2051 }
2052
2053 // Redirect to a fixed URL
2054 type redirectHandler struct {
2055 	url  string
2056 	code int
2057 }
2058
2059 func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) {
2060 	Redirect(w, r, rh.url, rh.code)
2061 }
2062
2063 // RedirectHandler returns a request handler that redirects
2064 // each request it receives to the given url using the given
2065 // status code.
2066 //
2067 // The provided code should be in the 3xx range and is usually
2068 // StatusMovedPermanently, StatusFound or StatusSeeOther.
2069 func RedirectHandler(url string, code int) Handler {
2070 	return &redirectHandler{url, code}
2071 }
2072
2073 // ServeMux is an HTTP request multiplexer.
2074 // It matches the URL of each incoming request against a list of registered
2075 // patterns and calls the handler for the pattern that
2076 // most closely matches the URL.
2077 //
2078 // Patterns name fixed, rooted paths, like "/favicon.ico",
2079 // or rooted subtrees, like "/images/" (note the trailing slash).
2080 // Longer patterns take precedence over shorter ones, so that
2081 // if there are handlers registered for both "/images/"
2082 // and "/images/thumbnails/", the latter handler will be
2083 // called for paths beginning "/images/thumbnails/" and the
2084 // former will receive requests for any other paths in the
2085 // "/images/" subtree.
2086 //
2087 // Note that since a pattern ending in a slash names a rooted subtree,
2088 // the pattern "/" matches all paths not matched by other registered
2089 // patterns, not just the URL with Path == "/".
2090 // 2091 // If a subtree has been registered and a request is received naming the 2092 // subtree root without its trailing slash, ServeMux redirects that 2093 // request to the subtree root (adding the trailing slash). This behavior can 2094 // be overridden with a separate registration for the path without 2095 // the trailing slash. For example, registering "/images/" causes ServeMux 2096 // to redirect a request for "/images" to "/images/", unless "/images" has 2097 // been registered separately. 2098 // 2099 // Patterns may optionally begin with a host name, restricting matches to 2100 // URLs on that host only. Host-specific patterns take precedence over 2101 // general patterns, so that a handler might register for the two patterns 2102 // "/codesearch" and "codesearch.google.com/" without also taking over 2103 // requests for "http://www.google.com/". 2104 // 2105 // ServeMux also takes care of sanitizing the URL request path, 2106 // redirecting any request containing . or .. elements or repeated slashes 2107 // to an equivalent, cleaner URL. 2108 type ServeMux struct { 2109 mu sync.RWMutex 2110 m map[string]muxEntry 2111 hosts bool // whether any patterns contain hostnames 2112 } 2113 2114 type muxEntry struct { 2115 explicit bool 2116 h Handler 2117 pattern string 2118 } 2119 2120 // NewServeMux allocates and returns a new ServeMux. 2121 func NewServeMux() *ServeMux { return new(ServeMux) } 2122 2123 // DefaultServeMux is the default ServeMux used by Serve. 2124 var DefaultServeMux = &defaultServeMux 2125 2126 var defaultServeMux ServeMux 2127 2128 // Does path match pattern? 2129 func pathMatch(pattern, path string) bool { 2130 if len(pattern) == 0 { 2131 // should not happen 2132 return false 2133 } 2134 n := len(pattern) 2135 if pattern[n-1] != '/' { 2136 return pattern == path 2137 } 2138 return len(path) >= n && path[0:n] == pattern 2139 } 2140 2141 // Return the canonical path for p, eliminating . and .. elements. 2142 func cleanPath(p string) string { 2143 if p == "" { 2144 return "/" 2145 } 2146 if p[0] != '/' { 2147 p = "/" + p 2148 } 2149 np := path.Clean(p) 2150 // path.Clean removes trailing slash except for root; 2151 // put the trailing slash back if necessary. 2152 if p[len(p)-1] == '/' && np != "/" { 2153 np += "/" 2154 } 2155 return np 2156 } 2157 2158 // stripHostPort returns h without any trailing ":<port>". 2159 func stripHostPort(h string) string { 2160 // If no port on host, return unchanged 2161 if strings.IndexByte(h, ':') == -1 { 2162 return h 2163 } 2164 host, _, err := net.SplitHostPort(h) 2165 if err != nil { 2166 return h // on error, return unchanged 2167 } 2168 return host 2169 } 2170 2171 // Find a handler on a handler map given a path string. 2172 // Most-specific (longest) pattern wins. 2173 func (mux *ServeMux) match(path string) (h Handler, pattern string) { 2174 // Check for exact match first. 2175 v, ok := mux.m[path] 2176 if ok { 2177 return v.h, v.pattern 2178 } 2179 2180 // Check for longest valid match. 2181 var n = 0 2182 for k, v := range mux.m { 2183 if !pathMatch(k, path) { 2184 continue 2185 } 2186 if h == nil || len(k) > n { 2187 n = len(k) 2188 h = v.h 2189 pattern = v.pattern 2190 } 2191 } 2192 return 2193 } 2194 2195 // Handler returns the handler to use for the given request, 2196 // consulting r.Method, r.Host, and r.URL.Path. It always returns 2197 // a non-nil handler. If the path is not in its canonical form, the 2198 // handler will be an internally-generated handler that redirects 2199 // to the canonical path. 
If the host contains a port, it is ignored 2200 // when matching handlers. 2201 // 2202 // The path and host are used unchanged for CONNECT requests. 2203 // 2204 // Handler also returns the registered pattern that matches the 2205 // request or, in the case of internally-generated redirects, 2206 // the pattern that will match after following the redirect. 2207 // 2208 // If there is no registered handler that applies to the request, 2209 // Handler returns a ``page not found'' handler and an empty pattern. 2210 func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) { 2211 2212 // CONNECT requests are not canonicalized. 2213 if r.Method == "CONNECT" { 2214 return mux.handler(r.Host, r.URL.Path) 2215 } 2216 2217 // All other requests have any port stripped and path cleaned 2218 // before passing to mux.handler. 2219 host := stripHostPort(r.Host) 2220 path := cleanPath(r.URL.Path) 2221 if path != r.URL.Path { 2222 _, pattern = mux.handler(host, path) 2223 url := *r.URL 2224 url.Path = path 2225 return RedirectHandler(url.String(), StatusMovedPermanently), pattern 2226 } 2227 2228 return mux.handler(host, r.URL.Path) 2229 } 2230 2231 // handler is the main implementation of Handler. 2232 // The path is known to be in canonical form, except for CONNECT methods. 2233 func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) { 2234 mux.mu.RLock() 2235 defer mux.mu.RUnlock() 2236 2237 // Host-specific pattern takes precedence over generic ones 2238 if mux.hosts { 2239 h, pattern = mux.match(host + path) 2240 } 2241 if h == nil { 2242 h, pattern = mux.match(path) 2243 } 2244 if h == nil { 2245 h, pattern = NotFoundHandler(), "" 2246 } 2247 return 2248 } 2249 2250 // ServeHTTP dispatches the request to the handler whose 2251 // pattern most closely matches the request URL. 2252 func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) { 2253 if r.RequestURI == "*" { 2254 if r.ProtoAtLeast(1, 1) { 2255 w.Header().Set("Connection", "close") 2256 } 2257 w.WriteHeader(StatusBadRequest) 2258 return 2259 } 2260 h, _ := mux.Handler(r) 2261 h.ServeHTTP(w, r) 2262 } 2263 2264 // Handle registers the handler for the given pattern. 2265 // If a handler already exists for pattern, Handle panics. 2266 func (mux *ServeMux) Handle(pattern string, handler Handler) { 2267 mux.mu.Lock() 2268 defer mux.mu.Unlock() 2269 2270 if pattern == "" { 2271 panic("http: invalid pattern") 2272 } 2273 if handler == nil { 2274 panic("http: nil handler") 2275 } 2276 if mux.m[pattern].explicit { 2277 panic("http: multiple registrations for " + pattern) 2278 } 2279 2280 if mux.m == nil { 2281 mux.m = make(map[string]muxEntry) 2282 } 2283 mux.m[pattern] = muxEntry{explicit: true, h: handler, pattern: pattern} 2284 2285 if pattern[0] != '/' { 2286 mux.hosts = true 2287 } 2288 2289 // Helpful behavior: 2290 // If pattern is /tree/, insert an implicit permanent redirect for /tree. 2291 // It can be overridden by an explicit registration. 2292 n := len(pattern) 2293 if n > 0 && pattern[n-1] == '/' && !mux.m[pattern[0:n-1]].explicit { 2294 // If pattern contains a host name, strip it and use remaining 2295 // path for redirect. 2296 path := pattern 2297 if pattern[0] != '/' { 2298 // In pattern, at least the last character is a '/', so 2299 // strings.Index can't be -1. 
2300 path = pattern[strings.Index(pattern, "/"):] 2301 } 2302 url := &url.URL{Path: path} 2303 mux.m[pattern[0:n-1]] = muxEntry{h: RedirectHandler(url.String(), StatusMovedPermanently), pattern: pattern} 2304 } 2305 } 2306 2307 // HandleFunc registers the handler function for the given pattern. 2308 func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { 2309 mux.Handle(pattern, HandlerFunc(handler)) 2310 } 2311 2312 // Handle registers the handler for the given pattern 2313 // in the DefaultServeMux. 2314 // The documentation for ServeMux explains how patterns are matched. 2315 func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } 2316 2317 // HandleFunc registers the handler function for the given pattern 2318 // in the DefaultServeMux. 2319 // The documentation for ServeMux explains how patterns are matched. 2320 func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { 2321 DefaultServeMux.HandleFunc(pattern, handler) 2322 } 2323 2324 // Serve accepts incoming HTTP connections on the listener l, 2325 // creating a new service goroutine for each. The service goroutines 2326 // read requests and then call handler to reply to them. 2327 // Handler is typically nil, in which case the DefaultServeMux is used. 2328 func Serve(l net.Listener, handler Handler) error { 2329 srv := &Server{Handler: handler} 2330 return srv.Serve(l) 2331 } 2332 2333 // Serve accepts incoming HTTPS connections on the listener l, 2334 // creating a new service goroutine for each. The service goroutines 2335 // read requests and then call handler to reply to them. 2336 // 2337 // Handler is typically nil, in which case the DefaultServeMux is used. 2338 // 2339 // Additionally, files containing a certificate and matching private key 2340 // for the server must be provided. If the certificate is signed by a 2341 // certificate authority, the certFile should be the concatenation 2342 // of the server's certificate, any intermediates, and the CA's certificate. 2343 func ServeTLS(l net.Listener, handler Handler, certFile, keyFile string) error { 2344 srv := &Server{Handler: handler} 2345 return srv.ServeTLS(l, certFile, keyFile) 2346 } 2347 2348 // A Server defines parameters for running an HTTP server. 2349 // The zero value for Server is a valid configuration. 2350 type Server struct { 2351 Addr string // TCP address to listen on, ":http" if empty 2352 Handler Handler // handler to invoke, http.DefaultServeMux if nil 2353 TLSConfig *tls.Config // optional TLS config, used by ServeTLS and ListenAndServeTLS 2354 2355 // ReadTimeout is the maximum duration for reading the entire 2356 // request, including the body. 2357 // 2358 // Because ReadTimeout does not let Handlers make per-request 2359 // decisions on each request body's acceptable deadline or 2360 // upload rate, most users will prefer to use 2361 // ReadHeaderTimeout. It is valid to use them both. 2362 ReadTimeout time.Duration 2363 2364 // ReadHeaderTimeout is the amount of time allowed to read 2365 // request headers. The connection's read deadline is reset 2366 // after reading the headers and the Handler can decide what 2367 // is considered too slow for the body. 2368 ReadHeaderTimeout time.Duration 2369 2370 // WriteTimeout is the maximum duration before timing out 2371 // writes of the response. It is reset whenever a new 2372 // request's header is read. Like ReadTimeout, it does not 2373 // let Handlers make decisions on a per-request basis. 
2374 WriteTimeout time.Duration 2375 2376 // IdleTimeout is the maximum amount of time to wait for the 2377 // next request when keep-alives are enabled. If IdleTimeout 2378 // is zero, the value of ReadTimeout is used. If both are 2379 // zero, ReadHeaderTimeout is used. 2380 IdleTimeout time.Duration 2381 2382 // MaxHeaderBytes controls the maximum number of bytes the 2383 // server will read parsing the request header's keys and 2384 // values, including the request line. It does not limit the 2385 // size of the request body. 2386 // If zero, DefaultMaxHeaderBytes is used. 2387 MaxHeaderBytes int 2388 2389 // TLSNextProto optionally specifies a function to take over 2390 // ownership of the provided TLS connection when an NPN/ALPN 2391 // protocol upgrade has occurred. The map key is the protocol 2392 // name negotiated. The Handler argument should be used to 2393 // handle HTTP requests and will initialize the Request's TLS 2394 // and RemoteAddr if not already set. The connection is 2395 // automatically closed when the function returns. 2396 // If TLSNextProto is not nil, HTTP/2 support is not enabled 2397 // automatically. 2398 TLSNextProto map[string]func(*Server, *tls.Conn, Handler) 2399 2400 // ConnState specifies an optional callback function that is 2401 // called when a client connection changes state. See the 2402 // ConnState type and associated constants for details. 2403 ConnState func(net.Conn, ConnState) 2404 2405 // ErrorLog specifies an optional logger for errors accepting 2406 // connections, unexpected behavior from handlers, and 2407 // underlying FileSystem errors. 2408 // If nil, logging is done via the log package's standard logger. 2409 ErrorLog *log.Logger 2410 2411 disableKeepAlives int32 // accessed atomically. 2412 inShutdown int32 // accessed atomically (non-zero means we're in Shutdown) 2413 nextProtoOnce sync.Once // guards setupHTTP2_* init 2414 nextProtoErr error // result of http2.ConfigureServer if used 2415 2416 mu sync.Mutex 2417 listeners map[net.Listener]struct{} 2418 activeConn map[*conn]struct{} 2419 doneChan chan struct{} 2420 onShutdown []func() 2421 } 2422 2423 func (s *Server) getDoneChan() <-chan struct{} { 2424 s.mu.Lock() 2425 defer s.mu.Unlock() 2426 return s.getDoneChanLocked() 2427 } 2428 2429 func (s *Server) getDoneChanLocked() chan struct{} { 2430 if s.doneChan == nil { 2431 s.doneChan = make(chan struct{}) 2432 } 2433 return s.doneChan 2434 } 2435 2436 func (s *Server) closeDoneChanLocked() { 2437 ch := s.getDoneChanLocked() 2438 select { 2439 case <-ch: 2440 // Already closed. Don't close again. 2441 default: 2442 // Safe to close here. We're the only closer, guarded 2443 // by s.mu. 2444 close(ch) 2445 } 2446 } 2447 2448 // Close immediately closes all active net.Listeners and any 2449 // connections in state StateNew, StateActive, or StateIdle. For a 2450 // graceful shutdown, use Shutdown. 2451 // 2452 // Close does not attempt to close (and does not even know about) 2453 // any hijacked connections, such as WebSockets. 2454 // 2455 // Close returns any error returned from closing the Server's 2456 // underlying Listener(s). 2457 func (srv *Server) Close() error { 2458 srv.mu.Lock() 2459 defer srv.mu.Unlock() 2460 srv.closeDoneChanLocked() 2461 err := srv.closeListenersLocked() 2462 for c := range srv.activeConn { 2463 c.rwc.Close() 2464 delete(srv.activeConn, c) 2465 } 2466 return err 2467 } 2468 2469 // shutdownPollInterval is how often we poll for quiescence 2470 // during Server.Shutdown. 
This is lower during tests, to 2471 // speed up tests. 2472 // Ideally we could find a solution that doesn't involve polling, 2473 // but which also doesn't have a high runtime cost (and doesn't 2474 // involve any contentious mutexes), but that is left as an 2475 // exercise for the reader. 2476 var shutdownPollInterval = 500 * time.Millisecond 2477 2478 // Shutdown gracefully shuts down the server without interrupting any 2479 // active connections. Shutdown works by first closing all open 2480 // listeners, then closing all idle connections, and then waiting 2481 // indefinitely for connections to return to idle and then shut down. 2482 // If the provided context expires before the shutdown is complete, 2483 // Shutdown returns the context's error, otherwise it returns any 2484 // error returned from closing the Server's underlying Listener(s). 2485 // 2486 // When Shutdown is called, Serve, ListenAndServe, and 2487 // ListenAndServeTLS immediately return ErrServerClosed. Make sure the 2488 // program doesn't exit and waits instead for Shutdown to return. 2489 // 2490 // Shutdown does not attempt to close nor wait for hijacked 2491 // connections such as WebSockets. The caller of Shutdown should 2492 // separately notify such long-lived connections of shutdown and wait 2493 // for them to close, if desired. See RegisterOnShutdown for a way to 2494 // register shutdown notification functions. 2495 func (srv *Server) Shutdown(ctx context.Context) error { 2496 atomic.AddInt32(&srv.inShutdown, 1) 2497 defer atomic.AddInt32(&srv.inShutdown, -1) 2498 2499 srv.mu.Lock() 2500 lnerr := srv.closeListenersLocked() 2501 srv.closeDoneChanLocked() 2502 for _, f := range srv.onShutdown { 2503 go f() 2504 } 2505 srv.mu.Unlock() 2506 2507 ticker := time.NewTicker(shutdownPollInterval) 2508 defer ticker.Stop() 2509 for { 2510 if srv.closeIdleConns() { 2511 return lnerr 2512 } 2513 select { 2514 case <-ctx.Done(): 2515 return ctx.Err() 2516 case <-ticker.C: 2517 } 2518 } 2519 } 2520 2521 // RegisterOnShutdown registers a function to call on Shutdown. 2522 // This can be used to gracefully shutdown connections that have 2523 // undergone NPN/ALPN protocol upgrade or that have been hijacked. 2524 // This function should start protocol-specific graceful shutdown, 2525 // but should not wait for shutdown to complete. 2526 func (srv *Server) RegisterOnShutdown(f func()) { 2527 srv.mu.Lock() 2528 srv.onShutdown = append(srv.onShutdown, f) 2529 srv.mu.Unlock() 2530 } 2531 2532 // closeIdleConns closes all idle connections and reports whether the 2533 // server is quiescent. 2534 func (s *Server) closeIdleConns() bool { 2535 s.mu.Lock() 2536 defer s.mu.Unlock() 2537 quiescent := true 2538 for c := range s.activeConn { 2539 st, ok := c.curState.Load().(ConnState) 2540 if !ok || st != StateIdle { 2541 quiescent = false 2542 continue 2543 } 2544 c.rwc.Close() 2545 delete(s.activeConn, c) 2546 } 2547 return quiescent 2548 } 2549 2550 func (s *Server) closeListenersLocked() error { 2551 var err error 2552 for ln := range s.listeners { 2553 if cerr := ln.Close(); cerr != nil && err == nil { 2554 err = cerr 2555 } 2556 delete(s.listeners, ln) 2557 } 2558 return err 2559 } 2560 2561 // A ConnState represents the state of a client connection to a server. 2562 // It's used by the optional Server.ConnState hook. 2563 type ConnState int 2564 2565 const ( 2566 // StateNew represents a new connection that is expected to 2567 // send a request immediately. 
Connections begin at this
2568 	// state and then transition to either StateActive or
2569 	// StateClosed.
2570 	StateNew ConnState = iota
2571
2572 	// StateActive represents a connection that has read 1 or more
2573 	// bytes of a request. The Server.ConnState hook for
2574 	// StateActive fires before the request has entered a handler
2575 	// and doesn't fire again until the request has been
2576 	// handled. After the request is handled, the state
2577 	// transitions to StateClosed, StateHijacked, or StateIdle.
2578 	// For HTTP/2, StateActive fires on the transition from zero
2579 	// to one active request, and only transitions away once all
2580 	// active requests are complete. That means that ConnState
2581 	// cannot be used to do per-request work; ConnState only notes
2582 	// the overall state of the connection.
2583 	StateActive
2584
2585 	// StateIdle represents a connection that has finished
2586 	// handling a request and is in the keep-alive state, waiting
2587 	// for a new request. Connections transition from StateIdle
2588 	// to either StateActive or StateClosed.
2589 	StateIdle
2590
2591 	// StateHijacked represents a hijacked connection.
2592 	// This is a terminal state. It does not transition to StateClosed.
2593 	StateHijacked
2594
2595 	// StateClosed represents a closed connection.
2596 	// This is a terminal state. Hijacked connections do not
2597 	// transition to StateClosed.
2598 	StateClosed
2599 )
2600
2601 var stateName = map[ConnState]string{
2602 	StateNew:      "new",
2603 	StateActive:   "active",
2604 	StateIdle:     "idle",
2605 	StateHijacked: "hijacked",
2606 	StateClosed:   "closed",
2607 }
2608
2609 func (c ConnState) String() string {
2610 	return stateName[c]
2611 }
2612
2613 // serverHandler delegates to either the server's Handler or
2614 // DefaultServeMux and also handles "OPTIONS *" requests.
2615 type serverHandler struct {
2616 	srv *Server
2617 }
2618
2619 func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) {
2620 	handler := sh.srv.Handler
2621 	if handler == nil {
2622 		handler = DefaultServeMux
2623 	}
2624 	if req.RequestURI == "*" && req.Method == "OPTIONS" {
2625 		handler = globalOptionsHandler{}
2626 	}
2627 	handler.ServeHTTP(rw, req)
2628 }
2629
2630 // ListenAndServe listens on the TCP network address srv.Addr and then
2631 // calls Serve to handle requests on incoming connections.
2632 // Accepted connections are configured to enable TCP keep-alives.
2633 // If srv.Addr is blank, ":http" is used.
2634 // ListenAndServe always returns a non-nil error.
2635 func (srv *Server) ListenAndServe() error {
2636 	addr := srv.Addr
2637 	if addr == "" {
2638 		addr = ":http"
2639 	}
2640 	ln, err := net.Listen("tcp", addr)
2641 	if err != nil {
2642 		return err
2643 	}
2644 	return srv.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)})
2645 }
2646
2647 var testHookServerServe func(*Server, net.Listener) // used if non-nil
2648
2649 // shouldConfigureHTTP2ForServe reports whether Server.Serve should configure
2650 // automatic HTTP/2 (which sets up the srv.TLSNextProto map).
2651 func (srv *Server) shouldConfigureHTTP2ForServe() bool {
2652 	if srv.TLSConfig == nil {
2653 		// Compatibility with Go 1.6:
2654 		// If there's no TLSConfig, it's possible that the user just
2655 		// didn't set it on the http.Server, but did pass it to
2656 		// tls.NewListener and passed that listener to Serve.
2657 		// So we should configure HTTP/2 (to set up srv.TLSNextProto)
2658 		// in case the listener returns an "h2" *tls.Conn.
2659 		return true
2660 	}
2661 	// The user specified a TLSConfig on their http.Server.
2662 	// In this case, only configure HTTP/2 if their tls.Config
2663 	// explicitly mentions "h2". Otherwise http2.ConfigureServer
2664 	// would modify the tls.Config to add it, but they probably already
2665 	// passed this tls.Config to tls.NewListener. And if they did,
2666 	// it's too late anyway to fix it. It would only be potentially racy.
2667 	// See Issue 15908.
2668 	return strSliceContains(srv.TLSConfig.NextProtos, http2NextProtoTLS)
2669 }
2670
2671 // ErrServerClosed is returned by the Server's Serve, ServeTLS, ListenAndServe,
2672 // and ListenAndServeTLS methods after a call to Shutdown or Close.
2673 var ErrServerClosed = errors.New("http: Server closed")
2674
2675 // Serve accepts incoming connections on the Listener l, creating a
2676 // new service goroutine for each. The service goroutines read requests and
2677 // then call srv.Handler to reply to them.
2678 //
2679 // For HTTP/2 support, srv.TLSConfig should be initialized to the
2680 // provided listener's TLS Config before calling Serve. If
2681 // srv.TLSConfig is non-nil and doesn't include the string "h2" in
2682 // Config.NextProtos, HTTP/2 support is not enabled.
2683 //
2684 // Serve always returns a non-nil error. After Shutdown or Close, the
2685 // returned error is ErrServerClosed.
2686 func (srv *Server) Serve(l net.Listener) error {
2687 	defer l.Close()
2688 	if fn := testHookServerServe; fn != nil {
2689 		fn(srv, l)
2690 	}
2691 	var tempDelay time.Duration // how long to sleep on accept failure
2692
2693 	if err := srv.setupHTTP2_Serve(); err != nil {
2694 		return err
2695 	}
2696
2697 	srv.trackListener(l, true)
2698 	defer srv.trackListener(l, false)
2699
2700 	baseCtx := context.Background() // base is always background, per Issue 16220
2701 	ctx := context.WithValue(baseCtx, ServerContextKey, srv)
2702 	for {
2703 		rw, e := l.Accept()
2704 		if e != nil {
2705 			select {
2706 			case <-srv.getDoneChan():
2707 				return ErrServerClosed
2708 			default:
2709 			}
2710 			if ne, ok := e.(net.Error); ok && ne.Temporary() {
2711 				if tempDelay == 0 {
2712 					tempDelay = 5 * time.Millisecond
2713 				} else {
2714 					tempDelay *= 2
2715 				}
2716 				if max := 1 * time.Second; tempDelay > max {
2717 					tempDelay = max
2718 				}
2719 				srv.logf("http: Accept error: %v; retrying in %v", e, tempDelay)
2720 				time.Sleep(tempDelay)
2721 				continue
2722 			}
2723 			return e
2724 		}
2725 		tempDelay = 0
2726 		c := srv.newConn(rw)
2727 		c.setState(c.rwc, StateNew) // before Serve can return
2728 		go c.serve(ctx)
2729 	}
2730 }
2731
2732 // ServeTLS accepts incoming connections on the Listener l, creating a
2733 // new service goroutine for each. The service goroutines read requests and
2734 // then call srv.Handler to reply to them.
2735 //
2736 // Additionally, files containing a certificate and matching private key for
2737 // the server must be provided if neither the Server's TLSConfig.Certificates
2738 // nor TLSConfig.GetCertificate are populated. If the certificate is signed by
2739 // a certificate authority, the certFile should be the concatenation of the
2740 // server's certificate, any intermediates, and the CA's certificate.
2741 //
2742 // For HTTP/2 support, srv.TLSConfig should be initialized to the
2743 // provided listener's TLS Config before calling Serve. If
2744 // srv.TLSConfig is non-nil and doesn't include the string "h2" in
2745 // Config.NextProtos, HTTP/2 support is not enabled.
2746 //
2747 // ServeTLS always returns a non-nil error. After Shutdown or Close, the
2748 // returned error is ErrServerClosed.
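//
// A minimal usage sketch in the style of the other examples in this file
// (the listen address is arbitrary, and cert.pem and key.pem are assumed
// to exist in the working directory):
//
//	package main
//
//	import (
//		"log"
//		"net"
//		"net/http"
//	)
//
//	func main() {
//		ln, err := net.Listen("tcp", ":8443")
//		if err != nil {
//			log.Fatal(err)
//		}
//		srv := &http.Server{} // a nil Handler means DefaultServeMux is used
//		log.Fatal(srv.ServeTLS(ln, "cert.pem", "key.pem"))
//	}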
2749 func (srv *Server) ServeTLS(l net.Listener, certFile, keyFile string) error { 2750 // Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig 2751 // before we clone it and create the TLS Listener. 2752 if err := srv.setupHTTP2_ServeTLS(); err != nil { 2753 return err 2754 } 2755 2756 config := cloneTLSConfig(srv.TLSConfig) 2757 if !strSliceContains(config.NextProtos, "http/1.1") { 2758 config.NextProtos = append(config.NextProtos, "http/1.1") 2759 } 2760 2761 configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil 2762 if !configHasCert || certFile != "" || keyFile != "" { 2763 var err error 2764 config.Certificates = make([]tls.Certificate, 1) 2765 config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) 2766 if err != nil { 2767 return err 2768 } 2769 } 2770 2771 tlsListener := tls.NewListener(l, config) 2772 return srv.Serve(tlsListener) 2773 } 2774 2775 func (s *Server) trackListener(ln net.Listener, add bool) { 2776 s.mu.Lock() 2777 defer s.mu.Unlock() 2778 if s.listeners == nil { 2779 s.listeners = make(map[net.Listener]struct{}) 2780 } 2781 if add { 2782 // If the *Server is being reused after a previous 2783 // Close or Shutdown, reset its doneChan: 2784 if len(s.listeners) == 0 && len(s.activeConn) == 0 { 2785 s.doneChan = nil 2786 } 2787 s.listeners[ln] = struct{}{} 2788 } else { 2789 delete(s.listeners, ln) 2790 } 2791 } 2792 2793 func (s *Server) trackConn(c *conn, add bool) { 2794 s.mu.Lock() 2795 defer s.mu.Unlock() 2796 if s.activeConn == nil { 2797 s.activeConn = make(map[*conn]struct{}) 2798 } 2799 if add { 2800 s.activeConn[c] = struct{}{} 2801 } else { 2802 delete(s.activeConn, c) 2803 } 2804 } 2805 2806 func (s *Server) idleTimeout() time.Duration { 2807 if s.IdleTimeout != 0 { 2808 return s.IdleTimeout 2809 } 2810 return s.ReadTimeout 2811 } 2812 2813 func (s *Server) readHeaderTimeout() time.Duration { 2814 if s.ReadHeaderTimeout != 0 { 2815 return s.ReadHeaderTimeout 2816 } 2817 return s.ReadTimeout 2818 } 2819 2820 func (s *Server) doKeepAlives() bool { 2821 return atomic.LoadInt32(&s.disableKeepAlives) == 0 && !s.shuttingDown() 2822 } 2823 2824 func (s *Server) shuttingDown() bool { 2825 return atomic.LoadInt32(&s.inShutdown) != 0 2826 } 2827 2828 // SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled. 2829 // By default, keep-alives are always enabled. Only very 2830 // resource-constrained environments or servers in the process of 2831 // shutting down should disable them. 2832 func (srv *Server) SetKeepAlivesEnabled(v bool) { 2833 if v { 2834 atomic.StoreInt32(&srv.disableKeepAlives, 0) 2835 return 2836 } 2837 atomic.StoreInt32(&srv.disableKeepAlives, 1) 2838 2839 // Close idle HTTP/1 conns: 2840 srv.closeIdleConns() 2841 2842 // Close HTTP/2 conns, as soon as they become idle, but reset 2843 // the chan so future conns (if the listener is still active) 2844 // still work and don't get a GOAWAY immediately, before their 2845 // first request: 2846 srv.mu.Lock() 2847 defer srv.mu.Unlock() 2848 srv.closeDoneChanLocked() // closes http2 conns 2849 srv.doneChan = nil 2850 } 2851 2852 func (s *Server) logf(format string, args ...interface{}) { 2853 if s.ErrorLog != nil { 2854 s.ErrorLog.Printf(format, args...) 2855 } else { 2856 log.Printf(format, args...) 2857 } 2858 } 2859 2860 // logf prints to the ErrorLog of the *Server associated with request r 2861 // via ServerContextKey. If there's no associated server, or if ErrorLog 2862 // is nil, logging is done via the log package's standard logger. 
2863 func logf(r *Request, format string, args ...interface{}) { 2864 s, _ := r.Context().Value(ServerContextKey).(*Server) 2865 if s != nil && s.ErrorLog != nil { 2866 s.ErrorLog.Printf(format, args...) 2867 } else { 2868 log.Printf(format, args...) 2869 } 2870 } 2871 2872 // ListenAndServe listens on the TCP network address addr 2873 // and then calls Serve with handler to handle requests 2874 // on incoming connections. 2875 // Accepted connections are configured to enable TCP keep-alives. 2876 // Handler is typically nil, in which case the DefaultServeMux is 2877 // used. 2878 // 2879 // A trivial example server is: 2880 // 2881 // package main 2882 // 2883 // import ( 2884 // "io" 2885 // "net/http" 2886 // "log" 2887 // ) 2888 // 2889 // // hello world, the web server 2890 // func HelloServer(w http.ResponseWriter, req *http.Request) { 2891 // io.WriteString(w, "hello, world!\n") 2892 // } 2893 // 2894 // func main() { 2895 // http.HandleFunc("/hello", HelloServer) 2896 // log.Fatal(http.ListenAndServe(":12345", nil)) 2897 // } 2898 // 2899 // ListenAndServe always returns a non-nil error. 2900 func ListenAndServe(addr string, handler Handler) error { 2901 server := &Server{Addr: addr, Handler: handler} 2902 return server.ListenAndServe() 2903 } 2904 2905 // ListenAndServeTLS acts identically to ListenAndServe, except that it 2906 // expects HTTPS connections. Additionally, files containing a certificate and 2907 // matching private key for the server must be provided. If the certificate 2908 // is signed by a certificate authority, the certFile should be the concatenation 2909 // of the server's certificate, any intermediates, and the CA's certificate. 2910 // 2911 // A trivial example server is: 2912 // 2913 // import ( 2914 // "log" 2915 // "net/http" 2916 // ) 2917 // 2918 // func handler(w http.ResponseWriter, req *http.Request) { 2919 // w.Header().Set("Content-Type", "text/plain") 2920 // w.Write([]byte("This is an example server.\n")) 2921 // } 2922 // 2923 // func main() { 2924 // http.HandleFunc("/", handler) 2925 // log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/") 2926 // err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil) 2927 // log.Fatal(err) 2928 // } 2929 // 2930 // One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem. 2931 // 2932 // ListenAndServeTLS always returns a non-nil error. 2933 func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { 2934 server := &Server{Addr: addr, Handler: handler} 2935 return server.ListenAndServeTLS(certFile, keyFile) 2936 } 2937 2938 // ListenAndServeTLS listens on the TCP network address srv.Addr and 2939 // then calls Serve to handle requests on incoming TLS connections. 2940 // Accepted connections are configured to enable TCP keep-alives. 2941 // 2942 // Filenames containing a certificate and matching private key for the 2943 // server must be provided if neither the Server's TLSConfig.Certificates 2944 // nor TLSConfig.GetCertificate are populated. If the certificate is 2945 // signed by a certificate authority, the certFile should be the 2946 // concatenation of the server's certificate, any intermediates, and 2947 // the CA's certificate. 2948 // 2949 // If srv.Addr is blank, ":https" is used. 2950 // 2951 // ListenAndServeTLS always returns a non-nil error. 
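//
// A short sketch with an explicit Server value, using the optional timeout
// fields documented on the Server type (the address and durations shown are
// arbitrary; cert.pem and key.pem are assumed to exist in the working
// directory):
//
//	srv := &http.Server{
//		Addr:              ":10443",
//		Handler:           nil, // use DefaultServeMux
//		ReadHeaderTimeout: 5 * time.Second,
//		IdleTimeout:       2 * time.Minute,
//	}
//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))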
2952 func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error { 2953 addr := srv.Addr 2954 if addr == "" { 2955 addr = ":https" 2956 } 2957 2958 ln, err := net.Listen("tcp", addr) 2959 if err != nil { 2960 return err 2961 } 2962 2963 return srv.ServeTLS(tcpKeepAliveListener{ln.(*net.TCPListener)}, certFile, keyFile) 2964 } 2965 2966 // setupHTTP2_ServeTLS conditionally configures HTTP/2 on 2967 // srv and returns whether there was an error setting it up. If it is 2968 // not configured for policy reasons, nil is returned. 2969 func (srv *Server) setupHTTP2_ServeTLS() error { 2970 srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults) 2971 return srv.nextProtoErr 2972 } 2973 2974 // setupHTTP2_Serve is called from (*Server).Serve and conditionally 2975 // configures HTTP/2 on srv using a more conservative policy than 2976 // setupHTTP2_ServeTLS because Serve may be called 2977 // concurrently. 2978 // 2979 // The tests named TestTransportAutomaticHTTP2* and 2980 // TestConcurrentServerServe in server_test.go demonstrate some 2981 // of the supported use cases and motivations. 2982 func (srv *Server) setupHTTP2_Serve() error { 2983 srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults_Serve) 2984 return srv.nextProtoErr 2985 } 2986 2987 func (srv *Server) onceSetNextProtoDefaults_Serve() { 2988 if srv.shouldConfigureHTTP2ForServe() { 2989 srv.onceSetNextProtoDefaults() 2990 } 2991 } 2992 2993 // onceSetNextProtoDefaults configures HTTP/2, if the user hasn't 2994 // configured otherwise. (by setting srv.TLSNextProto non-nil) 2995 // It must only be called via srv.nextProtoOnce (use srv.setupHTTP2_*). 2996 func (srv *Server) onceSetNextProtoDefaults() { 2997 if strings.Contains(os.Getenv("GODEBUG"), "http2server=0") { 2998 return 2999 } 3000 // Enable HTTP/2 by default if the user hasn't otherwise 3001 // configured their TLSNextProto map. 3002 if srv.TLSNextProto == nil { 3003 conf := &http2Server{ 3004 NewWriteScheduler: func() http2WriteScheduler { return http2NewPriorityWriteScheduler(nil) }, 3005 } 3006 srv.nextProtoErr = http2ConfigureServer(srv, conf) 3007 } 3008 } 3009 3010 // TimeoutHandler returns a Handler that runs h with the given time limit. 3011 // 3012 // The new Handler calls h.ServeHTTP to handle each request, but if a 3013 // call runs for longer than its time limit, the handler responds with 3014 // a 503 Service Unavailable error and the given message in its body. 3015 // (If msg is empty, a suitable default message will be sent.) 3016 // After such a timeout, writes by h to its ResponseWriter will return 3017 // ErrHandlerTimeout. 3018 // 3019 // TimeoutHandler buffers all Handler writes to memory and does not 3020 // support the Hijacker or Flusher interfaces. 3021 func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler { 3022 return &timeoutHandler{ 3023 handler: h, 3024 body: msg, 3025 dt: dt, 3026 } 3027 } 3028 3029 // ErrHandlerTimeout is returned on ResponseWriter Write calls 3030 // in handlers which have timed out. 3031 var ErrHandlerTimeout = errors.New("http: Handler timeout") 3032 3033 type timeoutHandler struct { 3034 handler Handler 3035 body string 3036 dt time.Duration 3037 3038 // When set, no context will be created and this context will 3039 // be used instead. 
3040 testContext context.Context 3041 } 3042 3043 func (h *timeoutHandler) errorBody() string { 3044 if h.body != "" { 3045 return h.body 3046 } 3047 return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>" 3048 } 3049 3050 func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) { 3051 ctx := h.testContext 3052 if ctx == nil { 3053 var cancelCtx context.CancelFunc 3054 ctx, cancelCtx = context.WithTimeout(r.Context(), h.dt) 3055 defer cancelCtx() 3056 } 3057 r = r.WithContext(ctx) 3058 done := make(chan struct{}) 3059 tw := &timeoutWriter{ 3060 w: w, 3061 h: make(Header), 3062 } 3063 go func() { 3064 h.handler.ServeHTTP(tw, r) 3065 close(done) 3066 }() 3067 select { 3068 case <-done: 3069 tw.mu.Lock() 3070 defer tw.mu.Unlock() 3071 dst := w.Header() 3072 for k, vv := range tw.h { 3073 dst[k] = vv 3074 } 3075 if !tw.wroteHeader { 3076 tw.code = StatusOK 3077 } 3078 w.WriteHeader(tw.code) 3079 w.Write(tw.wbuf.Bytes()) 3080 case <-ctx.Done(): 3081 tw.mu.Lock() 3082 defer tw.mu.Unlock() 3083 w.WriteHeader(StatusServiceUnavailable) 3084 io.WriteString(w, h.errorBody()) 3085 tw.timedOut = true 3086 return 3087 } 3088 } 3089 3090 type timeoutWriter struct { 3091 w ResponseWriter 3092 h Header 3093 wbuf bytes.Buffer 3094 3095 mu sync.Mutex 3096 timedOut bool 3097 wroteHeader bool 3098 code int 3099 } 3100 3101 func (tw *timeoutWriter) Header() Header { return tw.h } 3102 3103 func (tw *timeoutWriter) Write(p []byte) (int, error) { 3104 tw.mu.Lock() 3105 defer tw.mu.Unlock() 3106 if tw.timedOut { 3107 return 0, ErrHandlerTimeout 3108 } 3109 if !tw.wroteHeader { 3110 tw.writeHeader(StatusOK) 3111 } 3112 return tw.wbuf.Write(p) 3113 } 3114 3115 func (tw *timeoutWriter) WriteHeader(code int) { 3116 tw.mu.Lock() 3117 defer tw.mu.Unlock() 3118 if tw.timedOut || tw.wroteHeader { 3119 return 3120 } 3121 tw.writeHeader(code) 3122 } 3123 3124 func (tw *timeoutWriter) writeHeader(code int) { 3125 tw.wroteHeader = true 3126 tw.code = code 3127 } 3128 3129 // tcpKeepAliveListener sets TCP keep-alive timeouts on accepted 3130 // connections. It's used by ListenAndServe and ListenAndServeTLS so 3131 // dead TCP connections (e.g. closing laptop mid-download) eventually 3132 // go away. 3133 type tcpKeepAliveListener struct { 3134 *net.TCPListener 3135 } 3136 3137 func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { 3138 tc, err := ln.AcceptTCP() 3139 if err != nil { 3140 return 3141 } 3142 tc.SetKeepAlive(true) 3143 tc.SetKeepAlivePeriod(3 * time.Minute) 3144 return tc, nil 3145 } 3146 3147 // globalOptionsHandler responds to "OPTIONS *" requests. 3148 type globalOptionsHandler struct{} 3149 3150 func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) { 3151 w.Header().Set("Content-Length", "0") 3152 if r.ContentLength != 0 { 3153 // Read up to 4KB of OPTIONS body (as mentioned in the 3154 // spec as being reserved for future use), but anything 3155 // over that is considered a waste of server resources 3156 // (or an attack) and we abort and close the connection, 3157 // courtesy of MaxBytesReader's EOF behavior. 3158 mb := MaxBytesReader(w, r.Body, 4<<10) 3159 io.Copy(ioutil.Discard, mb) 3160 } 3161 } 3162 3163 // initNPNRequest is an HTTP handler that initializes certain 3164 // uninitialized fields in its *Request. Such partially-initialized 3165 // Requests come from NPN protocol handlers. 
3166 type initNPNRequest struct { 3167 c *tls.Conn 3168 h serverHandler 3169 } 3170 3171 func (h initNPNRequest) ServeHTTP(rw ResponseWriter, req *Request) { 3172 if req.TLS == nil { 3173 req.TLS = &tls.ConnectionState{} 3174 *req.TLS = h.c.ConnectionState() 3175 } 3176 if req.Body == nil { 3177 req.Body = NoBody 3178 } 3179 if req.RemoteAddr == "" { 3180 req.RemoteAddr = h.c.RemoteAddr().String() 3181 } 3182 h.h.ServeHTTP(rw, req) 3183 } 3184 3185 // loggingConn is used for debugging. 3186 type loggingConn struct { 3187 name string 3188 net.Conn 3189 } 3190 3191 var ( 3192 uniqNameMu sync.Mutex 3193 uniqNameNext = make(map[string]int) 3194 ) 3195 3196 func newLoggingConn(baseName string, c net.Conn) net.Conn { 3197 uniqNameMu.Lock() 3198 defer uniqNameMu.Unlock() 3199 uniqNameNext[baseName]++ 3200 return &loggingConn{ 3201 name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]), 3202 Conn: c, 3203 } 3204 } 3205 3206 func (c *loggingConn) Write(p []byte) (n int, err error) { 3207 log.Printf("%s.Write(%d) = ....", c.name, len(p)) 3208 n, err = c.Conn.Write(p) 3209 log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err) 3210 return 3211 } 3212 3213 func (c *loggingConn) Read(p []byte) (n int, err error) { 3214 log.Printf("%s.Read(%d) = ....", c.name, len(p)) 3215 n, err = c.Conn.Read(p) 3216 log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err) 3217 return 3218 } 3219 3220 func (c *loggingConn) Close() (err error) { 3221 log.Printf("%s.Close() = ...", c.name) 3222 err = c.Conn.Close() 3223 log.Printf("%s.Close() = %v", c.name, err) 3224 return 3225 } 3226 3227 // checkConnErrorWriter writes to c.rwc and records any write errors to c.werr. 3228 // It only contains one field (and a pointer field at that), so it 3229 // fits in an interface value without an extra allocation. 3230 type checkConnErrorWriter struct { 3231 c *conn 3232 } 3233 3234 func (w checkConnErrorWriter) Write(p []byte) (n int, err error) { 3235 n, err = w.c.rwc.Write(p) 3236 if err != nil && w.c.werr == nil { 3237 w.c.werr = err 3238 w.c.cancelCtx() 3239 } 3240 return 3241 } 3242 3243 func numLeadingCRorLF(v []byte) (n int) { 3244 for _, b := range v { 3245 if b == '\r' || b == '\n' { 3246 n++ 3247 continue 3248 } 3249 break 3250 } 3251 return 3252 3253 } 3254 3255 func strSliceContains(ss []string, s string) bool { 3256 for _, v := range ss { 3257 if v == s { 3258 return true 3259 } 3260 } 3261 return false 3262 }
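
// A graceful-shutdown sketch using Server.Shutdown as documented above (the
// listen address, signal choice, and timeout are arbitrary; error handling
// is minimal):
//
//	srv := &http.Server{Addr: ":8080"}
//
//	idleConnsClosed := make(chan struct{})
//	go func() {
//		sigint := make(chan os.Signal, 1)
//		signal.Notify(sigint, os.Interrupt)
//		<-sigint
//
//		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//		defer cancel()
//		if err := srv.Shutdown(ctx); err != nil {
//			log.Printf("HTTP server Shutdown: %v", err)
//		}
//		close(idleConnsClosed)
//	}()
//
//	if err := srv.ListenAndServe(); err != http.ErrServerClosed {
//		log.Fatalf("HTTP server ListenAndServe: %v", err)
//	}
//	<-idleConnsClosed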