github.com/corona10/go@v0.0.0-20180224231303-7a218942be57/src/net/http/server.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // HTTP server. See RFC 7230 through 7235. 6 7 package http 8 9 import ( 10 "bufio" 11 "bytes" 12 "context" 13 "crypto/tls" 14 "errors" 15 "fmt" 16 "io" 17 "io/ioutil" 18 "log" 19 "net" 20 "net/textproto" 21 "net/url" 22 "os" 23 "path" 24 "runtime" 25 "strconv" 26 "strings" 27 "sync" 28 "sync/atomic" 29 "time" 30 31 "golang_org/x/net/lex/httplex" 32 ) 33 34 // Errors used by the HTTP server. 35 var ( 36 // ErrBodyNotAllowed is returned by ResponseWriter.Write calls 37 // when the HTTP method or response code does not permit a 38 // body. 39 ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body") 40 41 // ErrHijacked is returned by ResponseWriter.Write calls when 42 // the underlying connection has been hijacked using the 43 // Hijacker interface. A zero-byte write on a hijacked 44 // connection will return ErrHijacked without any other side 45 // effects. 46 ErrHijacked = errors.New("http: connection has been hijacked") 47 48 // ErrContentLength is returned by ResponseWriter.Write calls 49 // when a Handler set a Content-Length response header with a 50 // declared size and then attempted to write more bytes than 51 // declared. 52 ErrContentLength = errors.New("http: wrote more than the declared Content-Length") 53 54 // Deprecated: ErrWriteAfterFlush is no longer used. 55 ErrWriteAfterFlush = errors.New("unused") 56 ) 57 58 // A Handler responds to an HTTP request. 59 // 60 // ServeHTTP should write reply headers and data to the ResponseWriter 61 // and then return. Returning signals that the request is finished; it 62 // is not valid to use the ResponseWriter or read from the 63 // Request.Body after or concurrently with the completion of the 64 // ServeHTTP call. 
65 // 66 // Depending on the HTTP client software, HTTP protocol version, and 67 // any intermediaries between the client and the Go server, it may not 68 // be possible to read from the Request.Body after writing to the 69 // ResponseWriter. Cautious handlers should read the Request.Body 70 // first, and then reply. 71 // 72 // Except for reading the body, handlers should not modify the 73 // provided Request. 74 // 75 // If ServeHTTP panics, the server (the caller of ServeHTTP) assumes 76 // that the effect of the panic was isolated to the active request. 77 // It recovers the panic, logs a stack trace to the server error log, 78 // and either closes the network connection or sends an HTTP/2 79 // RST_STREAM, depending on the HTTP protocol. To abort a handler so 80 // the client sees an interrupted response but the server doesn't log 81 // an error, panic with the value ErrAbortHandler. 82 type Handler interface { 83 ServeHTTP(ResponseWriter, *Request) 84 } 85 86 // A ResponseWriter interface is used by an HTTP handler to 87 // construct an HTTP response. 88 // 89 // A ResponseWriter may not be used after the Handler.ServeHTTP method 90 // has returned. 91 type ResponseWriter interface { 92 // Header returns the header map that will be sent by 93 // WriteHeader. The Header map also is the mechanism with which 94 // Handlers can set HTTP trailers. 95 // 96 // Changing the header map after a call to WriteHeader (or 97 // Write) has no effect unless the modified headers are 98 // trailers. 99 // 100 // There are two ways to set Trailers. The preferred way is to 101 // predeclare in the headers which trailers you will later 102 // send by setting the "Trailer" header to the names of the 103 // trailer keys which will come later. In this case, those 104 // keys of the Header map are treated as if they were 105 // trailers. See the example. 
The second way, for trailer 106 // keys not known to the Handler until after the first Write, 107 // is to prefix the Header map keys with the TrailerPrefix 108 // constant value. See TrailerPrefix. 109 // 110 // To suppress implicit response headers (such as "Date"), set 111 // their value to nil. 112 Header() Header 113 114 // Write writes the data to the connection as part of an HTTP reply. 115 // 116 // If WriteHeader has not yet been called, Write calls 117 // WriteHeader(http.StatusOK) before writing the data. If the Header 118 // does not contain a Content-Type line, Write adds a Content-Type set 119 // to the result of passing the initial 512 bytes of written data to 120 // DetectContentType. 121 // 122 // Depending on the HTTP protocol version and the client, calling 123 // Write or WriteHeader may prevent future reads on the 124 // Request.Body. For HTTP/1.x requests, handlers should read any 125 // needed request body data before writing the response. Once the 126 // headers have been flushed (due to either an explicit Flusher.Flush 127 // call or writing enough data to trigger a flush), the request body 128 // may be unavailable. For HTTP/2 requests, the Go HTTP server permits 129 // handlers to continue to read the request body while concurrently 130 // writing the response. However, such behavior may not be supported 131 // by all HTTP/2 clients. Handlers should read before writing if 132 // possible to maximize compatibility. 133 Write([]byte) (int, error) 134 135 // WriteHeader sends an HTTP response header with the provided 136 // status code. 137 // 138 // If WriteHeader is not called explicitly, the first call to Write 139 // will trigger an implicit WriteHeader(http.StatusOK). 140 // Thus explicit calls to WriteHeader are mainly used to 141 // send error codes. 142 // 143 // The provided code must be a valid HTTP 1xx-5xx status code. 144 // Only one header may be written. 
Go does not currently 145 // support sending user-defined 1xx informational headers, 146 // with the exception of 100-continue response header that the 147 // Server sends automatically when the Request.Body is read. 148 WriteHeader(statusCode int) 149 } 150 151 // The Flusher interface is implemented by ResponseWriters that allow 152 // an HTTP handler to flush buffered data to the client. 153 // 154 // The default HTTP/1.x and HTTP/2 ResponseWriter implementations 155 // support Flusher, but ResponseWriter wrappers may not. Handlers 156 // should always test for this ability at runtime. 157 // 158 // Note that even for ResponseWriters that support Flush, 159 // if the client is connected through an HTTP proxy, 160 // the buffered data may not reach the client until the response 161 // completes. 162 type Flusher interface { 163 // Flush sends any buffered data to the client. 164 Flush() 165 } 166 167 // The Hijacker interface is implemented by ResponseWriters that allow 168 // an HTTP handler to take over the connection. 169 // 170 // The default ResponseWriter for HTTP/1.x connections supports 171 // Hijacker, but HTTP/2 connections intentionally do not. 172 // ResponseWriter wrappers may also not support Hijacker. Handlers 173 // should always test for this ability at runtime. 174 type Hijacker interface { 175 // Hijack lets the caller take over the connection. 176 // After a call to Hijack the HTTP server library 177 // will not do anything else with the connection. 178 // 179 // It becomes the caller's responsibility to manage 180 // and close the connection. 181 // 182 // The returned net.Conn may have read or write deadlines 183 // already set, depending on the configuration of the 184 // Server. It is the caller's responsibility to set 185 // or clear those deadlines as needed. 186 // 187 // The returned bufio.Reader may contain unprocessed buffered 188 // data from the client. 
189 // 190 // After a call to Hijack, the original Request.Body must 191 // not be used. 192 Hijack() (net.Conn, *bufio.ReadWriter, error) 193 } 194 195 // The CloseNotifier interface is implemented by ResponseWriters which 196 // allow detecting when the underlying connection has gone away. 197 // 198 // This mechanism can be used to cancel long operations on the server 199 // if the client has disconnected before the response is ready. 200 type CloseNotifier interface { 201 // CloseNotify returns a channel that receives at most a 202 // single value (true) when the client connection has gone 203 // away. 204 // 205 // CloseNotify may wait to notify until Request.Body has been 206 // fully read. 207 // 208 // After the Handler has returned, there is no guarantee 209 // that the channel receives a value. 210 // 211 // If the protocol is HTTP/1.1 and CloseNotify is called while 212 // processing an idempotent request (such a GET) while 213 // HTTP/1.1 pipelining is in use, the arrival of a subsequent 214 // pipelined request may cause a value to be sent on the 215 // returned channel. In practice HTTP/1.1 pipelining is not 216 // enabled in browsers and not seen often in the wild. If this 217 // is a problem, use HTTP/2 or only use CloseNotify on methods 218 // such as POST. 219 CloseNotify() <-chan bool 220 } 221 222 var ( 223 // ServerContextKey is a context key. It can be used in HTTP 224 // handlers with context.WithValue to access the server that 225 // started the handler. The associated value will be of 226 // type *Server. 227 ServerContextKey = &contextKey{"http-server"} 228 229 // LocalAddrContextKey is a context key. It can be used in 230 // HTTP handlers with context.WithValue to access the address 231 // the local address the connection arrived on. 232 // The associated value will be of type net.Addr. 233 LocalAddrContextKey = &contextKey{"local-addr"} 234 ) 235 236 // A conn represents the server side of an HTTP connection. 
237 type conn struct { 238 // server is the server on which the connection arrived. 239 // Immutable; never nil. 240 server *Server 241 242 // cancelCtx cancels the connection-level context. 243 cancelCtx context.CancelFunc 244 245 // rwc is the underlying network connection. 246 // This is never wrapped by other types and is the value given out 247 // to CloseNotifier callers. It is usually of type *net.TCPConn or 248 // *tls.Conn. 249 rwc net.Conn 250 251 // remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously 252 // inside the Listener's Accept goroutine, as some implementations block. 253 // It is populated immediately inside the (*conn).serve goroutine. 254 // This is the value of a Handler's (*Request).RemoteAddr. 255 remoteAddr string 256 257 // tlsState is the TLS connection state when using TLS. 258 // nil means not TLS. 259 tlsState *tls.ConnectionState 260 261 // werr is set to the first write error to rwc. 262 // It is set via checkConnErrorWriter{w}, where bufw writes. 263 werr error 264 265 // r is bufr's read source. It's a wrapper around rwc that provides 266 // io.LimitedReader-style limiting (while reading request headers) 267 // and functionality to support CloseNotifier. See *connReader docs. 268 r *connReader 269 270 // bufr reads from r. 271 bufr *bufio.Reader 272 273 // bufw writes to checkConnErrorWriter{c}, which populates werr on error. 274 bufw *bufio.Writer 275 276 // lastMethod is the method of the most recent request 277 // on this connection, if any. 278 lastMethod string 279 280 curReq atomic.Value // of *response (which has a Request in it) 281 282 curState atomic.Value // of ConnState 283 284 // mu guards hijackedv 285 mu sync.Mutex 286 287 // hijackedv is whether this connection has been hijacked 288 // by a Handler with the Hijacker interface. 289 // It is guarded by mu. 
290 hijackedv bool 291 } 292 293 func (c *conn) hijacked() bool { 294 c.mu.Lock() 295 defer c.mu.Unlock() 296 return c.hijackedv 297 } 298 299 // c.mu must be held. 300 func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) { 301 if c.hijackedv { 302 return nil, nil, ErrHijacked 303 } 304 c.r.abortPendingRead() 305 306 c.hijackedv = true 307 rwc = c.rwc 308 rwc.SetDeadline(time.Time{}) 309 310 buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc)) 311 if c.r.hasByte { 312 if _, err := c.bufr.Peek(c.bufr.Buffered() + 1); err != nil { 313 return nil, nil, fmt.Errorf("unexpected Peek failure reading buffered byte: %v", err) 314 } 315 } 316 c.setState(rwc, StateHijacked) 317 return 318 } 319 320 // This should be >= 512 bytes for DetectContentType, 321 // but otherwise it's somewhat arbitrary. 322 const bufferBeforeChunkingSize = 2048 323 324 // chunkWriter writes to a response's conn buffer, and is the writer 325 // wrapped by the response.bufw buffered writer. 326 // 327 // chunkWriter also is responsible for finalizing the Header, including 328 // conditionally setting the Content-Type and setting a Content-Length 329 // in cases where the handler's final output is smaller than the buffer 330 // size. It also conditionally adds chunk headers, when in chunking mode. 331 // 332 // See the comment above (*response).Write for the entire write flow. 333 type chunkWriter struct { 334 res *response 335 336 // header is either nil or a deep clone of res.handlerHeader 337 // at the time of res.WriteHeader, if res.WriteHeader is 338 // called and extra buffering is being done to calculate 339 // Content-Type and/or Content-Length. 340 header Header 341 342 // wroteHeader tells whether the header's been written to "the 343 // wire" (or rather: w.conn.buf). this is unlike 344 // (*response).wroteHeader, which tells only whether it was 345 // logically written. 
346 wroteHeader bool 347 348 // set by the writeHeader method: 349 chunking bool // using chunked transfer encoding for reply body 350 } 351 352 var ( 353 crlf = []byte("\r\n") 354 colonSpace = []byte(": ") 355 ) 356 357 func (cw *chunkWriter) Write(p []byte) (n int, err error) { 358 if !cw.wroteHeader { 359 cw.writeHeader(p) 360 } 361 if cw.res.req.Method == "HEAD" { 362 // Eat writes. 363 return len(p), nil 364 } 365 if cw.chunking { 366 _, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p)) 367 if err != nil { 368 cw.res.conn.rwc.Close() 369 return 370 } 371 } 372 n, err = cw.res.conn.bufw.Write(p) 373 if cw.chunking && err == nil { 374 _, err = cw.res.conn.bufw.Write(crlf) 375 } 376 if err != nil { 377 cw.res.conn.rwc.Close() 378 } 379 return 380 } 381 382 func (cw *chunkWriter) flush() { 383 if !cw.wroteHeader { 384 cw.writeHeader(nil) 385 } 386 cw.res.conn.bufw.Flush() 387 } 388 389 func (cw *chunkWriter) close() { 390 if !cw.wroteHeader { 391 cw.writeHeader(nil) 392 } 393 if cw.chunking { 394 bw := cw.res.conn.bufw // conn's bufio writer 395 // zero chunk to mark EOF 396 bw.WriteString("0\r\n") 397 if trailers := cw.res.finalTrailers(); trailers != nil { 398 trailers.Write(bw) // the writer handles noting errors 399 } 400 // final blank line after the trailers (whether 401 // present or not) 402 bw.WriteString("\r\n") 403 } 404 } 405 406 // A response represents the server side of an HTTP response. 
407 type response struct { 408 conn *conn 409 req *Request // request for this response 410 reqBody io.ReadCloser 411 cancelCtx context.CancelFunc // when ServeHTTP exits 412 wroteHeader bool // reply header has been (logically) written 413 wroteContinue bool // 100 Continue response was written 414 wants10KeepAlive bool // HTTP/1.0 w/ Connection "keep-alive" 415 wantsClose bool // HTTP request has Connection "close" 416 417 w *bufio.Writer // buffers output in chunks to chunkWriter 418 cw chunkWriter 419 420 // handlerHeader is the Header that Handlers get access to, 421 // which may be retained and mutated even after WriteHeader. 422 // handlerHeader is copied into cw.header at WriteHeader 423 // time, and privately mutated thereafter. 424 handlerHeader Header 425 calledHeader bool // handler accessed handlerHeader via Header 426 427 written int64 // number of bytes written in body 428 contentLength int64 // explicitly-declared Content-Length; or -1 429 status int // status code passed to WriteHeader 430 431 // close connection after this reply. set on request and 432 // updated after response from handler if there's a 433 // "Connection: keep-alive" response header and a 434 // Content-Length. 435 closeAfterReply bool 436 437 // requestBodyLimitHit is set by requestTooLarge when 438 // maxBytesReader hits its max size. It is checked in 439 // WriteHeader, to make sure we don't consume the 440 // remaining request body to try to advance to the next HTTP 441 // request. Instead, when this is set, we stop reading 442 // subsequent requests on this connection and stop reading 443 // input from it. 444 requestBodyLimitHit bool 445 446 // trailers are the headers to be sent after the handler 447 // finishes writing the body. This field is initialized from 448 // the Trailer response header when the response header is 449 // written. 
450 trailers []string 451 452 handlerDone atomicBool // set true when the handler exits 453 454 // Buffers for Date, Content-Length, and status code 455 dateBuf [len(TimeFormat)]byte 456 clenBuf [10]byte 457 statusBuf [3]byte 458 459 // closeNotifyCh is the channel returned by CloseNotify. 460 // TODO(bradfitz): this is currently (for Go 1.8) always 461 // non-nil. Make this lazily-created again as it used to be? 462 closeNotifyCh chan bool 463 didCloseNotify int32 // atomic (only 0->1 winner should send) 464 } 465 466 // TrailerPrefix is a magic prefix for ResponseWriter.Header map keys 467 // that, if present, signals that the map entry is actually for 468 // the response trailers, and not the response headers. The prefix 469 // is stripped after the ServeHTTP call finishes and the values are 470 // sent in the trailers. 471 // 472 // This mechanism is intended only for trailers that are not known 473 // prior to the headers being written. If the set of trailers is fixed 474 // or known before the header is written, the normal Go trailers mechanism 475 // is preferred: 476 // https://golang.org/pkg/net/http/#ResponseWriter 477 // https://golang.org/pkg/net/http/#example_ResponseWriter_trailers 478 const TrailerPrefix = "Trailer:" 479 480 // finalTrailers is called after the Handler exits and returns a non-nil 481 // value if the Handler set any trailers. 
482 func (w *response) finalTrailers() Header { 483 var t Header 484 for k, vv := range w.handlerHeader { 485 if strings.HasPrefix(k, TrailerPrefix) { 486 if t == nil { 487 t = make(Header) 488 } 489 t[strings.TrimPrefix(k, TrailerPrefix)] = vv 490 } 491 } 492 for _, k := range w.trailers { 493 if t == nil { 494 t = make(Header) 495 } 496 for _, v := range w.handlerHeader[k] { 497 t.Add(k, v) 498 } 499 } 500 return t 501 } 502 503 type atomicBool int32 504 505 func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } 506 func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } 507 508 // declareTrailer is called for each Trailer header when the 509 // response header is written. It notes that a header will need to be 510 // written in the trailers at the end of the response. 511 func (w *response) declareTrailer(k string) { 512 k = CanonicalHeaderKey(k) 513 switch k { 514 case "Transfer-Encoding", "Content-Length", "Trailer": 515 // Forbidden by RFC 2616 14.40. 516 // TODO: inconsistent with RFC 7230, section 4.1.2 517 return 518 } 519 w.trailers = append(w.trailers, k) 520 } 521 522 // requestTooLarge is called by maxBytesReader when too much input has 523 // been read from the client. 524 func (w *response) requestTooLarge() { 525 w.closeAfterReply = true 526 w.requestBodyLimitHit = true 527 if !w.wroteHeader { 528 w.Header().Set("Connection", "close") 529 } 530 } 531 532 // needsSniff reports whether a Content-Type still needs to be sniffed. 533 func (w *response) needsSniff() bool { 534 _, haveType := w.handlerHeader["Content-Type"] 535 return !w.cw.wroteHeader && !haveType && w.written < sniffLen 536 } 537 538 // writerOnly hides an io.Writer value's optional ReadFrom method 539 // from io.Copy. 
540 type writerOnly struct { 541 io.Writer 542 } 543 544 func srcIsRegularFile(src io.Reader) (isRegular bool, err error) { 545 switch v := src.(type) { 546 case *os.File: 547 fi, err := v.Stat() 548 if err != nil { 549 return false, err 550 } 551 return fi.Mode().IsRegular(), nil 552 case *io.LimitedReader: 553 return srcIsRegularFile(v.R) 554 default: 555 return 556 } 557 } 558 559 // ReadFrom is here to optimize copying from an *os.File regular file 560 // to a *net.TCPConn with sendfile. 561 func (w *response) ReadFrom(src io.Reader) (n int64, err error) { 562 // Our underlying w.conn.rwc is usually a *TCPConn (with its 563 // own ReadFrom method). If not, or if our src isn't a regular 564 // file, just fall back to the normal copy method. 565 rf, ok := w.conn.rwc.(io.ReaderFrom) 566 regFile, err := srcIsRegularFile(src) 567 if err != nil { 568 return 0, err 569 } 570 if !ok || !regFile { 571 bufp := copyBufPool.Get().(*[]byte) 572 defer copyBufPool.Put(bufp) 573 return io.CopyBuffer(writerOnly{w}, src, *bufp) 574 } 575 576 // sendfile path: 577 578 if !w.wroteHeader { 579 w.WriteHeader(StatusOK) 580 } 581 582 if w.needsSniff() { 583 n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen)) 584 n += n0 585 if err != nil { 586 return n, err 587 } 588 } 589 590 w.w.Flush() // get rid of any previous writes 591 w.cw.flush() // make sure Header is written; flush data to rwc 592 593 // Now that cw has been flushed, its chunking field is guaranteed initialized. 594 if !w.cw.chunking && w.bodyAllowed() { 595 n0, err := rf.ReadFrom(src) 596 n += n0 597 w.written += n0 598 return n, err 599 } 600 601 n0, err := io.Copy(writerOnly{w}, src) 602 n += n0 603 return n, err 604 } 605 606 // debugServerConnections controls whether all server connections are wrapped 607 // with a verbose logging wrapper. 608 const debugServerConnections = false 609 610 // Create new connection from rwc. 
611 func (srv *Server) newConn(rwc net.Conn) *conn { 612 c := &conn{ 613 server: srv, 614 rwc: rwc, 615 } 616 if debugServerConnections { 617 c.rwc = newLoggingConn("server", c.rwc) 618 } 619 return c 620 } 621 622 type readResult struct { 623 n int 624 err error 625 b byte // byte read, if n == 1 626 } 627 628 // connReader is the io.Reader wrapper used by *conn. It combines a 629 // selectively-activated io.LimitedReader (to bound request header 630 // read sizes) with support for selectively keeping an io.Reader.Read 631 // call blocked in a background goroutine to wait for activity and 632 // trigger a CloseNotifier channel. 633 type connReader struct { 634 conn *conn 635 636 mu sync.Mutex // guards following 637 hasByte bool 638 byteBuf [1]byte 639 cond *sync.Cond 640 inRead bool 641 aborted bool // set true before conn.rwc deadline is set to past 642 remain int64 // bytes remaining 643 } 644 645 func (cr *connReader) lock() { 646 cr.mu.Lock() 647 if cr.cond == nil { 648 cr.cond = sync.NewCond(&cr.mu) 649 } 650 } 651 652 func (cr *connReader) unlock() { cr.mu.Unlock() } 653 654 func (cr *connReader) startBackgroundRead() { 655 cr.lock() 656 defer cr.unlock() 657 if cr.inRead { 658 panic("invalid concurrent Body.Read call") 659 } 660 if cr.hasByte { 661 return 662 } 663 cr.inRead = true 664 cr.conn.rwc.SetReadDeadline(time.Time{}) 665 go cr.backgroundRead() 666 } 667 668 func (cr *connReader) backgroundRead() { 669 n, err := cr.conn.rwc.Read(cr.byteBuf[:]) 670 cr.lock() 671 if n == 1 { 672 cr.hasByte = true 673 // We were at EOF already (since we wouldn't be in a 674 // background read otherwise), so this is a pipelined 675 // HTTP request. 676 cr.closeNotifyFromPipelinedRequest() 677 } 678 if ne, ok := err.(net.Error); ok && cr.aborted && ne.Timeout() { 679 // Ignore this error. It's the expected error from 680 // another goroutine calling abortPendingRead. 
681 } else if err != nil { 682 cr.handleReadError(err) 683 } 684 cr.aborted = false 685 cr.inRead = false 686 cr.unlock() 687 cr.cond.Broadcast() 688 } 689 690 func (cr *connReader) abortPendingRead() { 691 cr.lock() 692 defer cr.unlock() 693 if !cr.inRead { 694 return 695 } 696 cr.aborted = true 697 cr.conn.rwc.SetReadDeadline(aLongTimeAgo) 698 for cr.inRead { 699 cr.cond.Wait() 700 } 701 cr.conn.rwc.SetReadDeadline(time.Time{}) 702 } 703 704 func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain } 705 func (cr *connReader) setInfiniteReadLimit() { cr.remain = maxInt64 } 706 func (cr *connReader) hitReadLimit() bool { return cr.remain <= 0 } 707 708 // may be called from multiple goroutines. 709 func (cr *connReader) handleReadError(err error) { 710 cr.conn.cancelCtx() 711 cr.closeNotify() 712 } 713 714 // closeNotifyFromPipelinedRequest simply calls closeNotify. 715 // 716 // This method wrapper is here for documentation. The callers are the 717 // cases where we send on the closenotify channel because of a 718 // pipelined HTTP request, per the previous Go behavior and 719 // documentation (that this "MAY" happen). 720 // 721 // TODO: consider changing this behavior and making context 722 // cancelation and closenotify work the same. 723 func (cr *connReader) closeNotifyFromPipelinedRequest() { 724 cr.closeNotify() 725 } 726 727 // may be called from multiple goroutines. 728 func (cr *connReader) closeNotify() { 729 res, _ := cr.conn.curReq.Load().(*response) 730 if res != nil { 731 if atomic.CompareAndSwapInt32(&res.didCloseNotify, 0, 1) { 732 res.closeNotifyCh <- true 733 } 734 } 735 } 736 737 func (cr *connReader) Read(p []byte) (n int, err error) { 738 cr.lock() 739 if cr.inRead { 740 cr.unlock() 741 if cr.conn.hijacked() { 742 panic("invalid Body.Read call. 
After hijacked, the original Request must not be used") 743 } 744 panic("invalid concurrent Body.Read call") 745 } 746 if cr.hitReadLimit() { 747 cr.unlock() 748 return 0, io.EOF 749 } 750 if len(p) == 0 { 751 cr.unlock() 752 return 0, nil 753 } 754 if int64(len(p)) > cr.remain { 755 p = p[:cr.remain] 756 } 757 if cr.hasByte { 758 p[0] = cr.byteBuf[0] 759 cr.hasByte = false 760 cr.unlock() 761 return 1, nil 762 } 763 cr.inRead = true 764 cr.unlock() 765 n, err = cr.conn.rwc.Read(p) 766 767 cr.lock() 768 cr.inRead = false 769 if err != nil { 770 cr.handleReadError(err) 771 } 772 cr.remain -= int64(n) 773 cr.unlock() 774 775 cr.cond.Broadcast() 776 return n, err 777 } 778 779 var ( 780 bufioReaderPool sync.Pool 781 bufioWriter2kPool sync.Pool 782 bufioWriter4kPool sync.Pool 783 ) 784 785 var copyBufPool = sync.Pool{ 786 New: func() interface{} { 787 b := make([]byte, 32*1024) 788 return &b 789 }, 790 } 791 792 func bufioWriterPool(size int) *sync.Pool { 793 switch size { 794 case 2 << 10: 795 return &bufioWriter2kPool 796 case 4 << 10: 797 return &bufioWriter4kPool 798 } 799 return nil 800 } 801 802 func newBufioReader(r io.Reader) *bufio.Reader { 803 if v := bufioReaderPool.Get(); v != nil { 804 br := v.(*bufio.Reader) 805 br.Reset(r) 806 return br 807 } 808 // Note: if this reader size is ever changed, update 809 // TestHandlerBodyClose's assumptions. 
810 return bufio.NewReader(r) 811 } 812 813 func putBufioReader(br *bufio.Reader) { 814 br.Reset(nil) 815 bufioReaderPool.Put(br) 816 } 817 818 func newBufioWriterSize(w io.Writer, size int) *bufio.Writer { 819 pool := bufioWriterPool(size) 820 if pool != nil { 821 if v := pool.Get(); v != nil { 822 bw := v.(*bufio.Writer) 823 bw.Reset(w) 824 return bw 825 } 826 } 827 return bufio.NewWriterSize(w, size) 828 } 829 830 func putBufioWriter(bw *bufio.Writer) { 831 bw.Reset(nil) 832 if pool := bufioWriterPool(bw.Available()); pool != nil { 833 pool.Put(bw) 834 } 835 } 836 837 // DefaultMaxHeaderBytes is the maximum permitted size of the headers 838 // in an HTTP request. 839 // This can be overridden by setting Server.MaxHeaderBytes. 840 const DefaultMaxHeaderBytes = 1 << 20 // 1 MB 841 842 func (srv *Server) maxHeaderBytes() int { 843 if srv.MaxHeaderBytes > 0 { 844 return srv.MaxHeaderBytes 845 } 846 return DefaultMaxHeaderBytes 847 } 848 849 func (srv *Server) initialReadLimitSize() int64 { 850 return int64(srv.maxHeaderBytes()) + 4096 // bufio slop 851 } 852 853 // wrapper around io.ReadCloser which on first read, sends an 854 // HTTP/1.1 100 Continue header 855 type expectContinueReader struct { 856 resp *response 857 readCloser io.ReadCloser 858 closed bool 859 sawEOF bool 860 } 861 862 func (ecr *expectContinueReader) Read(p []byte) (n int, err error) { 863 if ecr.closed { 864 return 0, ErrBodyReadAfterClose 865 } 866 if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() { 867 ecr.resp.wroteContinue = true 868 ecr.resp.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n") 869 ecr.resp.conn.bufw.Flush() 870 } 871 n, err = ecr.readCloser.Read(p) 872 if err == io.EOF { 873 ecr.sawEOF = true 874 } 875 return 876 } 877 878 func (ecr *expectContinueReader) Close() error { 879 ecr.closed = true 880 return ecr.readCloser.Close() 881 } 882 883 // TimeFormat is the time format to use when generating times in HTTP 884 // headers. 
// It is like time.RFC1123 but hard-codes GMT as the time
// zone. The time being formatted must be in UTC for Format to
// generate the correct format.
//
// For parsing this time format, see ParseTime.
const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"

// appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat)).
// It appends the formatted time to b and returns the extended slice.
func appendTime(b []byte, t time.Time) []byte {
	const days = "SunMonTueWedThuFriSat"
	const months = "JanFebMarAprMayJunJulAugSepOctNovDec"

	t = t.UTC()
	yy, mm, dd := t.Date()
	hh, mn, ss := t.Clock()
	day := days[3*t.Weekday():]
	mon := months[3*(mm-1):]

	return append(b,
		day[0], day[1], day[2], ',', ' ',
		byte('0'+dd/10), byte('0'+dd%10), ' ',
		mon[0], mon[1], mon[2], ' ',
		byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ',
		byte('0'+hh/10), byte('0'+hh%10), ':',
		byte('0'+mn/10), byte('0'+mn%10), ':',
		byte('0'+ss/10), byte('0'+ss%10), ' ',
		'G', 'M', 'T')
}

// errTooLarge is returned by conn.readRequest when the connReader hit
// its configured read limit while parsing the request headers.
var errTooLarge = errors.New("http: request too large")

// readRequest reads the next request from the connection and wraps it
// in a *response ready to be served. It returns ErrHijacked if the
// connection was hijacked, errTooLarge if the headers exceeded the
// server's limit, or a badRequestError for malformed requests.
func (c *conn) readRequest(ctx context.Context) (w *response, err error) {
	if c.hijacked() {
		return nil, ErrHijacked
	}

	var (
		wholeReqDeadline time.Time // or zero if none
		hdrDeadline      time.Time // or zero if none
	)
	t0 := time.Now()
	if d := c.server.readHeaderTimeout(); d != 0 {
		hdrDeadline = t0.Add(d)
	}
	if d := c.server.ReadTimeout; d != 0 {
		wholeReqDeadline = t0.Add(d)
	}
	// Header read uses the (possibly shorter) header deadline; the
	// deadline is widened to wholeReqDeadline below once headers parse.
	c.rwc.SetReadDeadline(hdrDeadline)
	if d := c.server.WriteTimeout; d != 0 {
		// Arm the write deadline only once the request has been read,
		// so slow header reads don't eat into the response window.
		defer func() {
			c.rwc.SetWriteDeadline(time.Now().Add(d))
		}()
	}

	c.r.setReadLimit(c.server.initialReadLimitSize())
	if c.lastMethod == "POST" {
		// RFC 7230 section 3 tolerance for old buggy clients that
		// send extra CR/LF after a POST body.
		peek, _ := c.bufr.Peek(4) // ReadRequest will get err below
		c.bufr.Discard(numLeadingCRorLF(peek))
	}
	req, err := readRequest(c.bufr, keepHostHeader)
	if err != nil {
		if c.r.hitReadLimit() {
			return nil, errTooLarge
		}
		return nil, err
	}

	if !http1ServerSupportsRequest(req) {
		return nil, badRequestError("unsupported protocol version")
	}

	c.lastMethod = req.Method
	c.r.setInfiniteReadLimit()

	hosts, haveHost := req.Header["Host"]
	isH2Upgrade := req.isH2Upgrade()
	// HTTP/1.1 requires a Host header (RFC 7230 5.4), except for the
	// HTTP/2 "PRI" upgrade preface and CONNECT.
	if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade && req.Method != "CONNECT" {
		return nil, badRequestError("missing required Host header")
	}
	if len(hosts) > 1 {
		return nil, badRequestError("too many Host headers")
	}
	if len(hosts) == 1 && !httplex.ValidHostHeader(hosts[0]) {
		return nil, badRequestError("malformed Host header")
	}
	for k, vv := range req.Header {
		if !httplex.ValidHeaderFieldName(k) {
			return nil, badRequestError("invalid header name")
		}
		for _, v := range vv {
			if !httplex.ValidHeaderFieldValue(v) {
				return nil, badRequestError("invalid header value")
			}
		}
	}
	// Host was validated above; Request.Host carries it from here on.
	delete(req.Header, "Host")

	ctx, cancelCtx := context.WithCancel(ctx)
	req.ctx = ctx
	req.RemoteAddr = c.remoteAddr
	req.TLS = c.tlsState
	if body, ok := req.Body.(*body); ok {
		body.doEarlyClose = true
	}

	// Adjust the read deadline if necessary.
	if !hdrDeadline.Equal(wholeReqDeadline) {
		c.rwc.SetReadDeadline(wholeReqDeadline)
	}

	w = &response{
		conn:          c,
		cancelCtx:     cancelCtx,
		req:           req,
		reqBody:       req.Body,
		handlerHeader: make(Header),
		contentLength: -1,
		closeNotifyCh: make(chan bool, 1),

		// We populate these ahead of time so we're not
		// reading from req.Header after their Handler starts
		// and maybe mutates it (Issue 14940)
		wants10KeepAlive: req.wantsHttp10KeepAlive(),
		wantsClose:       req.wantsClose(),
	}
	if isH2Upgrade {
		w.closeAfterReply = true
	}
	w.cw.res = w
	w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize)
	return w, nil
}

// http1ServerSupportsRequest reports whether Go's HTTP/1.x server
// supports the given request.
func http1ServerSupportsRequest(req *Request) bool {
	if req.ProtoMajor == 1 {
		return true
	}
	// Accept "PRI * HTTP/2.0" upgrade requests, so Handlers can
	// wire up their own HTTP/2 upgrades.
	if req.ProtoMajor == 2 && req.ProtoMinor == 0 &&
		req.Method == "PRI" && req.RequestURI == "*" {
		return true
	}
	// Reject HTTP/0.x, and all other HTTP/2+ requests (which
	// aren't encoded in ASCII anyway).
	return false
}

// Header implements ResponseWriter. It returns the handler's header
// map, snapshotting it into cw.header first if the header was already
// logically (but not yet physically) written.
func (w *response) Header() Header {
	if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader {
		// Accessing the header between logically writing it
		// and physically writing it means we need to allocate
		// a clone to snapshot the logically written state.
		w.cw.header = w.handlerHeader.clone()
	}
	w.calledHeader = true
	return w.handlerHeader
}

// maxPostHandlerReadBytes is the max number of Request.Body bytes not
// consumed by a handler that the server will read from the client
// in order to keep a connection alive.
// If there are more bytes than
// this then the server to be paranoid instead sends a "Connection:
// close" response.
//
// This number is approximately what a typical machine's TCP buffer
// size is anyway. (if we have the bytes on the machine, we might as
// well read them)
const maxPostHandlerReadBytes = 256 << 10

// checkWriteHeaderCode panics if code is not a valid three-digit
// HTTP status code for WriteHeader.
func checkWriteHeaderCode(code int) {
	// Issue 22880: require valid WriteHeader status codes.
	// For now we only enforce that it's three digits.
	// In the future we might block things over 599 (600 and above aren't defined
	// at http://httpwg.org/specs/rfc7231.html#status.codes)
	// and we might block under 200 (once we have more mature 1xx support).
	// But for now any three digits.
	//
	// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
	// no equivalent bogus thing we can realistically send in HTTP/2,
	// so we'll consistently panic instead and help people find their bugs
	// early. (We can't return an error from WriteHeader even if we wanted to.)
	if code < 100 || code > 999 {
		panic(fmt.Sprintf("invalid WriteHeader code %v", code))
	}
}

// WriteHeader implements ResponseWriter. It records the status code
// and snapshots the handler's Content-Length, logging (rather than
// failing) on hijacked connections or duplicate calls.
func (w *response) WriteHeader(code int) {
	if w.conn.hijacked() {
		w.conn.server.logf("http: response.WriteHeader on hijacked connection")
		return
	}
	if w.wroteHeader {
		w.conn.server.logf("http: multiple response.WriteHeader calls")
		return
	}
	checkWriteHeaderCode(code)
	w.wroteHeader = true
	w.status = code

	if w.calledHeader && w.cw.header == nil {
		// Snapshot the header map as logically written; the handler
		// still holds a reference to w.handlerHeader and may mutate it.
		w.cw.header = w.handlerHeader.clone()
	}

	if cl := w.handlerHeader.get("Content-Length"); cl != "" {
		v, err := strconv.ParseInt(cl, 10, 64)
		if err == nil && v >= 0 {
			w.contentLength = v
		} else {
			// Invalid or negative Content-Length: log and drop it so
			// the length is computed (or chunked) automatically.
			w.conn.server.logf("http: invalid Content-Length of %q", cl)
			w.handlerHeader.Del("Content-Length")
		}
	}
}

// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader.
// This type is used to avoid extra allocations from cloning and/or populating
// the response Header map and all its 1-element slices.
type extraHeader struct {
	contentType      string
	connection       string
	transferEncoding string
	date             []byte // written if not nil
	contentLength    []byte // written if not nil
}

// Sorted the same as extraHeader.Write's loop.
var extraHeaderKeys = [][]byte{
	[]byte("Content-Type"),
	[]byte("Connection"),
	[]byte("Transfer-Encoding"),
}

var (
	headerContentLength = []byte("Content-Length: ")
	headerDate          = []byte("Date: ")
)

// Write writes the headers described in h to w.
//
// This method has a value receiver, despite the somewhat large size
// of h, because it prevents an allocation. The escape analysis isn't
// smart enough to realize this function doesn't mutate h.
func (h extraHeader) Write(w *bufio.Writer) {
	if h.date != nil {
		w.Write(headerDate)
		w.Write(h.date)
		w.Write(crlf)
	}
	if h.contentLength != nil {
		w.Write(headerContentLength)
		w.Write(h.contentLength)
		w.Write(crlf)
	}
	// Order here matches extraHeaderKeys.
	for i, v := range []string{h.contentType, h.connection, h.transferEncoding} {
		if v != "" {
			w.Write(extraHeaderKeys[i])
			w.Write(colonSpace)
			w.WriteString(v)
			w.Write(crlf)
		}
	}
}

// writeHeader finalizes the header sent to the client and writes it
// to cw.res.conn.bufw.
//
// p is not written by writeHeader, but is the first chunk of the body
// that will be written. It is sniffed for a Content-Type if none is
// set explicitly. It's also used to set the Content-Length, if the
// total body size was small and the handler has already finished
// running.
func (cw *chunkWriter) writeHeader(p []byte) {
	if cw.wroteHeader {
		return
	}
	cw.wroteHeader = true

	w := cw.res
	keepAlivesEnabled := w.conn.server.doKeepAlives()
	isHEAD := w.req.Method == "HEAD"

	// header is written out to w.conn.buf below. Depending on the
	// state of the handler, we either own the map or not. If we
	// don't own it, the exclude map is created lazily for
	// WriteSubset to remove headers. The setHeader struct holds
	// headers we need to add.
	header := cw.header
	owned := header != nil
	if !owned {
		header = w.handlerHeader
	}
	var excludeHeader map[string]bool
	delHeader := func(key string) {
		if owned {
			header.Del(key)
			return
		}
		if _, ok := header[key]; !ok {
			return
		}
		if excludeHeader == nil {
			excludeHeader = make(map[string]bool)
		}
		excludeHeader[key] = true
	}
	var setHeader extraHeader

	// Don't write out the fake "Trailer:foo" keys. See TrailerPrefix.
	trailers := false
	for k := range cw.header {
		if strings.HasPrefix(k, TrailerPrefix) {
			if excludeHeader == nil {
				excludeHeader = make(map[string]bool)
			}
			excludeHeader[k] = true
			trailers = true
		}
	}
	for _, v := range cw.header["Trailer"] {
		trailers = true
		foreachHeaderElement(v, cw.res.declareTrailer)
	}

	te := header.get("Transfer-Encoding")
	hasTE := te != ""

	// If the handler is done but never sent a Content-Length
	// response header and this is our first (and last) write, set
	// it, even to zero. This helps HTTP/1.0 clients keep their
	// "keep-alive" connections alive.
	// Exceptions: 304/204/1xx responses never get Content-Length, and if
	// it was a HEAD request, we don't know the difference between
	// 0 actual bytes and 0 bytes because the handler noticed it
	// was a HEAD request and chose not to write anything. So for
	// HEAD, the handler should either write the Content-Length or
	// write non-zero bytes. If it's actually 0 bytes and the
	// handler never looked at the Request.Method, we just don't
	// send a Content-Length header.
	// Further, we don't send an automatic Content-Length if they
	// set a Transfer-Encoding, because they're generally incompatible.
	if w.handlerDone.isSet() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
		w.contentLength = int64(len(p))
		setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
	}

	// If this was an HTTP/1.0 request with keep-alive and we sent a
	// Content-Length back, we can make this a keep-alive response ...
	if w.wants10KeepAlive && keepAlivesEnabled {
		sentLength := header.get("Content-Length") != ""
		if sentLength && header.get("Connection") == "keep-alive" {
			w.closeAfterReply = false
		}
	}

	// Check for an explicit (and valid) Content-Length header.
	hasCL := w.contentLength != -1

	if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) {
		_, connectionHeaderSet := header["Connection"]
		if !connectionHeaderSet {
			setHeader.connection = "keep-alive"
		}
	} else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose {
		w.closeAfterReply = true
	}

	if header.get("Connection") == "close" || !keepAlivesEnabled {
		w.closeAfterReply = true
	}

	// If the client wanted a 100-continue but we never sent it to
	// them (or, more strictly: we never finished reading their
	// request body), don't reuse this connection because it's now
	// in an unknown state: we might be sending this response at
	// the same time the client is now sending its request body
	// after a timeout. (Some HTTP clients send Expect:
	// 100-continue but knowing that some servers don't support
	// it, the clients set a timer and send the body later anyway)
	// If we haven't seen EOF, we can't skip over the unread body
	// because we don't know if the next bytes on the wire will be
	// the body-following-the-timer or the subsequent request.
	// See Issue 11549.
	if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF {
		w.closeAfterReply = true
	}

	// Per RFC 2616, we should consume the request body before
	// replying, if the handler hasn't already done so. But we
	// don't want to do an unbounded amount of reading here for
	// DoS reasons, so we only try up to a threshold.
	// TODO(bradfitz): where does RFC 2616 say that? See Issue 15527
	// about HTTP/1.x Handlers concurrently reading and writing, like
	// HTTP/2 handlers can do. Maybe this code should be relaxed?
	if w.req.ContentLength != 0 && !w.closeAfterReply {
		var discard, tooBig bool

		switch bdy := w.req.Body.(type) {
		case *expectContinueReader:
			if bdy.resp.wroteContinue {
				discard = true
			}
		case *body:
			bdy.mu.Lock()
			switch {
			case bdy.closed:
				if !bdy.sawEOF {
					// Body was closed in handler with non-EOF error.
					w.closeAfterReply = true
				}
			case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes:
				tooBig = true
			default:
				discard = true
			}
			bdy.mu.Unlock()
		default:
			discard = true
		}

		if discard {
			// Read at most maxPostHandlerReadBytes+1 so we can tell
			// whether the body fit within the threshold.
			_, err := io.CopyN(ioutil.Discard, w.reqBody, maxPostHandlerReadBytes+1)
			switch err {
			case nil:
				// There must be even more data left over.
				tooBig = true
			case ErrBodyReadAfterClose:
				// Body was already consumed and closed.
			case io.EOF:
				// The remaining body was just consumed, close it.
				err = w.reqBody.Close()
				if err != nil {
					w.closeAfterReply = true
				}
			default:
				// Some other kind of error occurred, like a read timeout, or
				// corrupt chunked encoding. In any case, whatever remains
				// on the wire must not be parsed as another HTTP request.
				w.closeAfterReply = true
			}
		}

		if tooBig {
			w.requestTooLarge()
			delHeader("Connection")
			setHeader.connection = "close"
		}
	}

	code := w.status
	if bodyAllowedForStatus(code) {
		// If no content type, apply sniffing algorithm to body.
		_, haveType := header["Content-Type"]
		if !haveType && !hasTE && len(p) > 0 {
			setHeader.contentType = DetectContentType(p)
		}
	} else {
		for _, k := range suppressedHeaders(code) {
			delHeader(k)
		}
	}

	if _, ok := header["Date"]; !ok {
		setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
	}

	if hasCL && hasTE && te != "identity" {
		// TODO: return an error if WriteHeader gets a return parameter
		// For now just ignore the Content-Length.
		w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
			te, w.contentLength)
		delHeader("Content-Length")
		hasCL = false
	}

	if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) {
		// do nothing
	} else if code == StatusNoContent {
		delHeader("Transfer-Encoding")
	} else if hasCL {
		delHeader("Transfer-Encoding")
	} else if w.req.ProtoAtLeast(1, 1) {
		// HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no
		// content-length has been provided. The connection must be closed after the
		// reply is written, and no chunking is to be done. This is the setup
		// recommended in the Server-Sent Events candidate recommendation 11,
		// section 8.
		if hasTE && te == "identity" {
			cw.chunking = false
			w.closeAfterReply = true
		} else {
			// HTTP/1.1 or greater: use chunked transfer encoding
			// to avoid closing the connection at EOF.
			cw.chunking = true
			setHeader.transferEncoding = "chunked"
			if hasTE && te == "chunked" {
				// We will send the chunked Transfer-Encoding header later.
				delHeader("Transfer-Encoding")
			}
		}
	} else {
		// HTTP version < 1.1: cannot do chunked transfer
		// encoding and we don't know the Content-Length so
		// signal EOF by closing connection.
		w.closeAfterReply = true
		delHeader("Transfer-Encoding") // in case already set
	}

	// Cannot use Content-Length with non-identity Transfer-Encoding.
	if cw.chunking {
		delHeader("Content-Length")
	}
	if !w.req.ProtoAtLeast(1, 0) {
		return
	}

	if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) {
		delHeader("Connection")
		if w.req.ProtoAtLeast(1, 1) {
			setHeader.connection = "close"
		}
	}

	writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:])
	cw.header.WriteSubset(w.conn.bufw, excludeHeader)
	setHeader.Write(w.conn.bufw)
	w.conn.bufw.Write(crlf)
}

// foreachHeaderElement splits v according to the "#rule" construction
// in RFC 7230 section 7 and calls fn for each non-empty element.
func foreachHeaderElement(v string, fn func(string)) {
	v = textproto.TrimString(v)
	if v == "" {
		return
	}
	if !strings.Contains(v, ",") {
		// Fast path: single element, no split needed.
		fn(v)
		return
	}
	for _, f := range strings.Split(v, ",") {
		if f = textproto.TrimString(f); f != "" {
			fn(f)
		}
	}
}

// writeStatusLine writes an HTTP/1.x Status-Line (RFC 7230 Section 3.1.2)
// to bw. is11 is whether the HTTP request is HTTP/1.1. false means HTTP/1.0.
// code is the response status code.
// scratch is an optional scratch buffer. If it has at least capacity 3, it's used.
func writeStatusLine(bw *bufio.Writer, is11 bool, code int, scratch []byte) {
	if is11 {
		bw.WriteString("HTTP/1.1 ")
	} else {
		bw.WriteString("HTTP/1.0 ")
	}
	if text, ok := statusText[code]; ok {
		bw.Write(strconv.AppendInt(scratch[:0], int64(code), 10))
		bw.WriteByte(' ')
		bw.WriteString(text)
		bw.WriteString("\r\n")
	} else {
		// don't worry about performance
		fmt.Fprintf(bw, "%03d status code %d\r\n", code, code)
	}
}

// bodyAllowed reports whether a Write is allowed for this response type.
// It's illegal to call this before the header has been flushed.
func (w *response) bodyAllowed() bool {
	if !w.wroteHeader {
		panic("")
	}
	return bodyAllowedForStatus(w.status)
}

// The Life Of A Write is like this:
//
// Handler starts. No header has been sent. The handler can either
// write a header, or just start writing. Writing before sending a header
// sends an implicitly empty 200 OK header.
//
// If the handler didn't declare a Content-Length up front, we either
// go into chunking mode or, if the handler finishes running before
// the chunking buffer size, we compute a Content-Length and send that
// in the header instead.
//
// Likewise, if the handler didn't set a Content-Type, we sniff that
// from the initial chunk of output.
//
// The Writers are wired together like:
//
// 1. *response (the ResponseWriter) ->
// 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes
// 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type)
//    and which writes the chunk headers, if needed.
// 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to ->
// 5. checkConnErrorWriter{c}, which notes any non-nil error on Write
//    and populates c.werr with it if so. but otherwise writes to:
// 6. the rwc, the net.Conn.
//
// TODO(bradfitz): short-circuit some of the buffering when the
// initial header contains both a Content-Type and Content-Length.
// Also short-circuit in (1) when the header's been sent and not in
// chunking mode, writing directly to (4) instead, if (2) has no
// buffered data. More generally, we could short-circuit from (1) to
// (3) even in chunking mode if the write size from (1) is over some
// threshold and nothing is in (2).  The answer might be mostly making
// bufferBeforeChunkingSize smaller and having bufio's fast-paths deal
// with this instead.
func (w *response) Write(data []byte) (n int, err error) {
	return w.write(len(data), data, "")
}

// WriteString implements io.StringWriter, avoiding a []byte conversion.
func (w *response) WriteString(data string) (n int, err error) {
	return w.write(len(data), nil, data)
}

// write is the shared implementation of Write and WriteString.
// Exactly one of dataB or dataS carries the payload (either dataB or
// dataS is non-zero).
func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) {
	if w.conn.hijacked() {
		if lenData > 0 {
			w.conn.server.logf("http: response.Write on hijacked connection")
		}
		return 0, ErrHijacked
	}
	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}
	if lenData == 0 {
		return 0, nil
	}
	if !w.bodyAllowed() {
		return 0, ErrBodyNotAllowed
	}

	w.written += int64(lenData) // ignoring errors, for errorKludge
	if w.contentLength != -1 && w.written > w.contentLength {
		return 0, ErrContentLength
	}
	if dataB != nil {
		return w.w.Write(dataB)
	} else {
		return w.w.WriteString(dataS)
	}
}

// finishRequest marks the handler as done, flushes all buffered output
// down the writer chain, and closes the request body.
func (w *response) finishRequest() {
	w.handlerDone.setTrue()

	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}

	w.w.Flush()
	putBufioWriter(w.w)
	w.cw.close()
	w.conn.bufw.Flush()

	w.conn.r.abortPendingRead()

	// Close the body (regardless of w.closeAfterReply) so we can
	// re-use its bufio.Reader later safely.
	w.reqBody.Close()

	if w.req.MultipartForm != nil {
		w.req.MultipartForm.RemoveAll()
	}
}

// shouldReuseConnection reports whether the underlying TCP connection can be reused.
// It must only be called after the handler is done executing.
func (w *response) shouldReuseConnection() bool {
	if w.closeAfterReply {
		// The request or something set while executing the
		// handler indicated we shouldn't reuse this
		// connection.
		return false
	}

	if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written {
		// Did not write enough. Avoid getting out of sync.
		return false
	}

	// There was some error writing to the underlying connection
	// during the request, so don't re-use this conn.
	if w.conn.werr != nil {
		return false
	}

	if w.closedRequestBodyEarly() {
		return false
	}

	return true
}

// closedRequestBodyEarly reports whether the handler closed the
// request body before reading it to EOF.
func (w *response) closedRequestBodyEarly() bool {
	body, ok := w.req.Body.(*body)
	return ok && body.didEarlyClose()
}

// Flush implements the Flusher interface, sending any buffered data
// (and an implicit 200 OK header if none was written) to the client.
func (w *response) Flush() {
	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}
	w.w.Flush()
	w.cw.flush()
}

// finalFlush flushes remaining output and returns the conn's bufio
// Reader/Writer to their pools before the connection is closed.
func (c *conn) finalFlush() {
	if c.bufr != nil {
		// Steal the bufio.Reader (~4KB worth of memory) and its associated
		// reader for a future connection.
		putBufioReader(c.bufr)
		c.bufr = nil
	}

	if c.bufw != nil {
		c.bufw.Flush()
		// Steal the bufio.Writer (~4KB worth of memory) and its associated
		// writer for a future connection.
		putBufioWriter(c.bufw)
		c.bufw = nil
	}
}

// Close the connection.
func (c *conn) close() {
	c.finalFlush()
	c.rwc.Close()
}

// rstAvoidanceDelay is the amount of time we sleep after closing the
// write side of a TCP connection before closing the entire socket.
// By sleeping, we increase the chances that the client sees our FIN
// and processes its final data before they process the subsequent RST
// from closing a connection with known unread data.
// This RST seems to occur mostly on BSD systems. (And Windows?)
// This timeout is somewhat arbitrary (~latency around the planet).
const rstAvoidanceDelay = 500 * time.Millisecond

type closeWriter interface {
	CloseWrite() error
}

// Compile-time check that *net.TCPConn supports the half-close used below.
var _ closeWriter = (*net.TCPConn)(nil)

// closeWrite flushes any outstanding data and sends a FIN packet (if
// client is connected via TCP), signalling that we're done. We then
// pause for a bit, hoping the client processes it before any
// subsequent RST.
//
// See https://golang.org/issue/3595
func (c *conn) closeWriteAndWait() {
	c.finalFlush()
	if tcp, ok := c.rwc.(closeWriter); ok {
		tcp.CloseWrite()
	}
	time.Sleep(rstAvoidanceDelay)
}

// validNPN reports whether the proto is not a blacklisted Next
// Protocol Negotiation protocol. Empty and built-in protocol types
// are blacklisted and can't be overridden with alternate
// implementations.
func validNPN(proto string) bool {
	switch proto {
	case "", "http/1.1", "http/1.0":
		return false
	}
	return true
}

// setState records the connection's new ConnState, updates the
// server's connection tracking, and invokes the ConnState hook if set.
func (c *conn) setState(nc net.Conn, state ConnState) {
	srv := c.server
	switch state {
	case StateNew:
		srv.trackConn(c, true)
	case StateHijacked, StateClosed:
		srv.trackConn(c, false)
	}
	c.curState.Store(connStateInterface[state])
	if hook := srv.ConnState; hook != nil {
		hook(nc, state)
	}
}

// connStateInterface is an array of the interface{} versions of
// ConnState values, so we can use them in atomic.Values later without
// paying the cost of shoving their integers in an interface{}.
var connStateInterface = [...]interface{}{
	StateNew:      StateNew,
	StateActive:   StateActive,
	StateIdle:     StateIdle,
	StateHijacked: StateHijacked,
	StateClosed:   StateClosed,
}

// badRequestError is a literal string (used by in the server in HTML,
// unescaped) to tell the user why their request was bad. It should
// be plain text without user info or other embedded errors.
type badRequestError string

func (e badRequestError) Error() string { return "Bad Request: " + string(e) }

// ErrAbortHandler is a sentinel panic value to abort a handler.
// While any panic from ServeHTTP aborts the response to the client,
// panicking with ErrAbortHandler also suppresses logging of a stack
// trace to the server's error log.
var ErrAbortHandler = errors.New("net/http: abort Handler")

// isCommonNetReadError reports whether err is a common error
// encountered during reading a request off the network when the
// client has gone away or had its read fail somehow. This is used to
// determine which logs are interesting enough to log about.
func isCommonNetReadError(err error) bool {
	if err == io.EOF {
		return true
	}
	if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
		return true
	}
	if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
		return true
	}
	return false
}

// Serve a new connection.
func (c *conn) serve(ctx context.Context) {
	c.remoteAddr = c.rwc.RemoteAddr().String()
	ctx = context.WithValue(ctx, LocalAddrContextKey, c.rwc.LocalAddr())
	defer func() {
		// Recover handler panics (except ErrAbortHandler, which is a
		// deliberate abort) so one request can't take down the server.
		if err := recover(); err != nil && err != ErrAbortHandler {
			const size = 64 << 10
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf)
		}
		if !c.hijacked() {
			c.close()
			c.setState(c.rwc, StateClosed)
		}
	}()

	if tlsConn, ok := c.rwc.(*tls.Conn); ok {
		if d := c.server.ReadTimeout; d != 0 {
			c.rwc.SetReadDeadline(time.Now().Add(d))
		}
		if d := c.server.WriteTimeout; d != 0 {
			c.rwc.SetWriteDeadline(time.Now().Add(d))
		}
		if err := tlsConn.Handshake(); err != nil {
			c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err)
			return
		}
		c.tlsState = new(tls.ConnectionState)
		*c.tlsState = tlsConn.ConnectionState()
		// Hand the connection to an alternate protocol handler (e.g.
		// HTTP/2) if one was negotiated via NPN/ALPN.
		if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) {
			if fn := c.server.TLSNextProto[proto]; fn != nil {
				h := initNPNRequest{tlsConn, serverHandler{c.server}}
				fn(c.server, tlsConn, h)
			}
			return
		}
	}

	// HTTP/1.x from here on.

	ctx, cancelCtx := context.WithCancel(ctx)
	c.cancelCtx = cancelCtx
	defer cancelCtx()

	c.r = &connReader{conn: c}
	c.bufr = newBufioReader(c.r)
	c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10)

	for {
		w, err := c.readRequest(ctx)
		if c.r.remain != c.server.initialReadLimitSize() {
			// If we read any bytes off the wire, we're active.
			c.setState(c.rwc, StateActive)
		}
		if err != nil {
			const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n"

			if err == errTooLarge {
				// Their HTTP client may or may not be
				// able to read this if we're
				// responding to them and hanging up
				// while they're still writing their
				// request. Undefined behavior.
				const publicErr = "431 Request Header Fields Too Large"
				fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
				c.closeWriteAndWait()
				return
			}
			if isCommonNetReadError(err) {
				return // don't reply
			}

			publicErr := "400 Bad Request"
			if v, ok := err.(badRequestError); ok {
				publicErr = publicErr + ": " + string(v)
			}

			fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
			return
		}

		// Expect 100 Continue support
		req := w.req
		if req.expectsContinue() {
			if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 {
				// Wrap the Body reader with one that replies on the connection
				req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
			}
		} else if req.Header.get("Expect") != "" {
			w.sendExpectationFailed()
			return
		}

		c.curReq.Store(w)

		if requestBodyRemains(req.Body) {
			registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead)
		} else {
			if w.conn.bufr.Buffered() > 0 {
				w.conn.r.closeNotifyFromPipelinedRequest()
			}
			w.conn.r.startBackgroundRead()
		}

		// HTTP cannot have multiple simultaneous active requests.[*]
		// Until the server replies to this request, it can't read another,
		// so we might as well run the handler in this goroutine.
		// [*] Not strictly true: HTTP pipelining. We could let them all process
		// in parallel even if their responses need to be serialized.
		// But we're not going to implement HTTP pipelining because it
		// was never deployed in the wild and the answer is HTTP/2.
		serverHandler{c.server}.ServeHTTP(w, w.req)
		w.cancelCtx()
		if c.hijacked() {
			return
		}
		w.finishRequest()
		if !w.shouldReuseConnection() {
			if w.requestBodyLimitHit || w.closedRequestBodyEarly() {
				c.closeWriteAndWait()
			}
			return
		}
		c.setState(c.rwc, StateIdle)
		c.curReq.Store((*response)(nil))

		if !w.conn.server.doKeepAlives() {
			// We're in shutdown mode. We might've replied
			// to the user without "Connection: close" and
			// they might think they can send another
			// request, but such is life with HTTP/1.1.
			return
		}

		if d := c.server.idleTimeout(); d != 0 {
			c.rwc.SetReadDeadline(time.Now().Add(d))
			if _, err := c.bufr.Peek(4); err != nil {
				return
			}
		}
		c.rwc.SetReadDeadline(time.Time{})
	}
}

// sendExpectationFailed replies 417 Expectation Failed and arranges
// for the connection to close.
func (w *response) sendExpectationFailed() {
	// TODO(bradfitz): let ServeHTTP handlers handle
	// requests with non-standard expectation[s]? Seems
	// theoretical at best, and doesn't fit into the
	// current ServeHTTP model anyway. We'd need to
	// make the ResponseWriter an optional
	// "ExpectReplier" interface or something.
	//
	// For now we'll just obey RFC 7231 5.1.1 which says
	// "A server that receives an Expect field-value other
	// than 100-continue MAY respond with a 417 (Expectation
	// Failed) status code to indicate that the unexpected
	// expectation cannot be met."
	w.Header().Set("Connection", "close")
	w.WriteHeader(StatusExpectationFailed)
	w.finishRequest()
}

// Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter
// and a Hijacker.
func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
	if w.handlerDone.isSet() {
		panic("net/http: Hijack called after ServeHTTP finished")
	}
	if w.wroteHeader {
		w.cw.flush()
	}

	c := w.conn
	c.mu.Lock()
	defer c.mu.Unlock()

	// Release the bufioWriter that writes to the chunk writer, it is not
	// used after a connection has been hijacked.
	rwc, buf, err = c.hijackLocked()
	if err == nil {
		putBufioWriter(w.w)
		w.w = nil
	}
	return rwc, buf, err
}

// CloseNotify implements the CloseNotifier interface.
func (w *response) CloseNotify() <-chan bool {
	if w.handlerDone.isSet() {
		panic("net/http: CloseNotify called after ServeHTTP finished")
	}
	return w.closeNotifyCh
}

// registerOnHitEOF arranges for fn to run when rc (the request body,
// possibly wrapped) reaches EOF.
func registerOnHitEOF(rc io.ReadCloser, fn func()) {
	switch v := rc.(type) {
	case *expectContinueReader:
		registerOnHitEOF(v.readCloser, fn)
	case *body:
		v.registerOnHitEOF(fn)
	default:
		panic("unexpected type " + fmt.Sprintf("%T", rc))
	}
}

// requestBodyRemains reports whether future calls to Read
// on rc might yield more data.
func requestBodyRemains(rc io.ReadCloser) bool {
	if rc == NoBody {
		return false
	}
	switch v := rc.(type) {
	case *expectContinueReader:
		return requestBodyRemains(v.readCloser)
	case *body:
		return v.bodyRemains()
	default:
		panic("unexpected type " + fmt.Sprintf("%T", rc))
	}
}

// The HandlerFunc type is an adapter to allow the use of
// ordinary functions as HTTP handlers. If f is a function
// with the appropriate signature, HandlerFunc(f) is a
// Handler that calls f.
1944 type HandlerFunc func(ResponseWriter, *Request) 1945 1946 // ServeHTTP calls f(w, r). 1947 func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) { 1948 f(w, r) 1949 } 1950 1951 // Helper handlers 1952 1953 // Error replies to the request with the specified error message and HTTP code. 1954 // It does not otherwise end the request; the caller should ensure no further 1955 // writes are done to w. 1956 // The error message should be plain text. 1957 func Error(w ResponseWriter, error string, code int) { 1958 w.Header().Set("Content-Type", "text/plain; charset=utf-8") 1959 w.Header().Set("X-Content-Type-Options", "nosniff") 1960 w.WriteHeader(code) 1961 fmt.Fprintln(w, error) 1962 } 1963 1964 // NotFound replies to the request with an HTTP 404 not found error. 1965 func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) } 1966 1967 // NotFoundHandler returns a simple request handler 1968 // that replies to each request with a ``404 page not found'' reply. 1969 func NotFoundHandler() Handler { return HandlerFunc(NotFound) } 1970 1971 // StripPrefix returns a handler that serves HTTP requests 1972 // by removing the given prefix from the request URL's Path 1973 // and invoking the handler h. StripPrefix handles a 1974 // request for a path that doesn't begin with prefix by 1975 // replying with an HTTP 404 not found error. 1976 func StripPrefix(prefix string, h Handler) Handler { 1977 if prefix == "" { 1978 return h 1979 } 1980 return HandlerFunc(func(w ResponseWriter, r *Request) { 1981 if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) { 1982 r2 := new(Request) 1983 *r2 = *r 1984 r2.URL = new(url.URL) 1985 *r2.URL = *r.URL 1986 r2.URL.Path = p 1987 h.ServeHTTP(w, r2) 1988 } else { 1989 NotFound(w, r) 1990 } 1991 }) 1992 } 1993 1994 // Redirect replies to the request with a redirect to url, 1995 // which may be a path relative to the request path. 
1996 // 1997 // The provided code should be in the 3xx range and is usually 1998 // StatusMovedPermanently, StatusFound or StatusSeeOther. 1999 func Redirect(w ResponseWriter, r *Request, url string, code int) { 2000 // parseURL is just url.Parse (url is shadowed for godoc). 2001 if u, err := parseURL(url); err == nil { 2002 // If url was relative, make its path absolute by 2003 // combining with request path. 2004 // The client would probably do this for us, 2005 // but doing it ourselves is more reliable. 2006 // See RFC 7231, section 7.1.2 2007 if u.Scheme == "" && u.Host == "" { 2008 oldpath := r.URL.Path 2009 if oldpath == "" { // should not happen, but avoid a crash if it does 2010 oldpath = "/" 2011 } 2012 2013 // no leading http://server 2014 if url == "" || url[0] != '/' { 2015 // make relative path absolute 2016 olddir, _ := path.Split(oldpath) 2017 url = olddir + url 2018 } 2019 2020 var query string 2021 if i := strings.Index(url, "?"); i != -1 { 2022 url, query = url[:i], url[i:] 2023 } 2024 2025 // clean up but preserve trailing slash 2026 trailing := strings.HasSuffix(url, "/") 2027 url = path.Clean(url) 2028 if trailing && !strings.HasSuffix(url, "/") { 2029 url += "/" 2030 } 2031 url += query 2032 } 2033 } 2034 2035 w.Header().Set("Location", hexEscapeNonASCII(url)) 2036 if r.Method == "GET" || r.Method == "HEAD" { 2037 w.Header().Set("Content-Type", "text/html; charset=utf-8") 2038 } 2039 w.WriteHeader(code) 2040 2041 // RFC 7231 notes that a short hypertext note is usually included in 2042 // the response because older user agents may not understand 301/307. 2043 // Shouldn't send the response for POST or HEAD; that leaves GET. 2044 if r.Method == "GET" { 2045 note := "<a href=\"" + htmlEscape(url) + "\">" + statusText[code] + "</a>.\n" 2046 fmt.Fprintln(w, note) 2047 } 2048 } 2049 2050 // parseURL is just url.Parse. It exists only so that url.Parse can be called 2051 // in places where url is shadowed for godoc. See https://golang.org/cl/49930. 
2052 var parseURL = url.Parse 2053 2054 var htmlReplacer = strings.NewReplacer( 2055 "&", "&", 2056 "<", "<", 2057 ">", ">", 2058 // """ is shorter than """. 2059 `"`, """, 2060 // "'" is shorter than "'" and apos was not in HTML until HTML5. 2061 "'", "'", 2062 ) 2063 2064 func htmlEscape(s string) string { 2065 return htmlReplacer.Replace(s) 2066 } 2067 2068 // Redirect to a fixed URL 2069 type redirectHandler struct { 2070 url string 2071 code int 2072 } 2073 2074 func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) { 2075 Redirect(w, r, rh.url, rh.code) 2076 } 2077 2078 // RedirectHandler returns a request handler that redirects 2079 // each request it receives to the given url using the given 2080 // status code. 2081 // 2082 // The provided code should be in the 3xx range and is usually 2083 // StatusMovedPermanently, StatusFound or StatusSeeOther. 2084 func RedirectHandler(url string, code int) Handler { 2085 return &redirectHandler{url, code} 2086 } 2087 2088 // ServeMux is an HTTP request multiplexer. 2089 // It matches the URL of each incoming request against a list of registered 2090 // patterns and calls the handler for the pattern that 2091 // most closely matches the URL. 2092 // 2093 // Patterns name fixed, rooted paths, like "/favicon.ico", 2094 // or rooted subtrees, like "/images/" (note the trailing slash). 2095 // Longer patterns take precedence over shorter ones, so that 2096 // if there are handlers registered for both "/images/" 2097 // and "/images/thumbnails/", the latter handler will be 2098 // called for paths beginning "/images/thumbnails/" and the 2099 // former will receive requests for any other paths in the 2100 // "/images/" subtree. 2101 // 2102 // Note that since a pattern ending in a slash names a rooted subtree, 2103 // the pattern "/" matches all paths not matched by other registered 2104 // patterns, not just the URL with Path == "/". 
2105 // 2106 // If a subtree has been registered and a request is received naming the 2107 // subtree root without its trailing slash, ServeMux redirects that 2108 // request to the subtree root (adding the trailing slash). This behavior can 2109 // be overridden with a separate registration for the path without 2110 // the trailing slash. For example, registering "/images/" causes ServeMux 2111 // to redirect a request for "/images" to "/images/", unless "/images" has 2112 // been registered separately. 2113 // 2114 // Patterns may optionally begin with a host name, restricting matches to 2115 // URLs on that host only. Host-specific patterns take precedence over 2116 // general patterns, so that a handler might register for the two patterns 2117 // "/codesearch" and "codesearch.google.com/" without also taking over 2118 // requests for "http://www.google.com/". 2119 // 2120 // ServeMux also takes care of sanitizing the URL request path, 2121 // redirecting any request containing . or .. elements or repeated slashes 2122 // to an equivalent, cleaner URL. 2123 type ServeMux struct { 2124 mu sync.RWMutex 2125 m map[string]muxEntry 2126 hosts bool // whether any patterns contain hostnames 2127 } 2128 2129 type muxEntry struct { 2130 h Handler 2131 pattern string 2132 } 2133 2134 // NewServeMux allocates and returns a new ServeMux. 2135 func NewServeMux() *ServeMux { return new(ServeMux) } 2136 2137 // DefaultServeMux is the default ServeMux used by Serve. 2138 var DefaultServeMux = &defaultServeMux 2139 2140 var defaultServeMux ServeMux 2141 2142 // Does path match pattern? 2143 func pathMatch(pattern, path string) bool { 2144 if len(pattern) == 0 { 2145 // should not happen 2146 return false 2147 } 2148 n := len(pattern) 2149 if pattern[n-1] != '/' { 2150 return pattern == path 2151 } 2152 return len(path) >= n && path[0:n] == pattern 2153 } 2154 2155 // Return the canonical path for p, eliminating . and .. elements. 
func cleanPath(p string) string {
	if p == "" {
		return "/"
	}
	if p[0] != '/' {
		p = "/" + p
	}
	np := path.Clean(p)
	// path.Clean removes trailing slash except for root;
	// put the trailing slash back if necessary.
	if p[len(p)-1] == '/' && np != "/" {
		np += "/"
	}
	return np
}

// stripHostPort returns h without any trailing ":<port>".
func stripHostPort(h string) string {
	// If no port on host, return unchanged
	if strings.IndexByte(h, ':') == -1 {
		return h
	}
	host, _, err := net.SplitHostPort(h)
	if err != nil {
		return h // on error, return unchanged
	}
	return host
}

// Find a handler on a handler map given a path string.
// Most-specific (longest) pattern wins.
// The caller must hold mux.mu (at least for reading); see handler.
func (mux *ServeMux) match(path string) (h Handler, pattern string) {
	// Check for exact match first.
	v, ok := mux.m[path]
	if ok {
		return v.h, v.pattern
	}

	// Check for longest valid match.
	// n tracks the length of the longest pattern matched so far.
	var n = 0
	for k, v := range mux.m {
		if !pathMatch(k, path) {
			continue
		}
		if h == nil || len(k) > n {
			n = len(k)
			h = v.h
			pattern = v.pattern
		}
	}
	// Named results: h and pattern are zero ("" / nil) if nothing matched.
	return
}

// redirectToPathSlash determines if the given path needs appending "/" to it.
// This occurs when a handler for path + "/" was already registered, but
// not for path itself. If the path needs appending to, it creates a new
// URL, setting the path to u.Path + "/" and returning true to indicate so.
func (mux *ServeMux) redirectToPathSlash(host, path string, u *url.URL) (*url.URL, bool) {
	if !mux.shouldRedirect(host, path) {
		return u, false
	}
	path = path + "/"
	// Only Path and RawQuery are carried over into the redirect target.
	u = &url.URL{Path: path, RawQuery: u.RawQuery}
	return u, true
}

// shouldRedirect reports whether the given path and host should be redirected to
// path+"/". This should happen if a handler is registered for path+"/" but
// not path -- see comments at ServeMux.
func (mux *ServeMux) shouldRedirect(host, path string) bool {
	// Candidate registrations: the bare path and the host-qualified path.
	p := []string{path, host + path}

	// An exact registration for the path wins: no redirect.
	for _, c := range p {
		if _, exist := mux.m[c]; exist {
			return false
		}
	}

	n := len(path)
	if n == 0 {
		return false
	}
	// Redirect only if a subtree registration for path+"/" exists and
	// the path does not already end in a slash.
	for _, c := range p {
		if _, exist := mux.m[c+"/"]; exist {
			return path[n-1] != '/'
		}
	}

	return false
}

// Handler returns the handler to use for the given request,
// consulting r.Method, r.Host, and r.URL.Path. It always returns
// a non-nil handler. If the path is not in its canonical form, the
// handler will be an internally-generated handler that redirects
// to the canonical path. If the host contains a port, it is ignored
// when matching handlers.
//
// The path and host are used unchanged for CONNECT requests.
//
// Handler also returns the registered pattern that matches the
// request or, in the case of internally-generated redirects,
// the pattern that will match after following the redirect.
//
// If there is no registered handler that applies to the request,
// Handler returns a ``page not found'' handler and an empty pattern.
func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) {

	// CONNECT requests are not canonicalized.
	if r.Method == "CONNECT" {
		// If r.URL.Path is /tree and its handler is not registered,
		// the /tree -> /tree/ redirect applies to CONNECT requests
		// but the path canonicalization does not.
		if u, ok := mux.redirectToPathSlash(r.URL.Host, r.URL.Path, r.URL); ok {
			return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
		}

		return mux.handler(r.Host, r.URL.Path)
	}

	// All other requests have any port stripped and path cleaned
	// before passing to mux.handler.
	host := stripHostPort(r.Host)
	path := cleanPath(r.URL.Path)

	// If the given path is /tree and its handler is not registered,
	// redirect for /tree/.
	if u, ok := mux.redirectToPathSlash(host, path, r.URL); ok {
		return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
	}

	if path != r.URL.Path {
		// Path was not canonical: redirect to the cleaned path, but
		// report the pattern that will match after the redirect.
		_, pattern = mux.handler(host, path)
		url := *r.URL
		url.Path = path
		return RedirectHandler(url.String(), StatusMovedPermanently), pattern
	}

	return mux.handler(host, r.URL.Path)
}

// handler is the main implementation of Handler.
// The path is known to be in canonical form, except for CONNECT methods.
func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) {
	mux.mu.RLock()
	defer mux.mu.RUnlock()

	// Host-specific pattern takes precedence over generic ones
	if mux.hosts {
		h, pattern = mux.match(host + path)
	}
	if h == nil {
		h, pattern = mux.match(path)
	}
	if h == nil {
		h, pattern = NotFoundHandler(), ""
	}
	return
}

// ServeHTTP dispatches the request to the handler whose
// pattern most closely matches the request URL.
func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) {
	if r.RequestURI == "*" {
		// "OPTIONS *" is handled elsewhere (serverHandler); any other
		// method with an asterisk request-target is a bad request.
		if r.ProtoAtLeast(1, 1) {
			w.Header().Set("Connection", "close")
		}
		w.WriteHeader(StatusBadRequest)
		return
	}
	h, _ := mux.Handler(r)
	h.ServeHTTP(w, r)
}

// Handle registers the handler for the given pattern.
// If a handler already exists for pattern, Handle panics.
2332 func (mux *ServeMux) Handle(pattern string, handler Handler) { 2333 mux.mu.Lock() 2334 defer mux.mu.Unlock() 2335 2336 if pattern == "" { 2337 panic("http: invalid pattern") 2338 } 2339 if handler == nil { 2340 panic("http: nil handler") 2341 } 2342 if _, exist := mux.m[pattern]; exist { 2343 panic("http: multiple registrations for " + pattern) 2344 } 2345 2346 if mux.m == nil { 2347 mux.m = make(map[string]muxEntry) 2348 } 2349 mux.m[pattern] = muxEntry{h: handler, pattern: pattern} 2350 2351 if pattern[0] != '/' { 2352 mux.hosts = true 2353 } 2354 } 2355 2356 // HandleFunc registers the handler function for the given pattern. 2357 func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { 2358 mux.Handle(pattern, HandlerFunc(handler)) 2359 } 2360 2361 // Handle registers the handler for the given pattern 2362 // in the DefaultServeMux. 2363 // The documentation for ServeMux explains how patterns are matched. 2364 func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } 2365 2366 // HandleFunc registers the handler function for the given pattern 2367 // in the DefaultServeMux. 2368 // The documentation for ServeMux explains how patterns are matched. 2369 func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { 2370 DefaultServeMux.HandleFunc(pattern, handler) 2371 } 2372 2373 // Serve accepts incoming HTTP connections on the listener l, 2374 // creating a new service goroutine for each. The service goroutines 2375 // read requests and then call handler to reply to them. 2376 // Handler is typically nil, in which case the DefaultServeMux is used. 2377 func Serve(l net.Listener, handler Handler) error { 2378 srv := &Server{Handler: handler} 2379 return srv.Serve(l) 2380 } 2381 2382 // ServeTLS accepts incoming HTTPS connections on the listener l, 2383 // creating a new service goroutine for each. The service goroutines 2384 // read requests and then call handler to reply to them. 
2385 // 2386 // Handler is typically nil, in which case the DefaultServeMux is used. 2387 // 2388 // Additionally, files containing a certificate and matching private key 2389 // for the server must be provided. If the certificate is signed by a 2390 // certificate authority, the certFile should be the concatenation 2391 // of the server's certificate, any intermediates, and the CA's certificate. 2392 func ServeTLS(l net.Listener, handler Handler, certFile, keyFile string) error { 2393 srv := &Server{Handler: handler} 2394 return srv.ServeTLS(l, certFile, keyFile) 2395 } 2396 2397 // A Server defines parameters for running an HTTP server. 2398 // The zero value for Server is a valid configuration. 2399 type Server struct { 2400 Addr string // TCP address to listen on, ":http" if empty 2401 Handler Handler // handler to invoke, http.DefaultServeMux if nil 2402 2403 // TLSConfig optionally provides a TLS configuration for use 2404 // by ServeTLS and ListenAndServeTLS. Note that this value is 2405 // cloned by ServeTLS and ListenAndServeTLS, so it's not 2406 // possible to modify the configuration with methods like 2407 // tls.Config.SetSessionTicketKeys. To use 2408 // SetSessionTicketKeys, use Server.Serve with a TLS Listener 2409 // instead. 2410 TLSConfig *tls.Config 2411 2412 // ReadTimeout is the maximum duration for reading the entire 2413 // request, including the body. 2414 // 2415 // Because ReadTimeout does not let Handlers make per-request 2416 // decisions on each request body's acceptable deadline or 2417 // upload rate, most users will prefer to use 2418 // ReadHeaderTimeout. It is valid to use them both. 2419 ReadTimeout time.Duration 2420 2421 // ReadHeaderTimeout is the amount of time allowed to read 2422 // request headers. The connection's read deadline is reset 2423 // after reading the headers and the Handler can decide what 2424 // is considered too slow for the body. 
2425 ReadHeaderTimeout time.Duration 2426 2427 // WriteTimeout is the maximum duration before timing out 2428 // writes of the response. It is reset whenever a new 2429 // request's header is read. Like ReadTimeout, it does not 2430 // let Handlers make decisions on a per-request basis. 2431 WriteTimeout time.Duration 2432 2433 // IdleTimeout is the maximum amount of time to wait for the 2434 // next request when keep-alives are enabled. If IdleTimeout 2435 // is zero, the value of ReadTimeout is used. If both are 2436 // zero, ReadHeaderTimeout is used. 2437 IdleTimeout time.Duration 2438 2439 // MaxHeaderBytes controls the maximum number of bytes the 2440 // server will read parsing the request header's keys and 2441 // values, including the request line. It does not limit the 2442 // size of the request body. 2443 // If zero, DefaultMaxHeaderBytes is used. 2444 MaxHeaderBytes int 2445 2446 // TLSNextProto optionally specifies a function to take over 2447 // ownership of the provided TLS connection when an NPN/ALPN 2448 // protocol upgrade has occurred. The map key is the protocol 2449 // name negotiated. The Handler argument should be used to 2450 // handle HTTP requests and will initialize the Request's TLS 2451 // and RemoteAddr if not already set. The connection is 2452 // automatically closed when the function returns. 2453 // If TLSNextProto is not nil, HTTP/2 support is not enabled 2454 // automatically. 2455 TLSNextProto map[string]func(*Server, *tls.Conn, Handler) 2456 2457 // ConnState specifies an optional callback function that is 2458 // called when a client connection changes state. See the 2459 // ConnState type and associated constants for details. 2460 ConnState func(net.Conn, ConnState) 2461 2462 // ErrorLog specifies an optional logger for errors accepting 2463 // connections, unexpected behavior from handlers, and 2464 // underlying FileSystem errors. 2465 // If nil, logging is done via the log package's standard logger. 
2466 ErrorLog *log.Logger 2467 2468 disableKeepAlives int32 // accessed atomically. 2469 inShutdown int32 // accessed atomically (non-zero means we're in Shutdown) 2470 nextProtoOnce sync.Once // guards setupHTTP2_* init 2471 nextProtoErr error // result of http2.ConfigureServer if used 2472 2473 mu sync.Mutex 2474 listeners map[net.Listener]struct{} 2475 activeConn map[*conn]struct{} 2476 doneChan chan struct{} 2477 onShutdown []func() 2478 } 2479 2480 func (s *Server) getDoneChan() <-chan struct{} { 2481 s.mu.Lock() 2482 defer s.mu.Unlock() 2483 return s.getDoneChanLocked() 2484 } 2485 2486 func (s *Server) getDoneChanLocked() chan struct{} { 2487 if s.doneChan == nil { 2488 s.doneChan = make(chan struct{}) 2489 } 2490 return s.doneChan 2491 } 2492 2493 func (s *Server) closeDoneChanLocked() { 2494 ch := s.getDoneChanLocked() 2495 select { 2496 case <-ch: 2497 // Already closed. Don't close again. 2498 default: 2499 // Safe to close here. We're the only closer, guarded 2500 // by s.mu. 2501 close(ch) 2502 } 2503 } 2504 2505 // Close immediately closes all active net.Listeners and any 2506 // connections in state StateNew, StateActive, or StateIdle. For a 2507 // graceful shutdown, use Shutdown. 2508 // 2509 // Close does not attempt to close (and does not even know about) 2510 // any hijacked connections, such as WebSockets. 2511 // 2512 // Close returns any error returned from closing the Server's 2513 // underlying Listener(s). 2514 func (srv *Server) Close() error { 2515 srv.mu.Lock() 2516 defer srv.mu.Unlock() 2517 srv.closeDoneChanLocked() 2518 err := srv.closeListenersLocked() 2519 for c := range srv.activeConn { 2520 c.rwc.Close() 2521 delete(srv.activeConn, c) 2522 } 2523 return err 2524 } 2525 2526 // shutdownPollInterval is how often we poll for quiescence 2527 // during Server.Shutdown. This is lower during tests, to 2528 // speed up tests. 
2529 // Ideally we could find a solution that doesn't involve polling, 2530 // but which also doesn't have a high runtime cost (and doesn't 2531 // involve any contentious mutexes), but that is left as an 2532 // exercise for the reader. 2533 var shutdownPollInterval = 500 * time.Millisecond 2534 2535 // Shutdown gracefully shuts down the server without interrupting any 2536 // active connections. Shutdown works by first closing all open 2537 // listeners, then closing all idle connections, and then waiting 2538 // indefinitely for connections to return to idle and then shut down. 2539 // If the provided context expires before the shutdown is complete, 2540 // Shutdown returns the context's error, otherwise it returns any 2541 // error returned from closing the Server's underlying Listener(s). 2542 // 2543 // When Shutdown is called, Serve, ListenAndServe, and 2544 // ListenAndServeTLS immediately return ErrServerClosed. Make sure the 2545 // program doesn't exit and waits instead for Shutdown to return. 2546 // 2547 // Shutdown does not attempt to close nor wait for hijacked 2548 // connections such as WebSockets. The caller of Shutdown should 2549 // separately notify such long-lived connections of shutdown and wait 2550 // for them to close, if desired. See RegisterOnShutdown for a way to 2551 // register shutdown notification functions. 
2552 func (srv *Server) Shutdown(ctx context.Context) error { 2553 atomic.AddInt32(&srv.inShutdown, 1) 2554 defer atomic.AddInt32(&srv.inShutdown, -1) 2555 2556 srv.mu.Lock() 2557 lnerr := srv.closeListenersLocked() 2558 srv.closeDoneChanLocked() 2559 for _, f := range srv.onShutdown { 2560 go f() 2561 } 2562 srv.mu.Unlock() 2563 2564 ticker := time.NewTicker(shutdownPollInterval) 2565 defer ticker.Stop() 2566 for { 2567 if srv.closeIdleConns() { 2568 return lnerr 2569 } 2570 select { 2571 case <-ctx.Done(): 2572 return ctx.Err() 2573 case <-ticker.C: 2574 } 2575 } 2576 } 2577 2578 // RegisterOnShutdown registers a function to call on Shutdown. 2579 // This can be used to gracefully shutdown connections that have 2580 // undergone NPN/ALPN protocol upgrade or that have been hijacked. 2581 // This function should start protocol-specific graceful shutdown, 2582 // but should not wait for shutdown to complete. 2583 func (srv *Server) RegisterOnShutdown(f func()) { 2584 srv.mu.Lock() 2585 srv.onShutdown = append(srv.onShutdown, f) 2586 srv.mu.Unlock() 2587 } 2588 2589 // closeIdleConns closes all idle connections and reports whether the 2590 // server is quiescent. 2591 func (s *Server) closeIdleConns() bool { 2592 s.mu.Lock() 2593 defer s.mu.Unlock() 2594 quiescent := true 2595 for c := range s.activeConn { 2596 st, ok := c.curState.Load().(ConnState) 2597 if !ok || st != StateIdle { 2598 quiescent = false 2599 continue 2600 } 2601 c.rwc.Close() 2602 delete(s.activeConn, c) 2603 } 2604 return quiescent 2605 } 2606 2607 func (s *Server) closeListenersLocked() error { 2608 var err error 2609 for ln := range s.listeners { 2610 if cerr := ln.Close(); cerr != nil && err == nil { 2611 err = cerr 2612 } 2613 delete(s.listeners, ln) 2614 } 2615 return err 2616 } 2617 2618 // A ConnState represents the state of a client connection to a server. 2619 // It's used by the optional Server.ConnState hook. 
2620 type ConnState int 2621 2622 const ( 2623 // StateNew represents a new connection that is expected to 2624 // send a request immediately. Connections begin at this 2625 // state and then transition to either StateActive or 2626 // StateClosed. 2627 StateNew ConnState = iota 2628 2629 // StateActive represents a connection that has read 1 or more 2630 // bytes of a request. The Server.ConnState hook for 2631 // StateActive fires before the request has entered a handler 2632 // and doesn't fire again until the request has been 2633 // handled. After the request is handled, the state 2634 // transitions to StateClosed, StateHijacked, or StateIdle. 2635 // For HTTP/2, StateActive fires on the transition from zero 2636 // to one active request, and only transitions away once all 2637 // active requests are complete. That means that ConnState 2638 // cannot be used to do per-request work; ConnState only notes 2639 // the overall state of the connection. 2640 StateActive 2641 2642 // StateIdle represents a connection that has finished 2643 // handling a request and is in the keep-alive state, waiting 2644 // for a new request. Connections transition from StateIdle 2645 // to either StateActive or StateClosed. 2646 StateIdle 2647 2648 // StateHijacked represents a hijacked connection. 2649 // This is a terminal state. It does not transition to StateClosed. 2650 StateHijacked 2651 2652 // StateClosed represents a closed connection. 2653 // This is a terminal state. Hijacked connections do not 2654 // transition to StateClosed. 2655 StateClosed 2656 ) 2657 2658 var stateName = map[ConnState]string{ 2659 StateNew: "new", 2660 StateActive: "active", 2661 StateIdle: "idle", 2662 StateHijacked: "hijacked", 2663 StateClosed: "closed", 2664 } 2665 2666 func (c ConnState) String() string { 2667 return stateName[c] 2668 } 2669 2670 // serverHandler delegates to either the server's Handler or 2671 // DefaultServeMux and also handles "OPTIONS *" requests. 
2672 type serverHandler struct { 2673 srv *Server 2674 } 2675 2676 func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) { 2677 handler := sh.srv.Handler 2678 if handler == nil { 2679 handler = DefaultServeMux 2680 } 2681 if req.RequestURI == "*" && req.Method == "OPTIONS" { 2682 handler = globalOptionsHandler{} 2683 } 2684 handler.ServeHTTP(rw, req) 2685 } 2686 2687 // ListenAndServe listens on the TCP network address srv.Addr and then 2688 // calls Serve to handle requests on incoming connections. 2689 // Accepted connections are configured to enable TCP keep-alives. 2690 // If srv.Addr is blank, ":http" is used. 2691 // ListenAndServe always returns a non-nil error. 2692 func (srv *Server) ListenAndServe() error { 2693 addr := srv.Addr 2694 if addr == "" { 2695 addr = ":http" 2696 } 2697 ln, err := net.Listen("tcp", addr) 2698 if err != nil { 2699 return err 2700 } 2701 return srv.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)}) 2702 } 2703 2704 var testHookServerServe func(*Server, net.Listener) // used if non-nil 2705 2706 // shouldDoServeHTTP2 reports whether Server.Serve should configure 2707 // automatic HTTP/2. (which sets up the srv.TLSNextProto map) 2708 func (srv *Server) shouldConfigureHTTP2ForServe() bool { 2709 if srv.TLSConfig == nil { 2710 // Compatibility with Go 1.6: 2711 // If there's no TLSConfig, it's possible that the user just 2712 // didn't set it on the http.Server, but did pass it to 2713 // tls.NewListener and passed that listener to Serve. 2714 // So we should configure HTTP/2 (to set up srv.TLSNextProto) 2715 // in case the listener returns an "h2" *tls.Conn. 2716 return true 2717 } 2718 // The user specified a TLSConfig on their http.Server. 2719 // In this, case, only configure HTTP/2 if their tls.Config 2720 // explicitly mentions "h2". Otherwise http2.ConfigureServer 2721 // would modify the tls.Config to add it, but they probably already 2722 // passed this tls.Config to tls.NewListener. 
And if they did, 2723 // it's too late anyway to fix it. It would only be potentially racy. 2724 // See Issue 15908. 2725 return strSliceContains(srv.TLSConfig.NextProtos, http2NextProtoTLS) 2726 } 2727 2728 // ErrServerClosed is returned by the Server's Serve, ServeTLS, ListenAndServe, 2729 // and ListenAndServeTLS methods after a call to Shutdown or Close. 2730 var ErrServerClosed = errors.New("http: Server closed") 2731 2732 // Serve accepts incoming connections on the Listener l, creating a 2733 // new service goroutine for each. The service goroutines read requests and 2734 // then call srv.Handler to reply to them. 2735 // 2736 // For HTTP/2 support, srv.TLSConfig should be initialized to the 2737 // provided listener's TLS Config before calling Serve. If 2738 // srv.TLSConfig is non-nil and doesn't include the string "h2" in 2739 // Config.NextProtos, HTTP/2 support is not enabled. 2740 // 2741 // Serve always returns a non-nil error. After Shutdown or Close, the 2742 // returned error is ErrServerClosed. 
func (srv *Server) Serve(l net.Listener) error {
	defer l.Close()
	if fn := testHookServerServe; fn != nil {
		fn(srv, l) // test hook: observe (srv, l) before serving begins
	}
	var tempDelay time.Duration // how long to sleep on accept failure

	if err := srv.setupHTTP2_Serve(); err != nil {
		return err
	}

	srv.trackListener(l, true)
	defer srv.trackListener(l, false)

	baseCtx := context.Background() // base is always background, per Issue 16220
	ctx := context.WithValue(baseCtx, ServerContextKey, srv)
	for {
		rw, e := l.Accept()
		if e != nil {
			// If the server has been shut down or closed, report
			// that instead of the listener's Accept error.
			select {
			case <-srv.getDoneChan():
				return ErrServerClosed
			default:
			}
			if ne, ok := e.(net.Error); ok && ne.Temporary() {
				// Temporary error (e.g. out of file descriptors):
				// retry with exponential backoff, capped at 1s.
				if tempDelay == 0 {
					tempDelay = 5 * time.Millisecond
				} else {
					tempDelay *= 2
				}
				if max := 1 * time.Second; tempDelay > max {
					tempDelay = max
				}
				srv.logf("http: Accept error: %v; retrying in %v", e, tempDelay)
				time.Sleep(tempDelay)
				continue
			}
			return e
		}
		tempDelay = 0
		c := srv.newConn(rw)
		c.setState(c.rwc, StateNew) // before Serve can return
		go c.serve(ctx)
	}
}

// ServeTLS accepts incoming connections on the Listener l, creating a
// new service goroutine for each. The service goroutines read requests and
// then call srv.Handler to reply to them.
//
// Additionally, files containing a certificate and matching private key for
// the server must be provided if neither the Server's TLSConfig.Certificates
// nor TLSConfig.GetCertificate are populated. If the certificate is signed by
// a certificate authority, the certFile should be the concatenation of the
// server's certificate, any intermediates, and the CA's certificate.
//
// For HTTP/2 support, srv.TLSConfig should be initialized to the
// provided listener's TLS Config before calling ServeTLS. If
// srv.TLSConfig is non-nil and doesn't include the string "h2" in
// Config.NextProtos, HTTP/2 support is not enabled.
//
// ServeTLS always returns a non-nil error. After Shutdown or Close, the
// returned error is ErrServerClosed.
func (srv *Server) ServeTLS(l net.Listener, certFile, keyFile string) error {
	// Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig
	// before we clone it and create the TLS Listener.
	if err := srv.setupHTTP2_ServeTLS(); err != nil {
		return err
	}

	config := cloneTLSConfig(srv.TLSConfig)
	if !strSliceContains(config.NextProtos, "http/1.1") {
		config.NextProtos = append(config.NextProtos, "http/1.1")
	}

	// Load the certFile/keyFile pair when the caller supplied one, or
	// when the config carries no certificate source of its own.
	configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil
	if !configHasCert || certFile != "" || keyFile != "" {
		var err error
		config.Certificates = make([]tls.Certificate, 1)
		config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
		if err != nil {
			return err
		}
	}

	tlsListener := tls.NewListener(l, config)
	return srv.Serve(tlsListener)
}

// trackListener adds (add=true) or removes (add=false) ln in the
// server's set of tracked listeners, which Shutdown and Close use to
// find the listeners to close.
func (s *Server) trackListener(ln net.Listener, add bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.listeners == nil {
		s.listeners = make(map[net.Listener]struct{})
	}
	if add {
		// If the *Server is being reused after a previous
		// Close or Shutdown, reset its doneChan:
		if len(s.listeners) == 0 && len(s.activeConn) == 0 {
			s.doneChan = nil
		}
		s.listeners[ln] = struct{}{}
	} else {
		delete(s.listeners, ln)
	}
}

// trackConn adds (add=true) or removes (add=false) c in the server's
// set of active connections.
func (s *Server) trackConn(c *conn, add bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.activeConn == nil {
		s.activeConn = make(map[*conn]struct{})
	}
	if add {
		s.activeConn[c] = struct{}{}
	} else {
		delete(s.activeConn, c)
	}
}

// idleTimeout returns the effective keep-alive idle timeout:
// IdleTimeout if set, otherwise ReadTimeout.
func (s *Server) idleTimeout() time.Duration {
	if s.IdleTimeout != 0 {
		return s.IdleTimeout
	}
	return s.ReadTimeout
}

// readHeaderTimeout returns the effective header-read timeout:
// ReadHeaderTimeout if set, otherwise ReadTimeout.
func (s *Server) readHeaderTimeout() time.Duration {
	if s.ReadHeaderTimeout != 0 {
		return s.ReadHeaderTimeout
	}
	return s.ReadTimeout
}

// doKeepAlives reports whether connections may be kept alive:
// keep-alives must be enabled and the server must not be shutting down.
func (s *Server) doKeepAlives() bool {
	return atomic.LoadInt32(&s.disableKeepAlives) == 0 && !s.shuttingDown()
}

// shuttingDown reports whether Shutdown has been called.
func (s *Server) shuttingDown() bool {
	return atomic.LoadInt32(&s.inShutdown) != 0
}

// SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled.
// By default, keep-alives are always enabled. Only very
// resource-constrained environments or servers in the process of
// shutting down should disable them.
func (srv *Server) SetKeepAlivesEnabled(v bool) {
	if v {
		atomic.StoreInt32(&srv.disableKeepAlives, 0)
		return
	}
	atomic.StoreInt32(&srv.disableKeepAlives, 1)

	// Close idle HTTP/1 conns:
	srv.closeIdleConns()

	// Close HTTP/2 conns, as soon as they become idle, but reset
	// the chan so future conns (if the listener is still active)
	// still work and don't get a GOAWAY immediately, before their
	// first request:
	srv.mu.Lock()
	defer srv.mu.Unlock()
	srv.closeDoneChanLocked() // closes http2 conns
	srv.doneChan = nil
}

// logf prints to the server's ErrorLog if one is set, otherwise to
// the log package's standard logger.
func (s *Server) logf(format string, args ...interface{}) {
	if s.ErrorLog != nil {
		s.ErrorLog.Printf(format, args...)
	} else {
		log.Printf(format, args...)
	}
}

// logf prints to the ErrorLog of the *Server associated with request r
// via ServerContextKey. If there's no associated server, or if ErrorLog
// is nil, logging is done via the log package's standard logger.
2920 func logf(r *Request, format string, args ...interface{}) { 2921 s, _ := r.Context().Value(ServerContextKey).(*Server) 2922 if s != nil && s.ErrorLog != nil { 2923 s.ErrorLog.Printf(format, args...) 2924 } else { 2925 log.Printf(format, args...) 2926 } 2927 } 2928 2929 // ListenAndServe listens on the TCP network address addr 2930 // and then calls Serve with handler to handle requests 2931 // on incoming connections. 2932 // Accepted connections are configured to enable TCP keep-alives. 2933 // Handler is typically nil, in which case the DefaultServeMux is 2934 // used. 2935 // 2936 // A trivial example server is: 2937 // 2938 // package main 2939 // 2940 // import ( 2941 // "io" 2942 // "net/http" 2943 // "log" 2944 // ) 2945 // 2946 // // hello world, the web server 2947 // func HelloServer(w http.ResponseWriter, req *http.Request) { 2948 // io.WriteString(w, "hello, world!\n") 2949 // } 2950 // 2951 // func main() { 2952 // http.HandleFunc("/hello", HelloServer) 2953 // log.Fatal(http.ListenAndServe(":12345", nil)) 2954 // } 2955 // 2956 // ListenAndServe always returns a non-nil error. 2957 func ListenAndServe(addr string, handler Handler) error { 2958 server := &Server{Addr: addr, Handler: handler} 2959 return server.ListenAndServe() 2960 } 2961 2962 // ListenAndServeTLS acts identically to ListenAndServe, except that it 2963 // expects HTTPS connections. Additionally, files containing a certificate and 2964 // matching private key for the server must be provided. If the certificate 2965 // is signed by a certificate authority, the certFile should be the concatenation 2966 // of the server's certificate, any intermediates, and the CA's certificate. 
2967 // 2968 // A trivial example server is: 2969 // 2970 // import ( 2971 // "log" 2972 // "net/http" 2973 // ) 2974 // 2975 // func handler(w http.ResponseWriter, req *http.Request) { 2976 // w.Header().Set("Content-Type", "text/plain") 2977 // w.Write([]byte("This is an example server.\n")) 2978 // } 2979 // 2980 // func main() { 2981 // http.HandleFunc("/", handler) 2982 // log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/") 2983 // err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil) 2984 // log.Fatal(err) 2985 // } 2986 // 2987 // One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem. 2988 // 2989 // ListenAndServeTLS always returns a non-nil error. 2990 func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { 2991 server := &Server{Addr: addr, Handler: handler} 2992 return server.ListenAndServeTLS(certFile, keyFile) 2993 } 2994 2995 // ListenAndServeTLS listens on the TCP network address srv.Addr and 2996 // then calls Serve to handle requests on incoming TLS connections. 2997 // Accepted connections are configured to enable TCP keep-alives. 2998 // 2999 // Filenames containing a certificate and matching private key for the 3000 // server must be provided if neither the Server's TLSConfig.Certificates 3001 // nor TLSConfig.GetCertificate are populated. If the certificate is 3002 // signed by a certificate authority, the certFile should be the 3003 // concatenation of the server's certificate, any intermediates, and 3004 // the CA's certificate. 3005 // 3006 // If srv.Addr is blank, ":https" is used. 3007 // 3008 // ListenAndServeTLS always returns a non-nil error. 
func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
	addr := srv.Addr
	if addr == "" {
		addr = ":https"
	}

	ln, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}

	defer ln.Close()

	return srv.ServeTLS(tcpKeepAliveListener{ln.(*net.TCPListener)}, certFile, keyFile)
}

// setupHTTP2_ServeTLS conditionally configures HTTP/2 on
// srv and returns the error (if any) from setting it up. If it is
// not configured for policy reasons, nil is returned.
func (srv *Server) setupHTTP2_ServeTLS() error {
	srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults)
	return srv.nextProtoErr
}

// setupHTTP2_Serve is called from (*Server).Serve and conditionally
// configures HTTP/2 on srv using a more conservative policy than
// setupHTTP2_ServeTLS because Serve may be called
// concurrently.
//
// The tests named TestTransportAutomaticHTTP2* and
// TestConcurrentServerServe in server_test.go demonstrate some
// of the supported use cases and motivations.
func (srv *Server) setupHTTP2_Serve() error {
	srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults_Serve)
	return srv.nextProtoErr
}

// onceSetNextProtoDefaults_Serve is the conservative variant used by
// Serve: it configures HTTP/2 only when policy allows it.
func (srv *Server) onceSetNextProtoDefaults_Serve() {
	if srv.shouldConfigureHTTP2ForServe() {
		srv.onceSetNextProtoDefaults()
	}
}

// onceSetNextProtoDefaults configures HTTP/2, if the user hasn't
// configured otherwise. (by setting srv.TLSNextProto non-nil)
// It must only be called via srv.nextProtoOnce (use srv.setupHTTP2_*).
func (srv *Server) onceSetNextProtoDefaults() {
	// GODEBUG=http2server=0 disables HTTP/2 support entirely.
	if strings.Contains(os.Getenv("GODEBUG"), "http2server=0") {
		return
	}
	// Enable HTTP/2 by default if the user hasn't otherwise
	// configured their TLSNextProto map.
	if srv.TLSNextProto == nil {
		conf := &http2Server{
			NewWriteScheduler: func() http2WriteScheduler { return http2NewPriorityWriteScheduler(nil) },
		}
		srv.nextProtoErr = http2ConfigureServer(srv, conf)
	}
}

// TimeoutHandler returns a Handler that runs h with the given time limit.
//
// The new Handler calls h.ServeHTTP to handle each request, but if a
// call runs for longer than its time limit, the handler responds with
// a 503 Service Unavailable error and the given message in its body.
// (If msg is empty, a suitable default message will be sent.)
// After such a timeout, writes by h to its ResponseWriter will return
// ErrHandlerTimeout.
//
// TimeoutHandler buffers all Handler writes to memory and does not
// support the Hijacker or Flusher interfaces.
func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler {
	return &timeoutHandler{
		handler: h,
		body:    msg,
		dt:      dt,
	}
}

// ErrHandlerTimeout is returned on ResponseWriter Write calls
// in handlers which have timed out.
var ErrHandlerTimeout = errors.New("http: Handler timeout")

// timeoutHandler wraps a Handler and enforces a time limit per request.
type timeoutHandler struct {
	handler Handler       // the wrapped handler
	body    string        // error page body; errorBody() falls back to a default if empty
	dt      time.Duration // time limit for each request

	// When set, no context will be created and this context will
	// be used instead.
	testContext context.Context
}

// errorBody returns the body to send on timeout: the configured body
// if non-empty, otherwise a default HTML timeout page.
func (h *timeoutHandler) errorBody() string {
	if h.body != "" {
		return h.body
	}
	return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>"
}

func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) {
	ctx := h.testContext
	if ctx == nil {
		var cancelCtx context.CancelFunc
		ctx, cancelCtx = context.WithTimeout(r.Context(), h.dt)
		defer cancelCtx()
	}
	r = r.WithContext(ctx)
	done := make(chan struct{})
	// tw buffers the inner handler's headers, status, and body so that
	// nothing reaches the real ResponseWriter until we know whether the
	// handler finished in time.
	tw := &timeoutWriter{
		w: w,
		h: make(Header),
	}
	// Run the wrapped handler on its own goroutine, forwarding any
	// panic so it can be re-raised on this goroutine.
	panicChan := make(chan interface{}, 1)
	go func() {
		defer func() {
			if p := recover(); p != nil {
				panicChan <- p
			}
		}()
		h.handler.ServeHTTP(tw, r)
		close(done)
	}()
	select {
	case p := <-panicChan:
		panic(p)
	case <-done:
		// Handler finished in time: copy the buffered response
		// (headers, status, body) to the real ResponseWriter.
		tw.mu.Lock()
		defer tw.mu.Unlock()
		dst := w.Header()
		for k, vv := range tw.h {
			dst[k] = vv
		}
		if !tw.wroteHeader {
			tw.code = StatusOK
		}
		w.WriteHeader(tw.code)
		w.Write(tw.wbuf.Bytes())
	case <-ctx.Done():
		// Timed out: reply 503 and mark tw so that subsequent
		// writes by the handler return ErrHandlerTimeout.
		tw.mu.Lock()
		defer tw.mu.Unlock()
		w.WriteHeader(StatusServiceUnavailable)
		io.WriteString(w, h.errorBody())
		tw.timedOut = true
		return
	}
}

// timeoutWriter is the buffering ResponseWriter handed to the wrapped
// handler by timeoutHandler.ServeHTTP.
type timeoutWriter struct {
	w    ResponseWriter // the real ResponseWriter; written only by timeoutHandler.ServeHTTP
	h    Header         // buffered headers
	wbuf bytes.Buffer   // buffered body

	mu          sync.Mutex // guards the fields below and wbuf/h after a timeout
	timedOut    bool       // set once the deadline passed; later Writes fail
	wroteHeader bool       // whether a status code has been recorded
	code        int        // recorded status code
}

func (tw *timeoutWriter) Header() Header { return tw.h }

// Write buffers p, implicitly recording a 200 status on first write,
// or returns ErrHandlerTimeout if the deadline has already passed.
func (tw *timeoutWriter) Write(p []byte) (int, error) {
	tw.mu.Lock()
	defer tw.mu.Unlock()
	if tw.timedOut {
		return 0, ErrHandlerTimeout
	}
	if !tw.wroteHeader {
		tw.writeHeader(StatusOK)
	}
	return tw.wbuf.Write(p)
}

// WriteHeader records code; it is a no-op after a timeout or a
// previous call.
func (tw *timeoutWriter) WriteHeader(code int) {
	checkWriteHeaderCode(code)
	tw.mu.Lock()
	defer tw.mu.Unlock()
	if tw.timedOut || tw.wroteHeader {
		return
	}
	tw.writeHeader(code)
}

// writeHeader records code; callers must hold tw.mu.
func (tw *timeoutWriter) writeHeader(code int) {
	tw.wroteHeader = true
	tw.code = code
}

// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
// connections. It's used by ListenAndServe and ListenAndServeTLS so
// dead TCP connections (e.g. closing laptop mid-download) eventually
// go away.
type tcpKeepAliveListener struct {
	*net.TCPListener
}

func (ln tcpKeepAliveListener) Accept() (net.Conn, error) {
	tc, err := ln.AcceptTCP()
	if err != nil {
		return nil, err
	}
	tc.SetKeepAlive(true)
	tc.SetKeepAlivePeriod(3 * time.Minute)
	return tc, nil
}

// globalOptionsHandler responds to "OPTIONS *" requests.
type globalOptionsHandler struct{}

func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) {
	w.Header().Set("Content-Length", "0")
	if r.ContentLength != 0 {
		// Read up to 4KB of OPTIONS body (as mentioned in the
		// spec as being reserved for future use), but anything
		// over that is considered a waste of server resources
		// (or an attack) and we abort and close the connection,
		// courtesy of MaxBytesReader's EOF behavior.
		mb := MaxBytesReader(w, r.Body, 4<<10)
		io.Copy(ioutil.Discard, mb)
	}
}

// initNPNRequest is an HTTP handler that initializes certain
// uninitialized fields in its *Request. Such partially-initialized
// Requests come from NPN protocol handlers.
3234 type initNPNRequest struct { 3235 c *tls.Conn 3236 h serverHandler 3237 } 3238 3239 func (h initNPNRequest) ServeHTTP(rw ResponseWriter, req *Request) { 3240 if req.TLS == nil { 3241 req.TLS = &tls.ConnectionState{} 3242 *req.TLS = h.c.ConnectionState() 3243 } 3244 if req.Body == nil { 3245 req.Body = NoBody 3246 } 3247 if req.RemoteAddr == "" { 3248 req.RemoteAddr = h.c.RemoteAddr().String() 3249 } 3250 h.h.ServeHTTP(rw, req) 3251 } 3252 3253 // loggingConn is used for debugging. 3254 type loggingConn struct { 3255 name string 3256 net.Conn 3257 } 3258 3259 var ( 3260 uniqNameMu sync.Mutex 3261 uniqNameNext = make(map[string]int) 3262 ) 3263 3264 func newLoggingConn(baseName string, c net.Conn) net.Conn { 3265 uniqNameMu.Lock() 3266 defer uniqNameMu.Unlock() 3267 uniqNameNext[baseName]++ 3268 return &loggingConn{ 3269 name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]), 3270 Conn: c, 3271 } 3272 } 3273 3274 func (c *loggingConn) Write(p []byte) (n int, err error) { 3275 log.Printf("%s.Write(%d) = ....", c.name, len(p)) 3276 n, err = c.Conn.Write(p) 3277 log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err) 3278 return 3279 } 3280 3281 func (c *loggingConn) Read(p []byte) (n int, err error) { 3282 log.Printf("%s.Read(%d) = ....", c.name, len(p)) 3283 n, err = c.Conn.Read(p) 3284 log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err) 3285 return 3286 } 3287 3288 func (c *loggingConn) Close() (err error) { 3289 log.Printf("%s.Close() = ...", c.name) 3290 err = c.Conn.Close() 3291 log.Printf("%s.Close() = %v", c.name, err) 3292 return 3293 } 3294 3295 // checkConnErrorWriter writes to c.rwc and records any write errors to c.werr. 3296 // It only contains one field (and a pointer field at that), so it 3297 // fits in an interface value without an extra allocation. 
3298 type checkConnErrorWriter struct { 3299 c *conn 3300 } 3301 3302 func (w checkConnErrorWriter) Write(p []byte) (n int, err error) { 3303 n, err = w.c.rwc.Write(p) 3304 if err != nil && w.c.werr == nil { 3305 w.c.werr = err 3306 w.c.cancelCtx() 3307 } 3308 return 3309 } 3310 3311 func numLeadingCRorLF(v []byte) (n int) { 3312 for _, b := range v { 3313 if b == '\r' || b == '\n' { 3314 n++ 3315 continue 3316 } 3317 break 3318 } 3319 return 3320 3321 } 3322 3323 func strSliceContains(ss []string, s string) bool { 3324 for _, v := range ss { 3325 if v == s { 3326 return true 3327 } 3328 } 3329 return false 3330 }