github.com/activestate/go@v0.0.0-20170614201249-0b81c023a722/src/net/http/server.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // HTTP server. See RFC 2616. 6 7 package http 8 9 import ( 10 "bufio" 11 "bytes" 12 "context" 13 "crypto/tls" 14 "errors" 15 "fmt" 16 "io" 17 "io/ioutil" 18 "log" 19 "net" 20 "net/textproto" 21 "net/url" 22 "os" 23 "path" 24 "runtime" 25 "strconv" 26 "strings" 27 "sync" 28 "sync/atomic" 29 "time" 30 31 "golang_org/x/net/lex/httplex" 32 ) 33 34 // Errors used by the HTTP server. 35 var ( 36 // ErrBodyNotAllowed is returned by ResponseWriter.Write calls 37 // when the HTTP method or response code does not permit a 38 // body. 39 ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body") 40 41 // ErrHijacked is returned by ResponseWriter.Write calls when 42 // the underlying connection has been hijacked using the 43 // Hijacker interface. A zero-byte write on a hijacked 44 // connection will return ErrHijacked without any other side 45 // effects. 46 ErrHijacked = errors.New("http: connection has been hijacked") 47 48 // ErrContentLength is returned by ResponseWriter.Write calls 49 // when a Handler set a Content-Length response header with a 50 // declared size and then attempted to write more bytes than 51 // declared. 52 ErrContentLength = errors.New("http: wrote more than the declared Content-Length") 53 54 // Deprecated: ErrWriteAfterFlush is no longer used. 55 ErrWriteAfterFlush = errors.New("unused") 56 ) 57 58 // A Handler responds to an HTTP request. 59 // 60 // ServeHTTP should write reply headers and data to the ResponseWriter 61 // and then return. Returning signals that the request is finished; it 62 // is not valid to use the ResponseWriter or read from the 63 // Request.Body after or concurrently with the completion of the 64 // ServeHTTP call. 65 // 66 // Depending on the HTTP client software, HTTP protocol version, and 67 // any intermediaries between the client and the Go server, it may not 68 // be possible to read from the Request.Body after writing to the 69 // ResponseWriter. Cautious handlers should read the Request.Body 70 // first, and then reply. 71 // 72 // Except for reading the body, handlers should not modify the 73 // provided Request. 74 // 75 // If ServeHTTP panics, the server (the caller of ServeHTTP) assumes 76 // that the effect of the panic was isolated to the active request. 77 // It recovers the panic, logs a stack trace to the server error log, 78 // and hangs up the connection. To abort a handler so the client sees 79 // an interrupted response but the server doesn't log an error, panic 80 // with the value ErrAbortHandler. 81 type Handler interface { 82 ServeHTTP(ResponseWriter, *Request) 83 } 84 85 // A ResponseWriter interface is used by an HTTP handler to 86 // construct an HTTP response. 87 // 88 // A ResponseWriter may not be used after the Handler.ServeHTTP method 89 // has returned. 90 type ResponseWriter interface { 91 // Header returns the header map that will be sent by 92 // WriteHeader. The Header map also is the mechanism with which 93 // Handlers can set HTTP trailers. 94 // 95 // Changing the header map after a call to WriteHeader (or 96 // Write) has no effect unless the modified headers are 97 // trailers. 98 // 99 // There are two ways to set Trailers. 
The preferred way is to 100 // predeclare in the headers which trailers you will later 101 // send by setting the "Trailer" header to the names of the 102 // trailer keys which will come later. In this case, those 103 // keys of the Header map are treated as if they were 104 // trailers. See the example. The second way, for trailer 105 // keys not known to the Handler until after the first Write, 106 // is to prefix the Header map keys with the TrailerPrefix 107 // constant value. See TrailerPrefix. 108 // 109 // To suppress implicit response headers (such as "Date"), set 110 // their value to nil. 111 Header() Header 112 113 // Write writes the data to the connection as part of an HTTP reply. 114 // 115 // If WriteHeader has not yet been called, Write calls 116 // WriteHeader(http.StatusOK) before writing the data. If the Header 117 // does not contain a Content-Type line, Write adds a Content-Type set 118 // to the result of passing the initial 512 bytes of written data to 119 // DetectContentType. 120 // 121 // Depending on the HTTP protocol version and the client, calling 122 // Write or WriteHeader may prevent future reads on the 123 // Request.Body. For HTTP/1.x requests, handlers should read any 124 // needed request body data before writing the response. Once the 125 // headers have been flushed (due to either an explicit Flusher.Flush 126 // call or writing enough data to trigger a flush), the request body 127 // may be unavailable. For HTTP/2 requests, the Go HTTP server permits 128 // handlers to continue to read the request body while concurrently 129 // writing the response. However, such behavior may not be supported 130 // by all HTTP/2 clients. Handlers should read before writing if 131 // possible to maximize compatibility. 132 Write([]byte) (int, error) 133 134 // WriteHeader sends an HTTP response header with status code. 135 // If WriteHeader is not called explicitly, the first call to Write 136 // will trigger an implicit WriteHeader(http.StatusOK). 137 // Thus explicit calls to WriteHeader are mainly used to 138 // send error codes. 139 WriteHeader(int) 140 } 141 142 // The Flusher interface is implemented by ResponseWriters that allow 143 // an HTTP handler to flush buffered data to the client. 144 // 145 // The default HTTP/1.x and HTTP/2 ResponseWriter implementations 146 // support Flusher, but ResponseWriter wrappers may not. Handlers 147 // should always test for this ability at runtime. 148 // 149 // Note that even for ResponseWriters that support Flush, 150 // if the client is connected through an HTTP proxy, 151 // the buffered data may not reach the client until the response 152 // completes. 153 type Flusher interface { 154 // Flush sends any buffered data to the client. 155 Flush() 156 } 157 158 // The Hijacker interface is implemented by ResponseWriters that allow 159 // an HTTP handler to take over the connection. 160 // 161 // The default ResponseWriter for HTTP/1.x connections supports 162 // Hijacker, but HTTP/2 connections intentionally do not. 163 // ResponseWriter wrappers may also not support Hijacker. Handlers 164 // should always test for this ability at runtime. 165 type Hijacker interface { 166 // Hijack lets the caller take over the connection. 167 // After a call to Hijack the HTTP server library 168 // will not do anything else with the connection. 169 // 170 // It becomes the caller's responsibility to manage 171 // and close the connection. 
172 // 173 // The returned net.Conn may have read or write deadlines 174 // already set, depending on the configuration of the 175 // Server. It is the caller's responsibility to set 176 // or clear those deadlines as needed. 177 // 178 // The returned bufio.Reader may contain unprocessed buffered 179 // data from the client. 180 Hijack() (net.Conn, *bufio.ReadWriter, error) 181 } 182 183 // The CloseNotifier interface is implemented by ResponseWriters which 184 // allow detecting when the underlying connection has gone away. 185 // 186 // This mechanism can be used to cancel long operations on the server 187 // if the client has disconnected before the response is ready. 188 type CloseNotifier interface { 189 // CloseNotify returns a channel that receives at most a 190 // single value (true) when the client connection has gone 191 // away. 192 // 193 // CloseNotify may wait to notify until Request.Body has been 194 // fully read. 195 // 196 // After the Handler has returned, there is no guarantee 197 // that the channel receives a value. 198 // 199 // If the protocol is HTTP/1.1 and CloseNotify is called while 200 // processing an idempotent request (such a GET) while 201 // HTTP/1.1 pipelining is in use, the arrival of a subsequent 202 // pipelined request may cause a value to be sent on the 203 // returned channel. In practice HTTP/1.1 pipelining is not 204 // enabled in browsers and not seen often in the wild. If this 205 // is a problem, use HTTP/2 or only use CloseNotify on methods 206 // such as POST. 207 CloseNotify() <-chan bool 208 } 209 210 var ( 211 // ServerContextKey is a context key. It can be used in HTTP 212 // handlers with context.WithValue to access the server that 213 // started the handler. The associated value will be of 214 // type *Server. 215 ServerContextKey = &contextKey{"http-server"} 216 217 // LocalAddrContextKey is a context key. It can be used in 218 // HTTP handlers with context.WithValue to access the address 219 // the local address the connection arrived on. 220 // The associated value will be of type net.Addr. 221 LocalAddrContextKey = &contextKey{"local-addr"} 222 ) 223 224 // A conn represents the server side of an HTTP connection. 225 type conn struct { 226 // server is the server on which the connection arrived. 227 // Immutable; never nil. 228 server *Server 229 230 // cancelCtx cancels the connection-level context. 231 cancelCtx context.CancelFunc 232 233 // rwc is the underlying network connection. 234 // This is never wrapped by other types and is the value given out 235 // to CloseNotifier callers. It is usually of type *net.TCPConn or 236 // *tls.Conn. 237 rwc net.Conn 238 239 // remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously 240 // inside the Listener's Accept goroutine, as some implementations block. 241 // It is populated immediately inside the (*conn).serve goroutine. 242 // This is the value of a Handler's (*Request).RemoteAddr. 243 remoteAddr string 244 245 // tlsState is the TLS connection state when using TLS. 246 // nil means not TLS. 247 tlsState *tls.ConnectionState 248 249 // werr is set to the first write error to rwc. 250 // It is set via checkConnErrorWriter{w}, where bufw writes. 251 werr error 252 253 // r is bufr's read source. It's a wrapper around rwc that provides 254 // io.LimitedReader-style limiting (while reading request headers) 255 // and functionality to support CloseNotifier. See *connReader docs. 256 r *connReader 257 258 // bufr reads from r. 
259 bufr *bufio.Reader 260 261 // bufw writes to checkConnErrorWriter{c}, which populates werr on error. 262 bufw *bufio.Writer 263 264 // lastMethod is the method of the most recent request 265 // on this connection, if any. 266 lastMethod string 267 268 curReq atomic.Value // of *response (which has a Request in it) 269 270 curState atomic.Value // of ConnState 271 272 // mu guards hijackedv 273 mu sync.Mutex 274 275 // hijackedv is whether this connection has been hijacked 276 // by a Handler with the Hijacker interface. 277 // It is guarded by mu. 278 hijackedv bool 279 } 280 281 func (c *conn) hijacked() bool { 282 c.mu.Lock() 283 defer c.mu.Unlock() 284 return c.hijackedv 285 } 286 287 // c.mu must be held. 288 func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) { 289 if c.hijackedv { 290 return nil, nil, ErrHijacked 291 } 292 c.r.abortPendingRead() 293 294 c.hijackedv = true 295 rwc = c.rwc 296 rwc.SetDeadline(time.Time{}) 297 298 buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc)) 299 if c.r.hasByte { 300 if _, err := c.bufr.Peek(c.bufr.Buffered() + 1); err != nil { 301 return nil, nil, fmt.Errorf("unexpected Peek failure reading buffered byte: %v", err) 302 } 303 } 304 c.setState(rwc, StateHijacked) 305 return 306 } 307 308 // This should be >= 512 bytes for DetectContentType, 309 // but otherwise it's somewhat arbitrary. 310 const bufferBeforeChunkingSize = 2048 311 312 // chunkWriter writes to a response's conn buffer, and is the writer 313 // wrapped by the response.bufw buffered writer. 314 // 315 // chunkWriter also is responsible for finalizing the Header, including 316 // conditionally setting the Content-Type and setting a Content-Length 317 // in cases where the handler's final output is smaller than the buffer 318 // size. It also conditionally adds chunk headers, when in chunking mode. 319 // 320 // See the comment above (*response).Write for the entire write flow. 321 type chunkWriter struct { 322 res *response 323 324 // header is either nil or a deep clone of res.handlerHeader 325 // at the time of res.WriteHeader, if res.WriteHeader is 326 // called and extra buffering is being done to calculate 327 // Content-Type and/or Content-Length. 328 header Header 329 330 // wroteHeader tells whether the header's been written to "the 331 // wire" (or rather: w.conn.buf). this is unlike 332 // (*response).wroteHeader, which tells only whether it was 333 // logically written. 334 wroteHeader bool 335 336 // set by the writeHeader method: 337 chunking bool // using chunked transfer encoding for reply body 338 } 339 340 var ( 341 crlf = []byte("\r\n") 342 colonSpace = []byte(": ") 343 ) 344 345 func (cw *chunkWriter) Write(p []byte) (n int, err error) { 346 if !cw.wroteHeader { 347 cw.writeHeader(p) 348 } 349 if cw.res.req.Method == "HEAD" { 350 // Eat writes. 
351 return len(p), nil 352 } 353 if cw.chunking { 354 _, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p)) 355 if err != nil { 356 cw.res.conn.rwc.Close() 357 return 358 } 359 } 360 n, err = cw.res.conn.bufw.Write(p) 361 if cw.chunking && err == nil { 362 _, err = cw.res.conn.bufw.Write(crlf) 363 } 364 if err != nil { 365 cw.res.conn.rwc.Close() 366 } 367 return 368 } 369 370 func (cw *chunkWriter) flush() { 371 if !cw.wroteHeader { 372 cw.writeHeader(nil) 373 } 374 cw.res.conn.bufw.Flush() 375 } 376 377 func (cw *chunkWriter) close() { 378 if !cw.wroteHeader { 379 cw.writeHeader(nil) 380 } 381 if cw.chunking { 382 bw := cw.res.conn.bufw // conn's bufio writer 383 // zero chunk to mark EOF 384 bw.WriteString("0\r\n") 385 if trailers := cw.res.finalTrailers(); trailers != nil { 386 trailers.Write(bw) // the writer handles noting errors 387 } 388 // final blank line after the trailers (whether 389 // present or not) 390 bw.WriteString("\r\n") 391 } 392 } 393 394 // A response represents the server side of an HTTP response. 395 type response struct { 396 conn *conn 397 req *Request // request for this response 398 reqBody io.ReadCloser 399 cancelCtx context.CancelFunc // when ServeHTTP exits 400 wroteHeader bool // reply header has been (logically) written 401 wroteContinue bool // 100 Continue response was written 402 wants10KeepAlive bool // HTTP/1.0 w/ Connection "keep-alive" 403 wantsClose bool // HTTP request has Connection "close" 404 405 w *bufio.Writer // buffers output in chunks to chunkWriter 406 cw chunkWriter 407 408 // handlerHeader is the Header that Handlers get access to, 409 // which may be retained and mutated even after WriteHeader. 410 // handlerHeader is copied into cw.header at WriteHeader 411 // time, and privately mutated thereafter. 412 handlerHeader Header 413 calledHeader bool // handler accessed handlerHeader via Header 414 415 written int64 // number of bytes written in body 416 contentLength int64 // explicitly-declared Content-Length; or -1 417 status int // status code passed to WriteHeader 418 419 // close connection after this reply. set on request and 420 // updated after response from handler if there's a 421 // "Connection: keep-alive" response header and a 422 // Content-Length. 423 closeAfterReply bool 424 425 // requestBodyLimitHit is set by requestTooLarge when 426 // maxBytesReader hits its max size. It is checked in 427 // WriteHeader, to make sure we don't consume the 428 // remaining request body to try to advance to the next HTTP 429 // request. Instead, when this is set, we stop reading 430 // subsequent requests on this connection and stop reading 431 // input from it. 432 requestBodyLimitHit bool 433 434 // trailers are the headers to be sent after the handler 435 // finishes writing the body. This field is initialized from 436 // the Trailer response header when the response header is 437 // written. 438 trailers []string 439 440 handlerDone atomicBool // set true when the handler exits 441 442 // Buffers for Date, Content-Length, and status code 443 dateBuf [len(TimeFormat)]byte 444 clenBuf [10]byte 445 statusBuf [3]byte 446 447 // closeNotifyCh is the channel returned by CloseNotify. 448 // TODO(bradfitz): this is currently (for Go 1.8) always 449 // non-nil. Make this lazily-created again as it used to be? 
450 closeNotifyCh chan bool 451 didCloseNotify int32 // atomic (only 0->1 winner should send) 452 } 453 454 // TrailerPrefix is a magic prefix for ResponseWriter.Header map keys 455 // that, if present, signals that the map entry is actually for 456 // the response trailers, and not the response headers. The prefix 457 // is stripped after the ServeHTTP call finishes and the values are 458 // sent in the trailers. 459 // 460 // This mechanism is intended only for trailers that are not known 461 // prior to the headers being written. If the set of trailers is fixed 462 // or known before the header is written, the normal Go trailers mechanism 463 // is preferred: 464 // https://golang.org/pkg/net/http/#ResponseWriter 465 // https://golang.org/pkg/net/http/#example_ResponseWriter_trailers 466 const TrailerPrefix = "Trailer:" 467 468 // finalTrailers is called after the Handler exits and returns a non-nil 469 // value if the Handler set any trailers. 470 func (w *response) finalTrailers() Header { 471 var t Header 472 for k, vv := range w.handlerHeader { 473 if strings.HasPrefix(k, TrailerPrefix) { 474 if t == nil { 475 t = make(Header) 476 } 477 t[strings.TrimPrefix(k, TrailerPrefix)] = vv 478 } 479 } 480 for _, k := range w.trailers { 481 if t == nil { 482 t = make(Header) 483 } 484 for _, v := range w.handlerHeader[k] { 485 t.Add(k, v) 486 } 487 } 488 return t 489 } 490 491 type atomicBool int32 492 493 func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } 494 func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } 495 496 // declareTrailer is called for each Trailer header when the 497 // response header is written. It notes that a header will need to be 498 // written in the trailers at the end of the response. 499 func (w *response) declareTrailer(k string) { 500 k = CanonicalHeaderKey(k) 501 switch k { 502 case "Transfer-Encoding", "Content-Length", "Trailer": 503 // Forbidden by RFC 2616 14.40. 504 return 505 } 506 w.trailers = append(w.trailers, k) 507 } 508 509 // requestTooLarge is called by maxBytesReader when too much input has 510 // been read from the client. 511 func (w *response) requestTooLarge() { 512 w.closeAfterReply = true 513 w.requestBodyLimitHit = true 514 if !w.wroteHeader { 515 w.Header().Set("Connection", "close") 516 } 517 } 518 519 // needsSniff reports whether a Content-Type still needs to be sniffed. 520 func (w *response) needsSniff() bool { 521 _, haveType := w.handlerHeader["Content-Type"] 522 return !w.cw.wroteHeader && !haveType && w.written < sniffLen 523 } 524 525 // writerOnly hides an io.Writer value's optional ReadFrom method 526 // from io.Copy. 527 type writerOnly struct { 528 io.Writer 529 } 530 531 func srcIsRegularFile(src io.Reader) (isRegular bool, err error) { 532 switch v := src.(type) { 533 case *os.File: 534 fi, err := v.Stat() 535 if err != nil { 536 return false, err 537 } 538 return fi.Mode().IsRegular(), nil 539 case *io.LimitedReader: 540 return srcIsRegularFile(v.R) 541 default: 542 return 543 } 544 } 545 546 // ReadFrom is here to optimize copying from an *os.File regular file 547 // to a *net.TCPConn with sendfile. 548 func (w *response) ReadFrom(src io.Reader) (n int64, err error) { 549 // Our underlying w.conn.rwc is usually a *TCPConn (with its 550 // own ReadFrom method). If not, or if our src isn't a regular 551 // file, just fall back to the normal copy method. 
552 rf, ok := w.conn.rwc.(io.ReaderFrom) 553 regFile, err := srcIsRegularFile(src) 554 if err != nil { 555 return 0, err 556 } 557 if !ok || !regFile { 558 bufp := copyBufPool.Get().(*[]byte) 559 defer copyBufPool.Put(bufp) 560 return io.CopyBuffer(writerOnly{w}, src, *bufp) 561 } 562 563 // sendfile path: 564 565 if !w.wroteHeader { 566 w.WriteHeader(StatusOK) 567 } 568 569 if w.needsSniff() { 570 n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen)) 571 n += n0 572 if err != nil { 573 return n, err 574 } 575 } 576 577 w.w.Flush() // get rid of any previous writes 578 w.cw.flush() // make sure Header is written; flush data to rwc 579 580 // Now that cw has been flushed, its chunking field is guaranteed initialized. 581 if !w.cw.chunking && w.bodyAllowed() { 582 n0, err := rf.ReadFrom(src) 583 n += n0 584 w.written += n0 585 return n, err 586 } 587 588 n0, err := io.Copy(writerOnly{w}, src) 589 n += n0 590 return n, err 591 } 592 593 // debugServerConnections controls whether all server connections are wrapped 594 // with a verbose logging wrapper. 595 const debugServerConnections = false 596 597 // Create new connection from rwc. 598 func (srv *Server) newConn(rwc net.Conn) *conn { 599 c := &conn{ 600 server: srv, 601 rwc: rwc, 602 } 603 if debugServerConnections { 604 c.rwc = newLoggingConn("server", c.rwc) 605 } 606 return c 607 } 608 609 type readResult struct { 610 n int 611 err error 612 b byte // byte read, if n == 1 613 } 614 615 // connReader is the io.Reader wrapper used by *conn. It combines a 616 // selectively-activated io.LimitedReader (to bound request header 617 // read sizes) with support for selectively keeping an io.Reader.Read 618 // call blocked in a background goroutine to wait for activity and 619 // trigger a CloseNotifier channel. 620 type connReader struct { 621 conn *conn 622 623 mu sync.Mutex // guards following 624 hasByte bool 625 byteBuf [1]byte 626 cond *sync.Cond 627 inRead bool 628 aborted bool // set true before conn.rwc deadline is set to past 629 remain int64 // bytes remaining 630 } 631 632 func (cr *connReader) lock() { 633 cr.mu.Lock() 634 if cr.cond == nil { 635 cr.cond = sync.NewCond(&cr.mu) 636 } 637 } 638 639 func (cr *connReader) unlock() { cr.mu.Unlock() } 640 641 func (cr *connReader) startBackgroundRead() { 642 cr.lock() 643 defer cr.unlock() 644 if cr.inRead { 645 panic("invalid concurrent Body.Read call") 646 } 647 if cr.hasByte { 648 return 649 } 650 cr.inRead = true 651 cr.conn.rwc.SetReadDeadline(time.Time{}) 652 go cr.backgroundRead() 653 } 654 655 func (cr *connReader) backgroundRead() { 656 n, err := cr.conn.rwc.Read(cr.byteBuf[:]) 657 cr.lock() 658 if n == 1 { 659 cr.hasByte = true 660 // We were at EOF already (since we wouldn't be in a 661 // background read otherwise), so this is a pipelined 662 // HTTP request. 663 cr.closeNotifyFromPipelinedRequest() 664 } 665 if ne, ok := err.(net.Error); ok && cr.aborted && ne.Timeout() { 666 // Ignore this error. It's the expected error from 667 // another goroutine calling abortPendingRead. 
668 } else if err != nil { 669 cr.handleReadError(err) 670 } 671 cr.aborted = false 672 cr.inRead = false 673 cr.unlock() 674 cr.cond.Broadcast() 675 } 676 677 func (cr *connReader) abortPendingRead() { 678 cr.lock() 679 defer cr.unlock() 680 if !cr.inRead { 681 return 682 } 683 cr.aborted = true 684 cr.conn.rwc.SetReadDeadline(aLongTimeAgo) 685 for cr.inRead { 686 cr.cond.Wait() 687 } 688 cr.conn.rwc.SetReadDeadline(time.Time{}) 689 } 690 691 func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain } 692 func (cr *connReader) setInfiniteReadLimit() { cr.remain = maxInt64 } 693 func (cr *connReader) hitReadLimit() bool { return cr.remain <= 0 } 694 695 // may be called from multiple goroutines. 696 func (cr *connReader) handleReadError(err error) { 697 cr.conn.cancelCtx() 698 cr.closeNotify() 699 } 700 701 // closeNotifyFromPipelinedRequest simply calls closeNotify. 702 // 703 // This method wrapper is here for documentation. The callers are the 704 // cases where we send on the closenotify channel because of a 705 // pipelined HTTP request, per the previous Go behavior and 706 // documentation (that this "MAY" happen). 707 // 708 // TODO: consider changing this behavior and making context 709 // cancelation and closenotify work the same. 710 func (cr *connReader) closeNotifyFromPipelinedRequest() { 711 cr.closeNotify() 712 } 713 714 // may be called from multiple goroutines. 715 func (cr *connReader) closeNotify() { 716 res, _ := cr.conn.curReq.Load().(*response) 717 if res != nil { 718 if atomic.CompareAndSwapInt32(&res.didCloseNotify, 0, 1) { 719 res.closeNotifyCh <- true 720 } 721 } 722 } 723 724 func (cr *connReader) Read(p []byte) (n int, err error) { 725 cr.lock() 726 if cr.inRead { 727 cr.unlock() 728 panic("invalid concurrent Body.Read call") 729 } 730 if cr.hitReadLimit() { 731 cr.unlock() 732 return 0, io.EOF 733 } 734 if len(p) == 0 { 735 cr.unlock() 736 return 0, nil 737 } 738 if int64(len(p)) > cr.remain { 739 p = p[:cr.remain] 740 } 741 if cr.hasByte { 742 p[0] = cr.byteBuf[0] 743 cr.hasByte = false 744 cr.unlock() 745 return 1, nil 746 } 747 cr.inRead = true 748 cr.unlock() 749 n, err = cr.conn.rwc.Read(p) 750 751 cr.lock() 752 cr.inRead = false 753 if err != nil { 754 cr.handleReadError(err) 755 } 756 cr.remain -= int64(n) 757 cr.unlock() 758 759 cr.cond.Broadcast() 760 return n, err 761 } 762 763 var ( 764 bufioReaderPool sync.Pool 765 bufioWriter2kPool sync.Pool 766 bufioWriter4kPool sync.Pool 767 ) 768 769 var copyBufPool = sync.Pool{ 770 New: func() interface{} { 771 b := make([]byte, 32*1024) 772 return &b 773 }, 774 } 775 776 func bufioWriterPool(size int) *sync.Pool { 777 switch size { 778 case 2 << 10: 779 return &bufioWriter2kPool 780 case 4 << 10: 781 return &bufioWriter4kPool 782 } 783 return nil 784 } 785 786 func newBufioReader(r io.Reader) *bufio.Reader { 787 if v := bufioReaderPool.Get(); v != nil { 788 br := v.(*bufio.Reader) 789 br.Reset(r) 790 return br 791 } 792 // Note: if this reader size is ever changed, update 793 // TestHandlerBodyClose's assumptions. 
794 return bufio.NewReader(r) 795 } 796 797 func putBufioReader(br *bufio.Reader) { 798 br.Reset(nil) 799 bufioReaderPool.Put(br) 800 } 801 802 func newBufioWriterSize(w io.Writer, size int) *bufio.Writer { 803 pool := bufioWriterPool(size) 804 if pool != nil { 805 if v := pool.Get(); v != nil { 806 bw := v.(*bufio.Writer) 807 bw.Reset(w) 808 return bw 809 } 810 } 811 return bufio.NewWriterSize(w, size) 812 } 813 814 func putBufioWriter(bw *bufio.Writer) { 815 bw.Reset(nil) 816 if pool := bufioWriterPool(bw.Available()); pool != nil { 817 pool.Put(bw) 818 } 819 } 820 821 // DefaultMaxHeaderBytes is the maximum permitted size of the headers 822 // in an HTTP request. 823 // This can be overridden by setting Server.MaxHeaderBytes. 824 const DefaultMaxHeaderBytes = 1 << 20 // 1 MB 825 826 func (srv *Server) maxHeaderBytes() int { 827 if srv.MaxHeaderBytes > 0 { 828 return srv.MaxHeaderBytes 829 } 830 return DefaultMaxHeaderBytes 831 } 832 833 func (srv *Server) initialReadLimitSize() int64 { 834 return int64(srv.maxHeaderBytes()) + 4096 // bufio slop 835 } 836 837 // wrapper around io.ReadCloser which on first read, sends an 838 // HTTP/1.1 100 Continue header 839 type expectContinueReader struct { 840 resp *response 841 readCloser io.ReadCloser 842 closed bool 843 sawEOF bool 844 } 845 846 func (ecr *expectContinueReader) Read(p []byte) (n int, err error) { 847 if ecr.closed { 848 return 0, ErrBodyReadAfterClose 849 } 850 if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() { 851 ecr.resp.wroteContinue = true 852 ecr.resp.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n") 853 ecr.resp.conn.bufw.Flush() 854 } 855 n, err = ecr.readCloser.Read(p) 856 if err == io.EOF { 857 ecr.sawEOF = true 858 } 859 return 860 } 861 862 func (ecr *expectContinueReader) Close() error { 863 ecr.closed = true 864 return ecr.readCloser.Close() 865 } 866 867 // TimeFormat is the time format to use when generating times in HTTP 868 // headers. It is like time.RFC1123 but hard-codes GMT as the time 869 // zone. The time being formatted must be in UTC for Format to 870 // generate the correct format. 871 // 872 // For parsing this time format, see ParseTime. 873 const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" 874 875 // appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat)) 876 func appendTime(b []byte, t time.Time) []byte { 877 const days = "SunMonTueWedThuFriSat" 878 const months = "JanFebMarAprMayJunJulAugSepOctNovDec" 879 880 t = t.UTC() 881 yy, mm, dd := t.Date() 882 hh, mn, ss := t.Clock() 883 day := days[3*t.Weekday():] 884 mon := months[3*(mm-1):] 885 886 return append(b, 887 day[0], day[1], day[2], ',', ' ', 888 byte('0'+dd/10), byte('0'+dd%10), ' ', 889 mon[0], mon[1], mon[2], ' ', 890 byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ', 891 byte('0'+hh/10), byte('0'+hh%10), ':', 892 byte('0'+mn/10), byte('0'+mn%10), ':', 893 byte('0'+ss/10), byte('0'+ss%10), ' ', 894 'G', 'M', 'T') 895 } 896 897 var errTooLarge = errors.New("http: request too large") 898 899 // Read next request from connection. 
900 func (c *conn) readRequest(ctx context.Context) (w *response, err error) { 901 if c.hijacked() { 902 return nil, ErrHijacked 903 } 904 905 var ( 906 wholeReqDeadline time.Time // or zero if none 907 hdrDeadline time.Time // or zero if none 908 ) 909 t0 := time.Now() 910 if d := c.server.readHeaderTimeout(); d != 0 { 911 hdrDeadline = t0.Add(d) 912 } 913 if d := c.server.ReadTimeout; d != 0 { 914 wholeReqDeadline = t0.Add(d) 915 } 916 c.rwc.SetReadDeadline(hdrDeadline) 917 if d := c.server.WriteTimeout; d != 0 { 918 defer func() { 919 c.rwc.SetWriteDeadline(time.Now().Add(d)) 920 }() 921 } 922 923 c.r.setReadLimit(c.server.initialReadLimitSize()) 924 if c.lastMethod == "POST" { 925 // RFC 2616 section 4.1 tolerance for old buggy clients. 926 peek, _ := c.bufr.Peek(4) // ReadRequest will get err below 927 c.bufr.Discard(numLeadingCRorLF(peek)) 928 } 929 req, err := readRequest(c.bufr, keepHostHeader) 930 if err != nil { 931 if c.r.hitReadLimit() { 932 return nil, errTooLarge 933 } 934 return nil, err 935 } 936 937 if !http1ServerSupportsRequest(req) { 938 return nil, badRequestError("unsupported protocol version") 939 } 940 941 c.lastMethod = req.Method 942 c.r.setInfiniteReadLimit() 943 944 hosts, haveHost := req.Header["Host"] 945 isH2Upgrade := req.isH2Upgrade() 946 if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade && req.Method != "CONNECT" { 947 return nil, badRequestError("missing required Host header") 948 } 949 if len(hosts) > 1 { 950 return nil, badRequestError("too many Host headers") 951 } 952 if len(hosts) == 1 && !httplex.ValidHostHeader(hosts[0]) { 953 return nil, badRequestError("malformed Host header") 954 } 955 for k, vv := range req.Header { 956 if !httplex.ValidHeaderFieldName(k) { 957 return nil, badRequestError("invalid header name") 958 } 959 for _, v := range vv { 960 if !httplex.ValidHeaderFieldValue(v) { 961 return nil, badRequestError("invalid header value") 962 } 963 } 964 } 965 delete(req.Header, "Host") 966 967 ctx, cancelCtx := context.WithCancel(ctx) 968 req.ctx = ctx 969 req.RemoteAddr = c.remoteAddr 970 req.TLS = c.tlsState 971 if body, ok := req.Body.(*body); ok { 972 body.doEarlyClose = true 973 } 974 975 // Adjust the read deadline if necessary. 976 if !hdrDeadline.Equal(wholeReqDeadline) { 977 c.rwc.SetReadDeadline(wholeReqDeadline) 978 } 979 980 w = &response{ 981 conn: c, 982 cancelCtx: cancelCtx, 983 req: req, 984 reqBody: req.Body, 985 handlerHeader: make(Header), 986 contentLength: -1, 987 closeNotifyCh: make(chan bool, 1), 988 989 // We populate these ahead of time so we're not 990 // reading from req.Header after their Handler starts 991 // and maybe mutates it (Issue 14940) 992 wants10KeepAlive: req.wantsHttp10KeepAlive(), 993 wantsClose: req.wantsClose(), 994 } 995 if isH2Upgrade { 996 w.closeAfterReply = true 997 } 998 w.cw.res = w 999 w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize) 1000 return w, nil 1001 } 1002 1003 // http1ServerSupportsRequest reports whether Go's HTTP/1.x server 1004 // supports the given request. 1005 func http1ServerSupportsRequest(req *Request) bool { 1006 if req.ProtoMajor == 1 { 1007 return true 1008 } 1009 // Accept "PRI * HTTP/2.0" upgrade requests, so Handlers can 1010 // wire up their own HTTP/2 upgrades. 1011 if req.ProtoMajor == 2 && req.ProtoMinor == 0 && 1012 req.Method == "PRI" && req.RequestURI == "*" { 1013 return true 1014 } 1015 // Reject HTTP/0.x, and all other HTTP/2+ requests (which 1016 // aren't encoded in ASCII anyway). 
1017 return false 1018 } 1019 1020 func (w *response) Header() Header { 1021 if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader { 1022 // Accessing the header between logically writing it 1023 // and physically writing it means we need to allocate 1024 // a clone to snapshot the logically written state. 1025 w.cw.header = w.handlerHeader.clone() 1026 } 1027 w.calledHeader = true 1028 return w.handlerHeader 1029 } 1030 1031 // maxPostHandlerReadBytes is the max number of Request.Body bytes not 1032 // consumed by a handler that the server will read from the client 1033 // in order to keep a connection alive. If there are more bytes than 1034 // this then the server to be paranoid instead sends a "Connection: 1035 // close" response. 1036 // 1037 // This number is approximately what a typical machine's TCP buffer 1038 // size is anyway. (if we have the bytes on the machine, we might as 1039 // well read them) 1040 const maxPostHandlerReadBytes = 256 << 10 1041 1042 func (w *response) WriteHeader(code int) { 1043 if w.conn.hijacked() { 1044 w.conn.server.logf("http: response.WriteHeader on hijacked connection") 1045 return 1046 } 1047 if w.wroteHeader { 1048 w.conn.server.logf("http: multiple response.WriteHeader calls") 1049 return 1050 } 1051 w.wroteHeader = true 1052 w.status = code 1053 1054 if w.calledHeader && w.cw.header == nil { 1055 w.cw.header = w.handlerHeader.clone() 1056 } 1057 1058 if cl := w.handlerHeader.get("Content-Length"); cl != "" { 1059 v, err := strconv.ParseInt(cl, 10, 64) 1060 if err == nil && v >= 0 { 1061 w.contentLength = v 1062 } else { 1063 w.conn.server.logf("http: invalid Content-Length of %q", cl) 1064 w.handlerHeader.Del("Content-Length") 1065 } 1066 } 1067 } 1068 1069 // extraHeader is the set of headers sometimes added by chunkWriter.writeHeader. 1070 // This type is used to avoid extra allocations from cloning and/or populating 1071 // the response Header map and all its 1-element slices. 1072 type extraHeader struct { 1073 contentType string 1074 connection string 1075 transferEncoding string 1076 date []byte // written if not nil 1077 contentLength []byte // written if not nil 1078 } 1079 1080 // Sorted the same as extraHeader.Write's loop. 1081 var extraHeaderKeys = [][]byte{ 1082 []byte("Content-Type"), 1083 []byte("Connection"), 1084 []byte("Transfer-Encoding"), 1085 } 1086 1087 var ( 1088 headerContentLength = []byte("Content-Length: ") 1089 headerDate = []byte("Date: ") 1090 ) 1091 1092 // Write writes the headers described in h to w. 1093 // 1094 // This method has a value receiver, despite the somewhat large size 1095 // of h, because it prevents an allocation. The escape analysis isn't 1096 // smart enough to realize this function doesn't mutate h. 1097 func (h extraHeader) Write(w *bufio.Writer) { 1098 if h.date != nil { 1099 w.Write(headerDate) 1100 w.Write(h.date) 1101 w.Write(crlf) 1102 } 1103 if h.contentLength != nil { 1104 w.Write(headerContentLength) 1105 w.Write(h.contentLength) 1106 w.Write(crlf) 1107 } 1108 for i, v := range []string{h.contentType, h.connection, h.transferEncoding} { 1109 if v != "" { 1110 w.Write(extraHeaderKeys[i]) 1111 w.Write(colonSpace) 1112 w.WriteString(v) 1113 w.Write(crlf) 1114 } 1115 } 1116 } 1117 1118 // writeHeader finalizes the header sent to the client and writes it 1119 // to cw.res.conn.bufw. 1120 // 1121 // p is not written by writeHeader, but is the first chunk of the body 1122 // that will be written. It is sniffed for a Content-Type if none is 1123 // set explicitly. 
It's also used to set the Content-Length, if the 1124 // total body size was small and the handler has already finished 1125 // running. 1126 func (cw *chunkWriter) writeHeader(p []byte) { 1127 if cw.wroteHeader { 1128 return 1129 } 1130 cw.wroteHeader = true 1131 1132 w := cw.res 1133 keepAlivesEnabled := w.conn.server.doKeepAlives() 1134 isHEAD := w.req.Method == "HEAD" 1135 1136 // header is written out to w.conn.buf below. Depending on the 1137 // state of the handler, we either own the map or not. If we 1138 // don't own it, the exclude map is created lazily for 1139 // WriteSubset to remove headers. The setHeader struct holds 1140 // headers we need to add. 1141 header := cw.header 1142 owned := header != nil 1143 if !owned { 1144 header = w.handlerHeader 1145 } 1146 var excludeHeader map[string]bool 1147 delHeader := func(key string) { 1148 if owned { 1149 header.Del(key) 1150 return 1151 } 1152 if _, ok := header[key]; !ok { 1153 return 1154 } 1155 if excludeHeader == nil { 1156 excludeHeader = make(map[string]bool) 1157 } 1158 excludeHeader[key] = true 1159 } 1160 var setHeader extraHeader 1161 1162 // Don't write out the fake "Trailer:foo" keys. See TrailerPrefix. 1163 trailers := false 1164 for k := range cw.header { 1165 if strings.HasPrefix(k, TrailerPrefix) { 1166 if excludeHeader == nil { 1167 excludeHeader = make(map[string]bool) 1168 } 1169 excludeHeader[k] = true 1170 trailers = true 1171 } 1172 } 1173 for _, v := range cw.header["Trailer"] { 1174 trailers = true 1175 foreachHeaderElement(v, cw.res.declareTrailer) 1176 } 1177 1178 te := header.get("Transfer-Encoding") 1179 hasTE := te != "" 1180 1181 // If the handler is done but never sent a Content-Length 1182 // response header and this is our first (and last) write, set 1183 // it, even to zero. This helps HTTP/1.0 clients keep their 1184 // "keep-alive" connections alive. 1185 // Exceptions: 304/204/1xx responses never get Content-Length, and if 1186 // it was a HEAD request, we don't know the difference between 1187 // 0 actual bytes and 0 bytes because the handler noticed it 1188 // was a HEAD request and chose not to write anything. So for 1189 // HEAD, the handler should either write the Content-Length or 1190 // write non-zero bytes. If it's actually 0 bytes and the 1191 // handler never looked at the Request.Method, we just don't 1192 // send a Content-Length header. 1193 // Further, we don't send an automatic Content-Length if they 1194 // set a Transfer-Encoding, because they're generally incompatible. 1195 if w.handlerDone.isSet() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) { 1196 w.contentLength = int64(len(p)) 1197 setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10) 1198 } 1199 1200 // If this was an HTTP/1.0 request with keep-alive and we sent a 1201 // Content-Length back, we can make this a keep-alive response ... 1202 if w.wants10KeepAlive && keepAlivesEnabled { 1203 sentLength := header.get("Content-Length") != "" 1204 if sentLength && header.get("Connection") == "keep-alive" { 1205 w.closeAfterReply = false 1206 } 1207 } 1208 1209 // Check for a explicit (and valid) Content-Length header. 
1210 hasCL := w.contentLength != -1 1211 1212 if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) { 1213 _, connectionHeaderSet := header["Connection"] 1214 if !connectionHeaderSet { 1215 setHeader.connection = "keep-alive" 1216 } 1217 } else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose { 1218 w.closeAfterReply = true 1219 } 1220 1221 if header.get("Connection") == "close" || !keepAlivesEnabled { 1222 w.closeAfterReply = true 1223 } 1224 1225 // If the client wanted a 100-continue but we never sent it to 1226 // them (or, more strictly: we never finished reading their 1227 // request body), don't reuse this connection because it's now 1228 // in an unknown state: we might be sending this response at 1229 // the same time the client is now sending its request body 1230 // after a timeout. (Some HTTP clients send Expect: 1231 // 100-continue but knowing that some servers don't support 1232 // it, the clients set a timer and send the body later anyway) 1233 // If we haven't seen EOF, we can't skip over the unread body 1234 // because we don't know if the next bytes on the wire will be 1235 // the body-following-the-timer or the subsequent request. 1236 // See Issue 11549. 1237 if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF { 1238 w.closeAfterReply = true 1239 } 1240 1241 // Per RFC 2616, we should consume the request body before 1242 // replying, if the handler hasn't already done so. But we 1243 // don't want to do an unbounded amount of reading here for 1244 // DoS reasons, so we only try up to a threshold. 1245 // TODO(bradfitz): where does RFC 2616 say that? See Issue 15527 1246 // about HTTP/1.x Handlers concurrently reading and writing, like 1247 // HTTP/2 handlers can do. Maybe this code should be relaxed? 1248 if w.req.ContentLength != 0 && !w.closeAfterReply { 1249 var discard, tooBig bool 1250 1251 switch bdy := w.req.Body.(type) { 1252 case *expectContinueReader: 1253 if bdy.resp.wroteContinue { 1254 discard = true 1255 } 1256 case *body: 1257 bdy.mu.Lock() 1258 switch { 1259 case bdy.closed: 1260 if !bdy.sawEOF { 1261 // Body was closed in handler with non-EOF error. 1262 w.closeAfterReply = true 1263 } 1264 case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes: 1265 tooBig = true 1266 default: 1267 discard = true 1268 } 1269 bdy.mu.Unlock() 1270 default: 1271 discard = true 1272 } 1273 1274 if discard { 1275 _, err := io.CopyN(ioutil.Discard, w.reqBody, maxPostHandlerReadBytes+1) 1276 switch err { 1277 case nil: 1278 // There must be even more data left over. 1279 tooBig = true 1280 case ErrBodyReadAfterClose: 1281 // Body was already consumed and closed. 1282 case io.EOF: 1283 // The remaining body was just consumed, close it. 1284 err = w.reqBody.Close() 1285 if err != nil { 1286 w.closeAfterReply = true 1287 } 1288 default: 1289 // Some other kind of error occurred, like a read timeout, or 1290 // corrupt chunked encoding. In any case, whatever remains 1291 // on the wire must not be parsed as another HTTP request. 1292 w.closeAfterReply = true 1293 } 1294 } 1295 1296 if tooBig { 1297 w.requestTooLarge() 1298 delHeader("Connection") 1299 setHeader.connection = "close" 1300 } 1301 } 1302 1303 code := w.status 1304 if bodyAllowedForStatus(code) { 1305 // If no content type, apply sniffing algorithm to body. 
1306 _, haveType := header["Content-Type"] 1307 if !haveType && !hasTE { 1308 setHeader.contentType = DetectContentType(p) 1309 } 1310 } else { 1311 for _, k := range suppressedHeaders(code) { 1312 delHeader(k) 1313 } 1314 } 1315 1316 if _, ok := header["Date"]; !ok { 1317 setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now()) 1318 } 1319 1320 if hasCL && hasTE && te != "identity" { 1321 // TODO: return an error if WriteHeader gets a return parameter 1322 // For now just ignore the Content-Length. 1323 w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d", 1324 te, w.contentLength) 1325 delHeader("Content-Length") 1326 hasCL = false 1327 } 1328 1329 if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) { 1330 // do nothing 1331 } else if code == StatusNoContent { 1332 delHeader("Transfer-Encoding") 1333 } else if hasCL { 1334 delHeader("Transfer-Encoding") 1335 } else if w.req.ProtoAtLeast(1, 1) { 1336 // HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no 1337 // content-length has been provided. The connection must be closed after the 1338 // reply is written, and no chunking is to be done. This is the setup 1339 // recommended in the Server-Sent Events candidate recommendation 11, 1340 // section 8. 1341 if hasTE && te == "identity" { 1342 cw.chunking = false 1343 w.closeAfterReply = true 1344 } else { 1345 // HTTP/1.1 or greater: use chunked transfer encoding 1346 // to avoid closing the connection at EOF. 1347 cw.chunking = true 1348 setHeader.transferEncoding = "chunked" 1349 if hasTE && te == "chunked" { 1350 // We will send the chunked Transfer-Encoding header later. 1351 delHeader("Transfer-Encoding") 1352 } 1353 } 1354 } else { 1355 // HTTP version < 1.1: cannot do chunked transfer 1356 // encoding and we don't know the Content-Length so 1357 // signal EOF by closing connection. 1358 w.closeAfterReply = true 1359 delHeader("Transfer-Encoding") // in case already set 1360 } 1361 1362 // Cannot use Content-Length with non-identity Transfer-Encoding. 1363 if cw.chunking { 1364 delHeader("Content-Length") 1365 } 1366 if !w.req.ProtoAtLeast(1, 0) { 1367 return 1368 } 1369 1370 if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) { 1371 delHeader("Connection") 1372 if w.req.ProtoAtLeast(1, 1) { 1373 setHeader.connection = "close" 1374 } 1375 } 1376 1377 writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:]) 1378 cw.header.WriteSubset(w.conn.bufw, excludeHeader) 1379 setHeader.Write(w.conn.bufw) 1380 w.conn.bufw.Write(crlf) 1381 } 1382 1383 // foreachHeaderElement splits v according to the "#rule" construction 1384 // in RFC 2616 section 2.1 and calls fn for each non-empty element. 1385 func foreachHeaderElement(v string, fn func(string)) { 1386 v = textproto.TrimString(v) 1387 if v == "" { 1388 return 1389 } 1390 if !strings.Contains(v, ",") { 1391 fn(v) 1392 return 1393 } 1394 for _, f := range strings.Split(v, ",") { 1395 if f = textproto.TrimString(f); f != "" { 1396 fn(f) 1397 } 1398 } 1399 } 1400 1401 // writeStatusLine writes an HTTP/1.x Status-Line (RFC 2616 Section 6.1) 1402 // to bw. is11 is whether the HTTP request is HTTP/1.1. false means HTTP/1.0. 1403 // code is the response status code. 1404 // scratch is an optional scratch buffer. If it has at least capacity 3, it's used. 
1405 func writeStatusLine(bw *bufio.Writer, is11 bool, code int, scratch []byte) { 1406 if is11 { 1407 bw.WriteString("HTTP/1.1 ") 1408 } else { 1409 bw.WriteString("HTTP/1.0 ") 1410 } 1411 if text, ok := statusText[code]; ok { 1412 bw.Write(strconv.AppendInt(scratch[:0], int64(code), 10)) 1413 bw.WriteByte(' ') 1414 bw.WriteString(text) 1415 bw.WriteString("\r\n") 1416 } else { 1417 // don't worry about performance 1418 fmt.Fprintf(bw, "%03d status code %d\r\n", code, code) 1419 } 1420 } 1421 1422 // bodyAllowed reports whether a Write is allowed for this response type. 1423 // It's illegal to call this before the header has been flushed. 1424 func (w *response) bodyAllowed() bool { 1425 if !w.wroteHeader { 1426 panic("") 1427 } 1428 return bodyAllowedForStatus(w.status) 1429 } 1430 1431 // The Life Of A Write is like this: 1432 // 1433 // Handler starts. No header has been sent. The handler can either 1434 // write a header, or just start writing. Writing before sending a header 1435 // sends an implicitly empty 200 OK header. 1436 // 1437 // If the handler didn't declare a Content-Length up front, we either 1438 // go into chunking mode or, if the handler finishes running before 1439 // the chunking buffer size, we compute a Content-Length and send that 1440 // in the header instead. 1441 // 1442 // Likewise, if the handler didn't set a Content-Type, we sniff that 1443 // from the initial chunk of output. 1444 // 1445 // The Writers are wired together like: 1446 // 1447 // 1. *response (the ResponseWriter) -> 1448 // 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes 1449 // 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type) 1450 // and which writes the chunk headers, if needed. 1451 // 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to -> 1452 // 5. checkConnErrorWriter{c}, which notes any non-nil error on Write 1453 // and populates c.werr with it if so. but otherwise writes to: 1454 // 6. the rwc, the net.Conn. 1455 // 1456 // TODO(bradfitz): short-circuit some of the buffering when the 1457 // initial header contains both a Content-Type and Content-Length. 1458 // Also short-circuit in (1) when the header's been sent and not in 1459 // chunking mode, writing directly to (4) instead, if (2) has no 1460 // buffered data. More generally, we could short-circuit from (1) to 1461 // (3) even in chunking mode if the write size from (1) is over some 1462 // threshold and nothing is in (2). The answer might be mostly making 1463 // bufferBeforeChunkingSize smaller and having bufio's fast-paths deal 1464 // with this instead. 1465 func (w *response) Write(data []byte) (n int, err error) { 1466 return w.write(len(data), data, "") 1467 } 1468 1469 func (w *response) WriteString(data string) (n int, err error) { 1470 return w.write(len(data), nil, data) 1471 } 1472 1473 // either dataB or dataS is non-zero. 
1474 func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) { 1475 if w.conn.hijacked() { 1476 if lenData > 0 { 1477 w.conn.server.logf("http: response.Write on hijacked connection") 1478 } 1479 return 0, ErrHijacked 1480 } 1481 if !w.wroteHeader { 1482 w.WriteHeader(StatusOK) 1483 } 1484 if lenData == 0 { 1485 return 0, nil 1486 } 1487 if !w.bodyAllowed() { 1488 return 0, ErrBodyNotAllowed 1489 } 1490 1491 w.written += int64(lenData) // ignoring errors, for errorKludge 1492 if w.contentLength != -1 && w.written > w.contentLength { 1493 return 0, ErrContentLength 1494 } 1495 if dataB != nil { 1496 return w.w.Write(dataB) 1497 } else { 1498 return w.w.WriteString(dataS) 1499 } 1500 } 1501 1502 func (w *response) finishRequest() { 1503 w.handlerDone.setTrue() 1504 1505 if !w.wroteHeader { 1506 w.WriteHeader(StatusOK) 1507 } 1508 1509 w.w.Flush() 1510 putBufioWriter(w.w) 1511 w.cw.close() 1512 w.conn.bufw.Flush() 1513 1514 w.conn.r.abortPendingRead() 1515 1516 // Close the body (regardless of w.closeAfterReply) so we can 1517 // re-use its bufio.Reader later safely. 1518 w.reqBody.Close() 1519 1520 if w.req.MultipartForm != nil { 1521 w.req.MultipartForm.RemoveAll() 1522 } 1523 } 1524 1525 // shouldReuseConnection reports whether the underlying TCP connection can be reused. 1526 // It must only be called after the handler is done executing. 1527 func (w *response) shouldReuseConnection() bool { 1528 if w.closeAfterReply { 1529 // The request or something set while executing the 1530 // handler indicated we shouldn't reuse this 1531 // connection. 1532 return false 1533 } 1534 1535 if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written { 1536 // Did not write enough. Avoid getting out of sync. 1537 return false 1538 } 1539 1540 // There was some error writing to the underlying connection 1541 // during the request, so don't re-use this conn. 1542 if w.conn.werr != nil { 1543 return false 1544 } 1545 1546 if w.closedRequestBodyEarly() { 1547 return false 1548 } 1549 1550 return true 1551 } 1552 1553 func (w *response) closedRequestBodyEarly() bool { 1554 body, ok := w.req.Body.(*body) 1555 return ok && body.didEarlyClose() 1556 } 1557 1558 func (w *response) Flush() { 1559 if !w.wroteHeader { 1560 w.WriteHeader(StatusOK) 1561 } 1562 w.w.Flush() 1563 w.cw.flush() 1564 } 1565 1566 func (c *conn) finalFlush() { 1567 if c.bufr != nil { 1568 // Steal the bufio.Reader (~4KB worth of memory) and its associated 1569 // reader for a future connection. 1570 putBufioReader(c.bufr) 1571 c.bufr = nil 1572 } 1573 1574 if c.bufw != nil { 1575 c.bufw.Flush() 1576 // Steal the bufio.Writer (~4KB worth of memory) and its associated 1577 // writer for a future connection. 1578 putBufioWriter(c.bufw) 1579 c.bufw = nil 1580 } 1581 } 1582 1583 // Close the connection. 1584 func (c *conn) close() { 1585 c.finalFlush() 1586 c.rwc.Close() 1587 } 1588 1589 // rstAvoidanceDelay is the amount of time we sleep after closing the 1590 // write side of a TCP connection before closing the entire socket. 1591 // By sleeping, we increase the chances that the client sees our FIN 1592 // and processes its final data before they process the subsequent RST 1593 // from closing a connection with known unread data. 1594 // This RST seems to occur mostly on BSD systems. (And Windows?) 1595 // This timeout is somewhat arbitrary (~latency around the planet). 
1596 const rstAvoidanceDelay = 500 * time.Millisecond 1597 1598 type closeWriter interface { 1599 CloseWrite() error 1600 } 1601 1602 var _ closeWriter = (*net.TCPConn)(nil) 1603 1604 // closeWrite flushes any outstanding data and sends a FIN packet (if 1605 // client is connected via TCP), signalling that we're done. We then 1606 // pause for a bit, hoping the client processes it before any 1607 // subsequent RST. 1608 // 1609 // See https://golang.org/issue/3595 1610 func (c *conn) closeWriteAndWait() { 1611 c.finalFlush() 1612 if tcp, ok := c.rwc.(closeWriter); ok { 1613 tcp.CloseWrite() 1614 } 1615 time.Sleep(rstAvoidanceDelay) 1616 } 1617 1618 // validNPN reports whether the proto is not a blacklisted Next 1619 // Protocol Negotiation protocol. Empty and built-in protocol types 1620 // are blacklisted and can't be overridden with alternate 1621 // implementations. 1622 func validNPN(proto string) bool { 1623 switch proto { 1624 case "", "http/1.1", "http/1.0": 1625 return false 1626 } 1627 return true 1628 } 1629 1630 func (c *conn) setState(nc net.Conn, state ConnState) { 1631 srv := c.server 1632 switch state { 1633 case StateNew: 1634 srv.trackConn(c, true) 1635 case StateHijacked, StateClosed: 1636 srv.trackConn(c, false) 1637 } 1638 c.curState.Store(connStateInterface[state]) 1639 if hook := srv.ConnState; hook != nil { 1640 hook(nc, state) 1641 } 1642 } 1643 1644 // connStateInterface is an array of the interface{} versions of 1645 // ConnState values, so we can use them in atomic.Values later without 1646 // paying the cost of shoving their integers in an interface{}. 1647 var connStateInterface = [...]interface{}{ 1648 StateNew: StateNew, 1649 StateActive: StateActive, 1650 StateIdle: StateIdle, 1651 StateHijacked: StateHijacked, 1652 StateClosed: StateClosed, 1653 } 1654 1655 // badRequestError is a literal string (used by in the server in HTML, 1656 // unescaped) to tell the user why their request was bad. It should 1657 // be plain text without user info or other embedded errors. 1658 type badRequestError string 1659 1660 func (e badRequestError) Error() string { return "Bad Request: " + string(e) } 1661 1662 // ErrAbortHandler is a sentinel panic value to abort a handler. 1663 // While any panic from ServeHTTP aborts the response to the client, 1664 // panicking with ErrAbortHandler also suppresses logging of a stack 1665 // trace to the server's error log. 1666 var ErrAbortHandler = errors.New("net/http: abort Handler") 1667 1668 // isCommonNetReadError reports whether err is a common error 1669 // encountered during reading a request off the network when the 1670 // client has gone away or had its read fail somehow. This is used to 1671 // determine which logs are interesting enough to log about. 1672 func isCommonNetReadError(err error) bool { 1673 if err == io.EOF { 1674 return true 1675 } 1676 if neterr, ok := err.(net.Error); ok && neterr.Timeout() { 1677 return true 1678 } 1679 if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { 1680 return true 1681 } 1682 return false 1683 } 1684 1685 // Serve a new connection. 
1686 func (c *conn) serve(ctx context.Context) { 1687 c.remoteAddr = c.rwc.RemoteAddr().String() 1688 ctx = context.WithValue(ctx, LocalAddrContextKey, c.rwc.LocalAddr()) 1689 defer func() { 1690 if err := recover(); err != nil && err != ErrAbortHandler { 1691 const size = 64 << 10 1692 buf := make([]byte, size) 1693 buf = buf[:runtime.Stack(buf, false)] 1694 c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf) 1695 } 1696 if !c.hijacked() { 1697 c.close() 1698 c.setState(c.rwc, StateClosed) 1699 } 1700 }() 1701 1702 if tlsConn, ok := c.rwc.(*tls.Conn); ok { 1703 if d := c.server.ReadTimeout; d != 0 { 1704 c.rwc.SetReadDeadline(time.Now().Add(d)) 1705 } 1706 if d := c.server.WriteTimeout; d != 0 { 1707 c.rwc.SetWriteDeadline(time.Now().Add(d)) 1708 } 1709 if err := tlsConn.Handshake(); err != nil { 1710 c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err) 1711 return 1712 } 1713 c.tlsState = new(tls.ConnectionState) 1714 *c.tlsState = tlsConn.ConnectionState() 1715 if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) { 1716 if fn := c.server.TLSNextProto[proto]; fn != nil { 1717 h := initNPNRequest{tlsConn, serverHandler{c.server}} 1718 fn(c.server, tlsConn, h) 1719 } 1720 return 1721 } 1722 } 1723 1724 // HTTP/1.x from here on. 1725 1726 ctx, cancelCtx := context.WithCancel(ctx) 1727 c.cancelCtx = cancelCtx 1728 defer cancelCtx() 1729 1730 c.r = &connReader{conn: c} 1731 c.bufr = newBufioReader(c.r) 1732 c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10) 1733 1734 for { 1735 w, err := c.readRequest(ctx) 1736 if c.r.remain != c.server.initialReadLimitSize() { 1737 // If we read any bytes off the wire, we're active. 1738 c.setState(c.rwc, StateActive) 1739 } 1740 if err != nil { 1741 const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n" 1742 1743 if err == errTooLarge { 1744 // Their HTTP client may or may not be 1745 // able to read this if we're 1746 // responding to them and hanging up 1747 // while they're still writing their 1748 // request. Undefined behavior. 1749 const publicErr = "431 Request Header Fields Too Large" 1750 fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr) 1751 c.closeWriteAndWait() 1752 return 1753 } 1754 if isCommonNetReadError(err) { 1755 return // don't reply 1756 } 1757 1758 publicErr := "400 Bad Request" 1759 if v, ok := err.(badRequestError); ok { 1760 publicErr = publicErr + ": " + string(v) 1761 } 1762 1763 fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr) 1764 return 1765 } 1766 1767 // Expect 100 Continue support 1768 req := w.req 1769 if req.expectsContinue() { 1770 if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 { 1771 // Wrap the Body reader with one that replies on the connection 1772 req.Body = &expectContinueReader{readCloser: req.Body, resp: w} 1773 } 1774 } else if req.Header.get("Expect") != "" { 1775 w.sendExpectationFailed() 1776 return 1777 } 1778 1779 c.curReq.Store(w) 1780 1781 if requestBodyRemains(req.Body) { 1782 registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead) 1783 } else { 1784 if w.conn.bufr.Buffered() > 0 { 1785 w.conn.r.closeNotifyFromPipelinedRequest() 1786 } 1787 w.conn.r.startBackgroundRead() 1788 } 1789 1790 // HTTP cannot have multiple simultaneous active requests.[*] 1791 // Until the server replies to this request, it can't read another, 1792 // so we might as well run the handler in this goroutine. 1793 // [*] Not strictly true: HTTP pipelining. 
We could let them all process 1794 // in parallel even if their responses need to be serialized. 1795 // But we're not going to implement HTTP pipelining because it 1796 // was never deployed in the wild and the answer is HTTP/2. 1797 serverHandler{c.server}.ServeHTTP(w, w.req) 1798 w.cancelCtx() 1799 if c.hijacked() { 1800 return 1801 } 1802 w.finishRequest() 1803 if !w.shouldReuseConnection() { 1804 if w.requestBodyLimitHit || w.closedRequestBodyEarly() { 1805 c.closeWriteAndWait() 1806 } 1807 return 1808 } 1809 c.setState(c.rwc, StateIdle) 1810 c.curReq.Store((*response)(nil)) 1811 1812 if !w.conn.server.doKeepAlives() { 1813 // We're in shutdown mode. We might've replied 1814 // to the user without "Connection: close" and 1815 // they might think they can send another 1816 // request, but such is life with HTTP/1.1. 1817 return 1818 } 1819 1820 if d := c.server.idleTimeout(); d != 0 { 1821 c.rwc.SetReadDeadline(time.Now().Add(d)) 1822 if _, err := c.bufr.Peek(4); err != nil { 1823 return 1824 } 1825 } 1826 c.rwc.SetReadDeadline(time.Time{}) 1827 } 1828 } 1829 1830 func (w *response) sendExpectationFailed() { 1831 // TODO(bradfitz): let ServeHTTP handlers handle 1832 // requests with non-standard expectation[s]? Seems 1833 // theoretical at best, and doesn't fit into the 1834 // current ServeHTTP model anyway. We'd need to 1835 // make the ResponseWriter an optional 1836 // "ExpectReplier" interface or something. 1837 // 1838 // For now we'll just obey RFC 2616 14.20 which says 1839 // "If a server receives a request containing an 1840 // Expect field that includes an expectation- 1841 // extension that it does not support, it MUST 1842 // respond with a 417 (Expectation Failed) status." 1843 w.Header().Set("Connection", "close") 1844 w.WriteHeader(StatusExpectationFailed) 1845 w.finishRequest() 1846 } 1847 1848 // Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter 1849 // and a Hijacker. 1850 func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { 1851 if w.handlerDone.isSet() { 1852 panic("net/http: Hijack called after ServeHTTP finished") 1853 } 1854 if w.wroteHeader { 1855 w.cw.flush() 1856 } 1857 1858 c := w.conn 1859 c.mu.Lock() 1860 defer c.mu.Unlock() 1861 1862 // Release the bufioWriter that writes to the chunk writer, it is not 1863 // used after a connection has been hijacked. 1864 rwc, buf, err = c.hijackLocked() 1865 if err == nil { 1866 putBufioWriter(w.w) 1867 w.w = nil 1868 } 1869 return rwc, buf, err 1870 } 1871 1872 func (w *response) CloseNotify() <-chan bool { 1873 if w.handlerDone.isSet() { 1874 panic("net/http: CloseNotify called after ServeHTTP finished") 1875 } 1876 return w.closeNotifyCh 1877 } 1878 1879 func registerOnHitEOF(rc io.ReadCloser, fn func()) { 1880 switch v := rc.(type) { 1881 case *expectContinueReader: 1882 registerOnHitEOF(v.readCloser, fn) 1883 case *body: 1884 v.registerOnHitEOF(fn) 1885 default: 1886 panic("unexpected type " + fmt.Sprintf("%T", rc)) 1887 } 1888 } 1889 1890 // requestBodyRemains reports whether future calls to Read 1891 // on rc might yield more data. 
1892 func requestBodyRemains(rc io.ReadCloser) bool { 1893 if rc == NoBody { 1894 return false 1895 } 1896 switch v := rc.(type) { 1897 case *expectContinueReader: 1898 return requestBodyRemains(v.readCloser) 1899 case *body: 1900 return v.bodyRemains() 1901 default: 1902 panic("unexpected type " + fmt.Sprintf("%T", rc)) 1903 } 1904 } 1905 1906 // The HandlerFunc type is an adapter to allow the use of 1907 // ordinary functions as HTTP handlers. If f is a function 1908 // with the appropriate signature, HandlerFunc(f) is a 1909 // Handler that calls f. 1910 type HandlerFunc func(ResponseWriter, *Request) 1911 1912 // ServeHTTP calls f(w, r). 1913 func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) { 1914 f(w, r) 1915 } 1916 1917 // Helper handlers 1918 1919 // Error replies to the request with the specified error message and HTTP code. 1920 // It does not otherwise end the request; the caller should ensure no further 1921 // writes are done to w. 1922 // The error message should be plain text. 1923 func Error(w ResponseWriter, error string, code int) { 1924 w.Header().Set("Content-Type", "text/plain; charset=utf-8") 1925 w.Header().Set("X-Content-Type-Options", "nosniff") 1926 w.WriteHeader(code) 1927 fmt.Fprintln(w, error) 1928 } 1929 1930 // NotFound replies to the request with an HTTP 404 not found error. 1931 func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) } 1932 1933 // NotFoundHandler returns a simple request handler 1934 // that replies to each request with a ``404 page not found'' reply. 1935 func NotFoundHandler() Handler { return HandlerFunc(NotFound) } 1936 1937 // StripPrefix returns a handler that serves HTTP requests 1938 // by removing the given prefix from the request URL's Path 1939 // and invoking the handler h. StripPrefix handles a 1940 // request for a path that doesn't begin with prefix by 1941 // replying with an HTTP 404 not found error. 1942 func StripPrefix(prefix string, h Handler) Handler { 1943 if prefix == "" { 1944 return h 1945 } 1946 return HandlerFunc(func(w ResponseWriter, r *Request) { 1947 if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) { 1948 r2 := new(Request) 1949 *r2 = *r 1950 r2.URL = new(url.URL) 1951 *r2.URL = *r.URL 1952 r2.URL.Path = p 1953 h.ServeHTTP(w, r2) 1954 } else { 1955 NotFound(w, r) 1956 } 1957 }) 1958 } 1959 1960 // Redirect replies to the request with a redirect to url, 1961 // which may be a path relative to the request path. 1962 // 1963 // The provided code should be in the 3xx range and is usually 1964 // StatusMovedPermanently, StatusFound or StatusSeeOther. 1965 func Redirect(w ResponseWriter, r *Request, urlStr string, code int) { 1966 if u, err := url.Parse(urlStr); err == nil { 1967 // If url was relative, make absolute by 1968 // combining with request path. 1969 // The browser would probably do this for us, 1970 // but doing it ourselves is more reliable. 1971 1972 // NOTE(rsc): RFC 2616 says that the Location 1973 // line must be an absolute URI, like 1974 // "http://www.google.com/redirect/", 1975 // not a path like "/redirect/". 1976 // Unfortunately, we don't know what to 1977 // put in the host name section to get the 1978 // client to connect to us again, so we can't 1979 // know the right absolute URI to send back. 1980 // Because of this problem, no one pays attention 1981 // to the RFC; they all send back just a new path. 1982 // So do we. 
1983 if u.Scheme == "" && u.Host == "" { 1984 oldpath := r.URL.Path 1985 if oldpath == "" { // should not happen, but avoid a crash if it does 1986 oldpath = "/" 1987 } 1988 1989 // no leading http://server 1990 if urlStr == "" || urlStr[0] != '/' { 1991 // make relative path absolute 1992 olddir, _ := path.Split(oldpath) 1993 urlStr = olddir + urlStr 1994 } 1995 1996 var query string 1997 if i := strings.Index(urlStr, "?"); i != -1 { 1998 urlStr, query = urlStr[:i], urlStr[i:] 1999 } 2000 2001 // clean up but preserve trailing slash 2002 trailing := strings.HasSuffix(urlStr, "/") 2003 urlStr = path.Clean(urlStr) 2004 if trailing && !strings.HasSuffix(urlStr, "/") { 2005 urlStr += "/" 2006 } 2007 urlStr += query 2008 } 2009 } 2010 2011 w.Header().Set("Location", hexEscapeNonASCII(urlStr)) 2012 w.WriteHeader(code) 2013 2014 // RFC 2616 recommends that a short note "SHOULD" be included in the 2015 // response because older user agents may not understand 301/307. 2016 // Shouldn't send the note for POST or HEAD; that leaves GET. 2017 if r.Method == "GET" { 2018 note := "<a href=\"" + htmlEscape(urlStr) + "\">" + statusText[code] + "</a>.\n" 2019 fmt.Fprintln(w, note) 2020 } 2021 } 2022 2023 var htmlReplacer = strings.NewReplacer( 2024 "&", "&amp;", 2025 "<", "&lt;", 2026 ">", "&gt;", 2027 // "&#34;" is shorter than "&quot;". 2028 `"`, "&#34;", 2029 // "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5. 2030 "'", "&#39;", 2031 ) 2032 2033 func htmlEscape(s string) string { 2034 return htmlReplacer.Replace(s) 2035 } 2036 2037 // Redirect to a fixed URL 2038 type redirectHandler struct { 2039 url string 2040 code int 2041 } 2042 2043 func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) { 2044 Redirect(w, r, rh.url, rh.code) 2045 } 2046 2047 // RedirectHandler returns a request handler that redirects 2048 // each request it receives to the given url using the given 2049 // status code. 2050 // 2051 // The provided code should be in the 3xx range and is usually 2052 // StatusMovedPermanently, StatusFound or StatusSeeOther. 2053 func RedirectHandler(url string, code int) Handler { 2054 return &redirectHandler{url, code} 2055 } 2056 2057 // ServeMux is an HTTP request multiplexer. 2058 // It matches the URL of each incoming request against a list of registered 2059 // patterns and calls the handler for the pattern that 2060 // most closely matches the URL. 2061 // 2062 // Patterns name fixed, rooted paths, like "/favicon.ico", 2063 // or rooted subtrees, like "/images/" (note the trailing slash). 2064 // Longer patterns take precedence over shorter ones, so that 2065 // if there are handlers registered for both "/images/" 2066 // and "/images/thumbnails/", the latter handler will be 2067 // called for paths beginning "/images/thumbnails/" and the 2068 // former will receive requests for any other paths in the 2069 // "/images/" subtree. 2070 // 2071 // Note that since a pattern ending in a slash names a rooted subtree, 2072 // the pattern "/" matches all paths not matched by other registered 2073 // patterns, not just the URL with Path == "/". 2074 // 2075 // If a subtree has been registered and a request is received naming the 2076 // subtree root without its trailing slash, ServeMux redirects that 2077 // request to the subtree root (adding the trailing slash). This behavior can 2078 // be overridden with a separate registration for the path without 2079 // the trailing slash.
For example, registering "/images/" causes ServeMux 2080 // to redirect a request for "/images" to "/images/", unless "/images" has 2081 // been registered separately. 2082 // 2083 // Patterns may optionally begin with a host name, restricting matches to 2084 // URLs on that host only. Host-specific patterns take precedence over 2085 // general patterns, so that a handler might register for the two patterns 2086 // "/codesearch" and "codesearch.google.com/" without also taking over 2087 // requests for "http://www.google.com/". 2088 // 2089 // ServeMux also takes care of sanitizing the URL request path, 2090 // redirecting any request containing . or .. elements or repeated slashes 2091 // to an equivalent, cleaner URL. 2092 type ServeMux struct { 2093 mu sync.RWMutex 2094 m map[string]muxEntry 2095 hosts bool // whether any patterns contain hostnames 2096 } 2097 2098 type muxEntry struct { 2099 explicit bool 2100 h Handler 2101 pattern string 2102 } 2103 2104 // NewServeMux allocates and returns a new ServeMux. 2105 func NewServeMux() *ServeMux { return new(ServeMux) } 2106 2107 // DefaultServeMux is the default ServeMux used by Serve. 2108 var DefaultServeMux = &defaultServeMux 2109 2110 var defaultServeMux ServeMux 2111 2112 // Does path match pattern? 2113 func pathMatch(pattern, path string) bool { 2114 if len(pattern) == 0 { 2115 // should not happen 2116 return false 2117 } 2118 n := len(pattern) 2119 if pattern[n-1] != '/' { 2120 return pattern == path 2121 } 2122 return len(path) >= n && path[0:n] == pattern 2123 } 2124 2125 // Return the canonical path for p, eliminating . and .. elements. 2126 func cleanPath(p string) string { 2127 if p == "" { 2128 return "/" 2129 } 2130 if p[0] != '/' { 2131 p = "/" + p 2132 } 2133 np := path.Clean(p) 2134 // path.Clean removes trailing slash except for root; 2135 // put the trailing slash back if necessary. 2136 if p[len(p)-1] == '/' && np != "/" { 2137 np += "/" 2138 } 2139 return np 2140 } 2141 2142 // stripHostPort returns h without any trailing ":<port>". 2143 func stripHostPort(h string) string { 2144 // If no port on host, return unchanged 2145 if strings.IndexByte(h, ':') == -1 { 2146 return h 2147 } 2148 host, _, err := net.SplitHostPort(h) 2149 if err != nil { 2150 return h // on error, return unchanged 2151 } 2152 return host 2153 } 2154 2155 // Find a handler on a handler map given a path string. 2156 // Most-specific (longest) pattern wins. 2157 func (mux *ServeMux) match(path string) (h Handler, pattern string) { 2158 // Check for exact match first. 2159 v, ok := mux.m[path] 2160 if ok { 2161 return v.h, v.pattern 2162 } 2163 2164 // Check for longest valid match. 2165 var n = 0 2166 for k, v := range mux.m { 2167 if !pathMatch(k, path) { 2168 continue 2169 } 2170 if h == nil || len(k) > n { 2171 n = len(k) 2172 h = v.h 2173 pattern = v.pattern 2174 } 2175 } 2176 return 2177 } 2178 2179 // Handler returns the handler to use for the given request, 2180 // consulting r.Method, r.Host, and r.URL.Path. It always returns 2181 // a non-nil handler. If the path is not in its canonical form, the 2182 // handler will be an internally-generated handler that redirects 2183 // to the canonical path. If the host contains a port, it is ignored 2184 // when matching handlers. 2185 // 2186 // The path and host are used unchanged for CONNECT requests. 
2187 // 2188 // Handler also returns the registered pattern that matches the 2189 // request or, in the case of internally-generated redirects, 2190 // the pattern that will match after following the redirect. 2191 // 2192 // If there is no registered handler that applies to the request, 2193 // Handler returns a ``page not found'' handler and an empty pattern. 2194 func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) { 2195 2196 // CONNECT requests are not canonicalized. 2197 if r.Method == "CONNECT" { 2198 return mux.handler(r.Host, r.URL.Path) 2199 } 2200 2201 // All other requests have any port stripped and path cleaned 2202 // before passing to mux.handler. 2203 host := stripHostPort(r.Host) 2204 path := cleanPath(r.URL.Path) 2205 if path != r.URL.Path { 2206 _, pattern = mux.handler(host, path) 2207 url := *r.URL 2208 url.Path = path 2209 return RedirectHandler(url.String(), StatusMovedPermanently), pattern 2210 } 2211 2212 return mux.handler(host, r.URL.Path) 2213 } 2214 2215 // handler is the main implementation of Handler. 2216 // The path is known to be in canonical form, except for CONNECT methods. 2217 func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) { 2218 mux.mu.RLock() 2219 defer mux.mu.RUnlock() 2220 2221 // Host-specific pattern takes precedence over generic ones 2222 if mux.hosts { 2223 h, pattern = mux.match(host + path) 2224 } 2225 if h == nil { 2226 h, pattern = mux.match(path) 2227 } 2228 if h == nil { 2229 h, pattern = NotFoundHandler(), "" 2230 } 2231 return 2232 } 2233 2234 // ServeHTTP dispatches the request to the handler whose 2235 // pattern most closely matches the request URL. 2236 func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) { 2237 if r.RequestURI == "*" { 2238 if r.ProtoAtLeast(1, 1) { 2239 w.Header().Set("Connection", "close") 2240 } 2241 w.WriteHeader(StatusBadRequest) 2242 return 2243 } 2244 h, _ := mux.Handler(r) 2245 h.ServeHTTP(w, r) 2246 } 2247 2248 // Handle registers the handler for the given pattern. 2249 // If a handler already exists for pattern, Handle panics. 2250 func (mux *ServeMux) Handle(pattern string, handler Handler) { 2251 mux.mu.Lock() 2252 defer mux.mu.Unlock() 2253 2254 if pattern == "" { 2255 panic("http: invalid pattern " + pattern) 2256 } 2257 if handler == nil { 2258 panic("http: nil handler") 2259 } 2260 if mux.m[pattern].explicit { 2261 panic("http: multiple registrations for " + pattern) 2262 } 2263 2264 if mux.m == nil { 2265 mux.m = make(map[string]muxEntry) 2266 } 2267 mux.m[pattern] = muxEntry{explicit: true, h: handler, pattern: pattern} 2268 2269 if pattern[0] != '/' { 2270 mux.hosts = true 2271 } 2272 2273 // Helpful behavior: 2274 // If pattern is /tree/, insert an implicit permanent redirect for /tree. 2275 // It can be overridden by an explicit registration. 2276 n := len(pattern) 2277 if n > 0 && pattern[n-1] == '/' && !mux.m[pattern[0:n-1]].explicit { 2278 // If pattern contains a host name, strip it and use remaining 2279 // path for redirect. 2280 path := pattern 2281 if pattern[0] != '/' { 2282 // In pattern, at least the last character is a '/', so 2283 // strings.Index can't be -1. 2284 path = pattern[strings.Index(pattern, "/"):] 2285 } 2286 url := &url.URL{Path: path} 2287 mux.m[pattern[0:n-1]] = muxEntry{h: RedirectHandler(url.String(), StatusMovedPermanently), pattern: pattern} 2288 } 2289 } 2290 2291 // HandleFunc registers the handler function for the given pattern. 
2292 func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { 2293 mux.Handle(pattern, HandlerFunc(handler)) 2294 } 2295 2296 // Handle registers the handler for the given pattern 2297 // in the DefaultServeMux. 2298 // The documentation for ServeMux explains how patterns are matched. 2299 func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } 2300 2301 // HandleFunc registers the handler function for the given pattern 2302 // in the DefaultServeMux. 2303 // The documentation for ServeMux explains how patterns are matched. 2304 func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { 2305 DefaultServeMux.HandleFunc(pattern, handler) 2306 } 2307 2308 // Serve accepts incoming HTTP connections on the listener l, 2309 // creating a new service goroutine for each. The service goroutines 2310 // read requests and then call handler to reply to them. 2311 // Handler is typically nil, in which case the DefaultServeMux is used. 2312 func Serve(l net.Listener, handler Handler) error { 2313 srv := &Server{Handler: handler} 2314 return srv.Serve(l) 2315 } 2316 2317 // ServeTLS accepts incoming HTTPS connections on the listener l, 2318 // creating a new service goroutine for each. The service goroutines 2319 // read requests and then call handler to reply to them. 2320 // 2321 // Handler is typically nil, in which case the DefaultServeMux is used. 2322 // 2323 // Additionally, files containing a certificate and matching private key 2324 // for the server must be provided. If the certificate is signed by a 2325 // certificate authority, the certFile should be the concatenation 2326 // of the server's certificate, any intermediates, and the CA's certificate. 2327 func ServeTLS(l net.Listener, handler Handler, certFile, keyFile string) error { 2328 srv := &Server{Handler: handler} 2329 return srv.ServeTLS(l, certFile, keyFile) 2330 } 2331 2332 // A Server defines parameters for running an HTTP server. 2333 // The zero value for Server is a valid configuration. 2334 type Server struct { 2335 Addr string // TCP address to listen on, ":http" if empty 2336 Handler Handler // handler to invoke, http.DefaultServeMux if nil 2337 TLSConfig *tls.Config // optional TLS config, used by ServeTLS and ListenAndServeTLS 2338 2339 // ReadTimeout is the maximum duration for reading the entire 2340 // request, including the body. 2341 // 2342 // Because ReadTimeout does not let Handlers make per-request 2343 // decisions on each request body's acceptable deadline or 2344 // upload rate, most users will prefer to use 2345 // ReadHeaderTimeout. It is valid to use them both. 2346 ReadTimeout time.Duration 2347 2348 // ReadHeaderTimeout is the amount of time allowed to read 2349 // request headers. The connection's read deadline is reset 2350 // after reading the headers and the Handler can decide what 2351 // is considered too slow for the body. 2352 ReadHeaderTimeout time.Duration 2353 2354 // WriteTimeout is the maximum duration before timing out 2355 // writes of the response. It is reset whenever a new 2356 // request's header is read. Like ReadTimeout, it does not 2357 // let Handlers make decisions on a per-request basis. 2358 WriteTimeout time.Duration 2359 2360 // IdleTimeout is the maximum amount of time to wait for the 2361 // next request when keep-alives are enabled. If IdleTimeout 2362 // is zero, the value of ReadTimeout is used. If both are 2363 // zero, there is no timeout.
2364 IdleTimeout time.Duration 2365 2366 // MaxHeaderBytes controls the maximum number of bytes the 2367 // server will read parsing the request header's keys and 2368 // values, including the request line. It does not limit the 2369 // size of the request body. 2370 // If zero, DefaultMaxHeaderBytes is used. 2371 MaxHeaderBytes int 2372 2373 // TLSNextProto optionally specifies a function to take over 2374 // ownership of the provided TLS connection when an NPN/ALPN 2375 // protocol upgrade has occurred. The map key is the protocol 2376 // name negotiated. The Handler argument should be used to 2377 // handle HTTP requests and will initialize the Request's TLS 2378 // and RemoteAddr if not already set. The connection is 2379 // automatically closed when the function returns. 2380 // If TLSNextProto is not nil, HTTP/2 support is not enabled 2381 // automatically. 2382 TLSNextProto map[string]func(*Server, *tls.Conn, Handler) 2383 2384 // ConnState specifies an optional callback function that is 2385 // called when a client connection changes state. See the 2386 // ConnState type and associated constants for details. 2387 ConnState func(net.Conn, ConnState) 2388 2389 // ErrorLog specifies an optional logger for errors accepting 2390 // connections and unexpected behavior from handlers. 2391 // If nil, logging goes to os.Stderr via the log package's 2392 // standard logger. 2393 ErrorLog *log.Logger 2394 2395 disableKeepAlives int32 // accessed atomically. 2396 inShutdown int32 // accessed atomically (non-zero means we're in Shutdown) 2397 nextProtoOnce sync.Once // guards setupHTTP2_* init 2398 nextProtoErr error // result of http2.ConfigureServer if used 2399 2400 mu sync.Mutex 2401 listeners map[net.Listener]struct{} 2402 activeConn map[*conn]struct{} 2403 doneChan chan struct{} 2404 onShutdown []func() 2405 } 2406 2407 func (s *Server) getDoneChan() <-chan struct{} { 2408 s.mu.Lock() 2409 defer s.mu.Unlock() 2410 return s.getDoneChanLocked() 2411 } 2412 2413 func (s *Server) getDoneChanLocked() chan struct{} { 2414 if s.doneChan == nil { 2415 s.doneChan = make(chan struct{}) 2416 } 2417 return s.doneChan 2418 } 2419 2420 func (s *Server) closeDoneChanLocked() { 2421 ch := s.getDoneChanLocked() 2422 select { 2423 case <-ch: 2424 // Already closed. Don't close again. 2425 default: 2426 // Safe to close here. We're the only closer, guarded 2427 // by s.mu. 2428 close(ch) 2429 } 2430 } 2431 2432 // Close immediately closes all active net.Listeners and any 2433 // connections in state StateNew, StateActive, or StateIdle. For a 2434 // graceful shutdown, use Shutdown. 2435 // 2436 // Close does not attempt to close (and does not even know about) 2437 // any hijacked connections, such as WebSockets. 2438 // 2439 // Close returns any error returned from closing the Server's 2440 // underlying Listener(s). 2441 func (srv *Server) Close() error { 2442 srv.mu.Lock() 2443 defer srv.mu.Unlock() 2444 srv.closeDoneChanLocked() 2445 err := srv.closeListenersLocked() 2446 for c := range srv.activeConn { 2447 c.rwc.Close() 2448 delete(srv.activeConn, c) 2449 } 2450 return err 2451 } 2452 2453 // shutdownPollInterval is how often we poll for quiescence 2454 // during Server.Shutdown. This is lower during tests, to 2455 // speed up tests. 2456 // Ideally we could find a solution that doesn't involve polling, 2457 // but which also doesn't have a high runtime cost (and doesn't 2458 // involve any contentious mutexes), but that is left as an 2459 // exercise for the reader. 
2460 var shutdownPollInterval = 500 * time.Millisecond 2461 2462 // Shutdown gracefully shuts down the server without interrupting any 2463 // active connections. Shutdown works by first closing all open 2464 // listeners, then closing all idle connections, and then waiting 2465 // indefinitely for connections to return to idle and then shut down. 2466 // If the provided context expires before the shutdown is complete, 2467 // Shutdown returns the context's error, otherwise it returns any 2468 // error returned from closing the Server's underlying Listener(s). 2469 // 2470 // When Shutdown is called, Serve, ListenAndServe, and 2471 // ListenAndServeTLS immediately return ErrServerClosed. Make sure the 2472 // program doesn't exit and waits instead for Shutdown to return. 2473 // 2474 // Shutdown does not attempt to close nor wait for hijacked 2475 // connections such as WebSockets. The caller of Shutdown should 2476 // separately notify such long-lived connections of shutdown and wait 2477 // for them to close, if desired. 2478 func (srv *Server) Shutdown(ctx context.Context) error { 2479 atomic.AddInt32(&srv.inShutdown, 1) 2480 defer atomic.AddInt32(&srv.inShutdown, -1) 2481 2482 srv.mu.Lock() 2483 lnerr := srv.closeListenersLocked() 2484 srv.closeDoneChanLocked() 2485 for _, f := range srv.onShutdown { 2486 go f() 2487 } 2488 srv.mu.Unlock() 2489 2490 ticker := time.NewTicker(shutdownPollInterval) 2491 defer ticker.Stop() 2492 for { 2493 if srv.closeIdleConns() { 2494 return lnerr 2495 } 2496 select { 2497 case <-ctx.Done(): 2498 return ctx.Err() 2499 case <-ticker.C: 2500 } 2501 } 2502 } 2503 2504 // RegisterOnShutdown registers a function to call on Shutdown. 2505 // This can be used to gracefully shutdown connections that have 2506 // undergone NPN/ALPN protocol upgrade or that have been hijacked. 2507 // This function should start protocol-specific graceful shutdown, 2508 // but should not wait for shutdown to complete. 2509 func (srv *Server) RegisterOnShutdown(f func()) { 2510 srv.mu.Lock() 2511 srv.onShutdown = append(srv.onShutdown, f) 2512 srv.mu.Unlock() 2513 } 2514 2515 // closeIdleConns closes all idle connections and reports whether the 2516 // server is quiescent. 2517 func (s *Server) closeIdleConns() bool { 2518 s.mu.Lock() 2519 defer s.mu.Unlock() 2520 quiescent := true 2521 for c := range s.activeConn { 2522 st, ok := c.curState.Load().(ConnState) 2523 if !ok || st != StateIdle { 2524 quiescent = false 2525 continue 2526 } 2527 c.rwc.Close() 2528 delete(s.activeConn, c) 2529 } 2530 return quiescent 2531 } 2532 2533 func (s *Server) closeListenersLocked() error { 2534 var err error 2535 for ln := range s.listeners { 2536 if cerr := ln.Close(); cerr != nil && err == nil { 2537 err = cerr 2538 } 2539 delete(s.listeners, ln) 2540 } 2541 return err 2542 } 2543 2544 // A ConnState represents the state of a client connection to a server. 2545 // It's used by the optional Server.ConnState hook. 2546 type ConnState int 2547 2548 const ( 2549 // StateNew represents a new connection that is expected to 2550 // send a request immediately. Connections begin at this 2551 // state and then transition to either StateActive or 2552 // StateClosed. 2553 StateNew ConnState = iota 2554 2555 // StateActive represents a connection that has read 1 or more 2556 // bytes of a request. The Server.ConnState hook for 2557 // StateActive fires before the request has entered a handler 2558 // and doesn't fire again until the request has been 2559 // handled. 
After the request is handled, the state 2560 // transitions to StateClosed, StateHijacked, or StateIdle. 2561 // For HTTP/2, StateActive fires on the transition from zero 2562 // to one active request, and only transitions away once all 2563 // active requests are complete. That means that ConnState 2564 // cannot be used to do per-request work; ConnState only notes 2565 // the overall state of the connection. 2566 StateActive 2567 2568 // StateIdle represents a connection that has finished 2569 // handling a request and is in the keep-alive state, waiting 2570 // for a new request. Connections transition from StateIdle 2571 // to either StateActive or StateClosed. 2572 StateIdle 2573 2574 // StateHijacked represents a hijacked connection. 2575 // This is a terminal state. It does not transition to StateClosed. 2576 StateHijacked 2577 2578 // StateClosed represents a closed connection. 2579 // This is a terminal state. Hijacked connections do not 2580 // transition to StateClosed. 2581 StateClosed 2582 ) 2583 2584 var stateName = map[ConnState]string{ 2585 StateNew: "new", 2586 StateActive: "active", 2587 StateIdle: "idle", 2588 StateHijacked: "hijacked", 2589 StateClosed: "closed", 2590 } 2591 2592 func (c ConnState) String() string { 2593 return stateName[c] 2594 } 2595 2596 // serverHandler delegates to either the server's Handler or 2597 // DefaultServeMux and also handles "OPTIONS *" requests. 2598 type serverHandler struct { 2599 srv *Server 2600 } 2601 2602 func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) { 2603 handler := sh.srv.Handler 2604 if handler == nil { 2605 handler = DefaultServeMux 2606 } 2607 if req.RequestURI == "*" && req.Method == "OPTIONS" { 2608 handler = globalOptionsHandler{} 2609 } 2610 handler.ServeHTTP(rw, req) 2611 } 2612 2613 // ListenAndServe listens on the TCP network address srv.Addr and then 2614 // calls Serve to handle requests on incoming connections. 2615 // Accepted connections are configured to enable TCP keep-alives. 2616 // If srv.Addr is blank, ":http" is used. 2617 // ListenAndServe always returns a non-nil error. 2618 func (srv *Server) ListenAndServe() error { 2619 addr := srv.Addr 2620 if addr == "" { 2621 addr = ":http" 2622 } 2623 ln, err := net.Listen("tcp", addr) 2624 if err != nil { 2625 return err 2626 } 2627 return srv.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)}) 2628 } 2629 2630 var testHookServerServe func(*Server, net.Listener) // used if non-nil 2631 2632 // shouldConfigureHTTP2ForServe reports whether Server.Serve should 2633 // configure automatic HTTP/2 (which sets up the srv.TLSNextProto map). 2634 func (srv *Server) shouldConfigureHTTP2ForServe() bool { 2635 if srv.TLSConfig == nil { 2636 // Compatibility with Go 1.6: 2637 // If there's no TLSConfig, it's possible that the user just 2638 // didn't set it on the http.Server, but did pass it to 2639 // tls.NewListener and passed that listener to Serve. 2640 // So we should configure HTTP/2 (to set up srv.TLSNextProto) 2641 // in case the listener returns an "h2" *tls.Conn. 2642 return true 2643 } 2644 // The user specified a TLSConfig on their http.Server. 2645 // In this case, only configure HTTP/2 if their tls.Config 2646 // explicitly mentions "h2". Otherwise http2.ConfigureServer 2647 // would modify the tls.Config to add it, but they probably already 2648 // passed this tls.Config to tls.NewListener. And if they did, 2649 // it's too late anyway to fix it. It would only be potentially racy. 2650 // See Issue 15908.
2651 return strSliceContains(srv.TLSConfig.NextProtos, http2NextProtoTLS) 2652 } 2653 2654 // ErrServerClosed is returned by the Server's Serve, ServeTLS, ListenAndServe, 2655 // and ListenAndServeTLS methods after a call to Shutdown or Close. 2656 var ErrServerClosed = errors.New("http: Server closed") 2657 2658 // Serve accepts incoming connections on the Listener l, creating a 2659 // new service goroutine for each. The service goroutines read requests and 2660 // then call srv.Handler to reply to them. 2661 // 2662 // For HTTP/2 support, srv.TLSConfig should be initialized to the 2663 // provided listener's TLS Config before calling Serve. If 2664 // srv.TLSConfig is non-nil and doesn't include the string "h2" in 2665 // Config.NextProtos, HTTP/2 support is not enabled. 2666 // 2667 // Serve always returns a non-nil error. After Shutdown or Close, the 2668 // returned error is ErrServerClosed. 2669 func (srv *Server) Serve(l net.Listener) error { 2670 defer l.Close() 2671 if fn := testHookServerServe; fn != nil { 2672 fn(srv, l) 2673 } 2674 var tempDelay time.Duration // how long to sleep on accept failure 2675 2676 if err := srv.setupHTTP2_Serve(); err != nil { 2677 return err 2678 } 2679 2680 srv.trackListener(l, true) 2681 defer srv.trackListener(l, false) 2682 2683 baseCtx := context.Background() // base is always background, per Issue 16220 2684 ctx := context.WithValue(baseCtx, ServerContextKey, srv) 2685 for { 2686 rw, e := l.Accept() 2687 if e != nil { 2688 select { 2689 case <-srv.getDoneChan(): 2690 return ErrServerClosed 2691 default: 2692 } 2693 if ne, ok := e.(net.Error); ok && ne.Temporary() { 2694 if tempDelay == 0 { 2695 tempDelay = 5 * time.Millisecond 2696 } else { 2697 tempDelay *= 2 2698 } 2699 if max := 1 * time.Second; tempDelay > max { 2700 tempDelay = max 2701 } 2702 srv.logf("http: Accept error: %v; retrying in %v", e, tempDelay) 2703 time.Sleep(tempDelay) 2704 continue 2705 } 2706 return e 2707 } 2708 tempDelay = 0 2709 c := srv.newConn(rw) 2710 c.setState(c.rwc, StateNew) // before Serve can return 2711 go c.serve(ctx) 2712 } 2713 } 2714 2715 // ServeTLS accepts incoming connections on the Listener l, creating a 2716 // new service goroutine for each. The service goroutines read requests and 2717 // then call srv.Handler to reply to them. 2718 // 2719 // Additionally, files containing a certificate and matching private key for 2720 // the server must be provided if neither the Server's TLSConfig.Certificates 2721 // nor TLSConfig.GetCertificate are populated. If the certificate is signed by 2722 // a certificate authority, the certFile should be the concatenation of the 2723 // server's certificate, any intermediates, and the CA's certificate. 2724 // 2725 // For HTTP/2 support, srv.TLSConfig should be initialized to the 2726 // provided listener's TLS Config before calling Serve. If 2727 // srv.TLSConfig is non-nil and doesn't include the string "h2" in 2728 // Config.NextProtos, HTTP/2 support is not enabled. 2729 // 2730 // ServeTLS always returns a non-nil error. After Shutdown or Close, the 2731 // returned error is ErrServerClosed. 2732 func (srv *Server) ServeTLS(l net.Listener, certFile, keyFile string) error { 2733 // Set up HTTP/2 before srv.Serve, to initialize srv.TLSConfig 2734 // before we clone it and create the TLS Listener.
2735 if err := srv.setupHTTP2_ServeTLS(); err != nil { 2736 return err 2737 } 2738 2739 config := cloneTLSConfig(srv.TLSConfig) 2740 if !strSliceContains(config.NextProtos, "http/1.1") { 2741 config.NextProtos = append(config.NextProtos, "http/1.1") 2742 } 2743 2744 configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil 2745 if !configHasCert || certFile != "" || keyFile != "" { 2746 var err error 2747 config.Certificates = make([]tls.Certificate, 1) 2748 config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) 2749 if err != nil { 2750 return err 2751 } 2752 } 2753 2754 tlsListener := tls.NewListener(l, config) 2755 return srv.Serve(tlsListener) 2756 } 2757 2758 func (s *Server) trackListener(ln net.Listener, add bool) { 2759 s.mu.Lock() 2760 defer s.mu.Unlock() 2761 if s.listeners == nil { 2762 s.listeners = make(map[net.Listener]struct{}) 2763 } 2764 if add { 2765 // If the *Server is being reused after a previous 2766 // Close or Shutdown, reset its doneChan: 2767 if len(s.listeners) == 0 && len(s.activeConn) == 0 { 2768 s.doneChan = nil 2769 } 2770 s.listeners[ln] = struct{}{} 2771 } else { 2772 delete(s.listeners, ln) 2773 } 2774 } 2775 2776 func (s *Server) trackConn(c *conn, add bool) { 2777 s.mu.Lock() 2778 defer s.mu.Unlock() 2779 if s.activeConn == nil { 2780 s.activeConn = make(map[*conn]struct{}) 2781 } 2782 if add { 2783 s.activeConn[c] = struct{}{} 2784 } else { 2785 delete(s.activeConn, c) 2786 } 2787 } 2788 2789 func (s *Server) idleTimeout() time.Duration { 2790 if s.IdleTimeout != 0 { 2791 return s.IdleTimeout 2792 } 2793 return s.ReadTimeout 2794 } 2795 2796 func (s *Server) readHeaderTimeout() time.Duration { 2797 if s.ReadHeaderTimeout != 0 { 2798 return s.ReadHeaderTimeout 2799 } 2800 return s.ReadTimeout 2801 } 2802 2803 func (s *Server) doKeepAlives() bool { 2804 return atomic.LoadInt32(&s.disableKeepAlives) == 0 && !s.shuttingDown() 2805 } 2806 2807 func (s *Server) shuttingDown() bool { 2808 return atomic.LoadInt32(&s.inShutdown) != 0 2809 } 2810 2811 // SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled. 2812 // By default, keep-alives are always enabled. Only very 2813 // resource-constrained environments or servers in the process of 2814 // shutting down should disable them. 2815 func (srv *Server) SetKeepAlivesEnabled(v bool) { 2816 if v { 2817 atomic.StoreInt32(&srv.disableKeepAlives, 0) 2818 return 2819 } 2820 atomic.StoreInt32(&srv.disableKeepAlives, 1) 2821 2822 // Close idle HTTP/1 conns: 2823 srv.closeIdleConns() 2824 2825 // Close HTTP/2 conns, as soon as they become idle, but reset 2826 // the chan so future conns (if the listener is still active) 2827 // still work and don't get a GOAWAY immediately, before their 2828 // first request: 2829 srv.mu.Lock() 2830 defer srv.mu.Unlock() 2831 srv.closeDoneChanLocked() // closes http2 conns 2832 srv.doneChan = nil 2833 } 2834 2835 func (s *Server) logf(format string, args ...interface{}) { 2836 if s.ErrorLog != nil { 2837 s.ErrorLog.Printf(format, args...) 2838 } else { 2839 log.Printf(format, args...) 2840 } 2841 } 2842 2843 // ListenAndServe listens on the TCP network address addr 2844 // and then calls Serve with handler to handle requests 2845 // on incoming connections. 2846 // Accepted connections are configured to enable TCP keep-alives. 2847 // Handler is typically nil, in which case the DefaultServeMux is 2848 // used. 
2849 // 2850 // A trivial example server is: 2851 // 2852 // package main 2853 // 2854 // import ( 2855 // "io" 2856 // "net/http" 2857 // "log" 2858 // ) 2859 // 2860 // // hello world, the web server 2861 // func HelloServer(w http.ResponseWriter, req *http.Request) { 2862 // io.WriteString(w, "hello, world!\n") 2863 // } 2864 // 2865 // func main() { 2866 // http.HandleFunc("/hello", HelloServer) 2867 // log.Fatal(http.ListenAndServe(":12345", nil)) 2868 // } 2869 // 2870 // ListenAndServe always returns a non-nil error. 2871 func ListenAndServe(addr string, handler Handler) error { 2872 server := &Server{Addr: addr, Handler: handler} 2873 return server.ListenAndServe() 2874 } 2875 2876 // ListenAndServeTLS acts identically to ListenAndServe, except that it 2877 // expects HTTPS connections. Additionally, files containing a certificate and 2878 // matching private key for the server must be provided. If the certificate 2879 // is signed by a certificate authority, the certFile should be the concatenation 2880 // of the server's certificate, any intermediates, and the CA's certificate. 2881 // 2882 // A trivial example server is: 2883 // 2884 // import ( 2885 // "log" 2886 // "net/http" 2887 // ) 2888 // 2889 // func handler(w http.ResponseWriter, req *http.Request) { 2890 // w.Header().Set("Content-Type", "text/plain") 2891 // w.Write([]byte("This is an example server.\n")) 2892 // } 2893 // 2894 // func main() { 2895 // http.HandleFunc("/", handler) 2896 // log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/") 2897 // err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil) 2898 // log.Fatal(err) 2899 // } 2900 // 2901 // One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem. 2902 // 2903 // ListenAndServeTLS always returns a non-nil error. 2904 func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { 2905 server := &Server{Addr: addr, Handler: handler} 2906 return server.ListenAndServeTLS(certFile, keyFile) 2907 } 2908 2909 // ListenAndServeTLS listens on the TCP network address srv.Addr and 2910 // then calls Serve to handle requests on incoming TLS connections. 2911 // Accepted connections are configured to enable TCP keep-alives. 2912 // 2913 // Filenames containing a certificate and matching private key for the 2914 // server must be provided if neither the Server's TLSConfig.Certificates 2915 // nor TLSConfig.GetCertificate are populated. If the certificate is 2916 // signed by a certificate authority, the certFile should be the 2917 // concatenation of the server's certificate, any intermediates, and 2918 // the CA's certificate. 2919 // 2920 // If srv.Addr is blank, ":https" is used. 2921 // 2922 // ListenAndServeTLS always returns a non-nil error. 2923 func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error { 2924 addr := srv.Addr 2925 if addr == "" { 2926 addr = ":https" 2927 } 2928 2929 ln, err := net.Listen("tcp", addr) 2930 if err != nil { 2931 return err 2932 } 2933 2934 return srv.ServeTLS(tcpKeepAliveListener{ln.(*net.TCPListener)}, certFile, keyFile) 2935 } 2936 2937 // setupHTTP2_ServeTLS conditionally configures HTTP/2 on 2938 // srv and returns whether there was an error setting it up. If it is 2939 // not configured for policy reasons, nil is returned. 
2940 func (srv *Server) setupHTTP2_ServeTLS() error { 2941 srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults) 2942 return srv.nextProtoErr 2943 } 2944 2945 // setupHTTP2_Serve is called from (*Server).Serve and conditionally 2946 // configures HTTP/2 on srv using a more conservative policy than 2947 // setupHTTP2_ServeTLS because Serve may be called 2948 // concurrently. 2949 // 2950 // The tests named TestTransportAutomaticHTTP2* and 2951 // TestConcurrentServerServe in server_test.go demonstrate some 2952 // of the supported use cases and motivations. 2953 func (srv *Server) setupHTTP2_Serve() error { 2954 srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults_Serve) 2955 return srv.nextProtoErr 2956 } 2957 2958 func (srv *Server) onceSetNextProtoDefaults_Serve() { 2959 if srv.shouldConfigureHTTP2ForServe() { 2960 srv.onceSetNextProtoDefaults() 2961 } 2962 } 2963 2964 // onceSetNextProtoDefaults configures HTTP/2, if the user hasn't 2965 // configured otherwise. (by setting srv.TLSNextProto non-nil) 2966 // It must only be called via srv.nextProtoOnce (use srv.setupHTTP2_*). 2967 func (srv *Server) onceSetNextProtoDefaults() { 2968 if strings.Contains(os.Getenv("GODEBUG"), "http2server=0") { 2969 return 2970 } 2971 // Enable HTTP/2 by default if the user hasn't otherwise 2972 // configured their TLSNextProto map. 2973 if srv.TLSNextProto == nil { 2974 conf := &http2Server{ 2975 NewWriteScheduler: func() http2WriteScheduler { return http2NewPriorityWriteScheduler(nil) }, 2976 } 2977 srv.nextProtoErr = http2ConfigureServer(srv, conf) 2978 } 2979 } 2980 2981 // TimeoutHandler returns a Handler that runs h with the given time limit. 2982 // 2983 // The new Handler calls h.ServeHTTP to handle each request, but if a 2984 // call runs for longer than its time limit, the handler responds with 2985 // a 503 Service Unavailable error and the given message in its body. 2986 // (If msg is empty, a suitable default message will be sent.) 2987 // After such a timeout, writes by h to its ResponseWriter will return 2988 // ErrHandlerTimeout. 2989 // 2990 // TimeoutHandler buffers all Handler writes to memory and does not 2991 // support the Hijacker or Flusher interfaces. 2992 func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler { 2993 return &timeoutHandler{ 2994 handler: h, 2995 body: msg, 2996 dt: dt, 2997 } 2998 } 2999 3000 // ErrHandlerTimeout is returned on ResponseWriter Write calls 3001 // in handlers which have timed out. 3002 var ErrHandlerTimeout = errors.New("http: Handler timeout") 3003 3004 type timeoutHandler struct { 3005 handler Handler 3006 body string 3007 dt time.Duration 3008 3009 // When set, no timer will be created and this channel will 3010 // be used instead. 
3011 testTimeout <-chan time.Time 3012 } 3013 3014 func (h *timeoutHandler) errorBody() string { 3015 if h.body != "" { 3016 return h.body 3017 } 3018 return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>" 3019 } 3020 3021 func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) { 3022 var t *time.Timer 3023 timeout := h.testTimeout 3024 if timeout == nil { 3025 t = time.NewTimer(h.dt) 3026 timeout = t.C 3027 } 3028 done := make(chan struct{}) 3029 tw := &timeoutWriter{ 3030 w: w, 3031 h: make(Header), 3032 } 3033 go func() { 3034 h.handler.ServeHTTP(tw, r) 3035 close(done) 3036 }() 3037 select { 3038 case <-done: 3039 tw.mu.Lock() 3040 defer tw.mu.Unlock() 3041 dst := w.Header() 3042 for k, vv := range tw.h { 3043 dst[k] = vv 3044 } 3045 if !tw.wroteHeader { 3046 tw.code = StatusOK 3047 } 3048 w.WriteHeader(tw.code) 3049 w.Write(tw.wbuf.Bytes()) 3050 if t != nil { 3051 t.Stop() 3052 } 3053 case <-timeout: 3054 tw.mu.Lock() 3055 defer tw.mu.Unlock() 3056 w.WriteHeader(StatusServiceUnavailable) 3057 io.WriteString(w, h.errorBody()) 3058 tw.timedOut = true 3059 return 3060 } 3061 } 3062 3063 type timeoutWriter struct { 3064 w ResponseWriter 3065 h Header 3066 wbuf bytes.Buffer 3067 3068 mu sync.Mutex 3069 timedOut bool 3070 wroteHeader bool 3071 code int 3072 } 3073 3074 func (tw *timeoutWriter) Header() Header { return tw.h } 3075 3076 func (tw *timeoutWriter) Write(p []byte) (int, error) { 3077 tw.mu.Lock() 3078 defer tw.mu.Unlock() 3079 if tw.timedOut { 3080 return 0, ErrHandlerTimeout 3081 } 3082 if !tw.wroteHeader { 3083 tw.writeHeader(StatusOK) 3084 } 3085 return tw.wbuf.Write(p) 3086 } 3087 3088 func (tw *timeoutWriter) WriteHeader(code int) { 3089 tw.mu.Lock() 3090 defer tw.mu.Unlock() 3091 if tw.timedOut || tw.wroteHeader { 3092 return 3093 } 3094 tw.writeHeader(code) 3095 } 3096 3097 func (tw *timeoutWriter) writeHeader(code int) { 3098 tw.wroteHeader = true 3099 tw.code = code 3100 } 3101 3102 // tcpKeepAliveListener sets TCP keep-alive timeouts on accepted 3103 // connections. It's used by ListenAndServe and ListenAndServeTLS so 3104 // dead TCP connections (e.g. closing laptop mid-download) eventually 3105 // go away. 3106 type tcpKeepAliveListener struct { 3107 *net.TCPListener 3108 } 3109 3110 func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { 3111 tc, err := ln.AcceptTCP() 3112 if err != nil { 3113 return 3114 } 3115 tc.SetKeepAlive(true) 3116 tc.SetKeepAlivePeriod(3 * time.Minute) 3117 return tc, nil 3118 } 3119 3120 // globalOptionsHandler responds to "OPTIONS *" requests. 3121 type globalOptionsHandler struct{} 3122 3123 func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) { 3124 w.Header().Set("Content-Length", "0") 3125 if r.ContentLength != 0 { 3126 // Read up to 4KB of OPTIONS body (as mentioned in the 3127 // spec as being reserved for future use), but anything 3128 // over that is considered a waste of server resources 3129 // (or an attack) and we abort and close the connection, 3130 // courtesy of MaxBytesReader's EOF behavior. 3131 mb := MaxBytesReader(w, r.Body, 4<<10) 3132 io.Copy(ioutil.Discard, mb) 3133 } 3134 } 3135 3136 // initNPNRequest is an HTTP handler that initializes certain 3137 // uninitialized fields in its *Request. Such partially-initialized 3138 // Requests come from NPN protocol handlers. 
3139 type initNPNRequest struct { 3140 c *tls.Conn 3141 h serverHandler 3142 } 3143 3144 func (h initNPNRequest) ServeHTTP(rw ResponseWriter, req *Request) { 3145 if req.TLS == nil { 3146 req.TLS = &tls.ConnectionState{} 3147 *req.TLS = h.c.ConnectionState() 3148 } 3149 if req.Body == nil { 3150 req.Body = NoBody 3151 } 3152 if req.RemoteAddr == "" { 3153 req.RemoteAddr = h.c.RemoteAddr().String() 3154 } 3155 h.h.ServeHTTP(rw, req) 3156 } 3157 3158 // loggingConn is used for debugging. 3159 type loggingConn struct { 3160 name string 3161 net.Conn 3162 } 3163 3164 var ( 3165 uniqNameMu sync.Mutex 3166 uniqNameNext = make(map[string]int) 3167 ) 3168 3169 func newLoggingConn(baseName string, c net.Conn) net.Conn { 3170 uniqNameMu.Lock() 3171 defer uniqNameMu.Unlock() 3172 uniqNameNext[baseName]++ 3173 return &loggingConn{ 3174 name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]), 3175 Conn: c, 3176 } 3177 } 3178 3179 func (c *loggingConn) Write(p []byte) (n int, err error) { 3180 log.Printf("%s.Write(%d) = ....", c.name, len(p)) 3181 n, err = c.Conn.Write(p) 3182 log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err) 3183 return 3184 } 3185 3186 func (c *loggingConn) Read(p []byte) (n int, err error) { 3187 log.Printf("%s.Read(%d) = ....", c.name, len(p)) 3188 n, err = c.Conn.Read(p) 3189 log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err) 3190 return 3191 } 3192 3193 func (c *loggingConn) Close() (err error) { 3194 log.Printf("%s.Close() = ...", c.name) 3195 err = c.Conn.Close() 3196 log.Printf("%s.Close() = %v", c.name, err) 3197 return 3198 } 3199 3200 // checkConnErrorWriter writes to c.rwc and records any write errors to c.werr. 3201 // It only contains one field (and a pointer field at that), so it 3202 // fits in an interface value without an extra allocation. 3203 type checkConnErrorWriter struct { 3204 c *conn 3205 } 3206 3207 func (w checkConnErrorWriter) Write(p []byte) (n int, err error) { 3208 n, err = w.c.rwc.Write(p) 3209 if err != nil && w.c.werr == nil { 3210 w.c.werr = err 3211 w.c.cancelCtx() 3212 } 3213 return 3214 } 3215 3216 func numLeadingCRorLF(v []byte) (n int) { 3217 for _, b := range v { 3218 if b == '\r' || b == '\n' { 3219 n++ 3220 continue 3221 } 3222 break 3223 } 3224 return 3225 3226 } 3227 3228 func strSliceContains(ss []string, s string) bool { 3229 for _, v := range ss { 3230 if v == s { 3231 return true 3232 } 3233 } 3234 return false 3235 }
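
// Editorial addition, not part of the upstream file: a minimal sketch of the
// ServeMux pattern-matching rules documented above (fixed rooted paths,
// rooted subtrees, the "/" catch-all, and the implicit redirect that Handle
// registers for a subtree root). The paths and the ":8080" address are
// assumptions made for the example.
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//		"net/http"
//	)
//
//	func main() {
//		mux := http.NewServeMux()
//
//		// Fixed, rooted path: matches only "/favicon.ico".
//		mux.HandleFunc("/favicon.ico", func(w http.ResponseWriter, r *http.Request) {
//			http.NotFound(w, r)
//		})
//
//		// Rooted subtree: matches "/images/" and everything under it.
//		// Handle also registers an implicit permanent redirect from
//		// "/images" to "/images/" unless "/images" is registered separately.
//		mux.HandleFunc("/images/", func(w http.ResponseWriter, r *http.Request) {
//			fmt.Fprintf(w, "image path: %s\n", r.URL.Path)
//		})
//
//		// "/" names a subtree too, so it receives every request that no
//		// longer pattern matches.
//		mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
//			fmt.Fprintf(w, "fallback for %s\n", r.URL.Path)
//		})
//
//		log.Fatal(http.ListenAndServe(":8080", mux))
//	}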
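
// Editorial addition, not part of the upstream file: a sketch of the helper
// handlers StripPrefix, Error, and NotFoundHandler defined above. The "/api"
// prefix and the error text are assumptions made for the example.
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//		"net/http"
//	)
//
//	func main() {
//		// The wrapped handler sees the request path with "/api" removed,
//		// e.g. a request for "/api/users" arrives with Path == "/users".
//		api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//			if r.URL.Path == "/" {
//				// Error sets Content-Type, X-Content-Type-Options: nosniff,
//				// and the status code before writing the message.
//				http.Error(w, "missing resource name", http.StatusBadRequest)
//				return
//			}
//			fmt.Fprintf(w, "api resource: %s\n", r.URL.Path)
//		})
//
//		mux := http.NewServeMux()
//		mux.Handle("/api/", http.StripPrefix("/api", api))
//		// Everything outside /api/ gets the stock 404 reply.
//		mux.Handle("/", http.NotFoundHandler())
//
//		log.Fatal(http.ListenAndServe(":8080", mux))
//	}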
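
// Editorial addition, not part of the upstream file: a sketch of Redirect and
// RedirectHandler. It relies on the relative-path behavior documented above:
// a target without a leading "/" is resolved against the directory of the
// request path. The routes are assumptions made for the example.
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//		"net/http"
//	)
//
//	func main() {
//		mux := http.NewServeMux()
//
//		// Fixed redirect: every request under /old/ is sent to /items/.
//		mux.Handle("/old/", http.RedirectHandler("/items/", http.StatusMovedPermanently))
//
//		mux.HandleFunc("/items/", func(w http.ResponseWriter, r *http.Request) {
//			if r.URL.Path == "/items/" {
//				// Relative target: "/items/" is redirected to "/items/latest/".
//				http.Redirect(w, r, "latest/", http.StatusSeeOther)
//				return
//			}
//			fmt.Fprintf(w, "item page: %s\n", r.URL.Path)
//		})
//
//		log.Fatal(http.ListenAndServe(":8080", mux))
//	}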
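
// Editorial addition, not part of the upstream file: a sketch of running a
// Server with explicit timeouts, a ConnState hook, and a graceful Shutdown
// triggered by an interrupt signal. The timeout values, address, and the
// 30-second shutdown budget are assumptions made for the example.
//
//	package main
//
//	import (
//		"context"
//		"log"
//		"net"
//		"net/http"
//		"os"
//		"os/signal"
//		"time"
//	)
//
//	func main() {
//		srv := &http.Server{
//			Addr:              ":8080",
//			Handler:           nil, // use DefaultServeMux
//			ReadHeaderTimeout: 5 * time.Second,
//			WriteTimeout:      10 * time.Second,
//			IdleTimeout:       60 * time.Second,
//			ConnState: func(c net.Conn, state http.ConnState) {
//				log.Printf("conn %s: %s", c.RemoteAddr(), state)
//			},
//		}
//
//		go func() {
//			// After Shutdown is called, ListenAndServe returns ErrServerClosed.
//			if err := srv.ListenAndServe(); err != http.ErrServerClosed {
//				log.Fatal(err)
//			}
//		}()
//
//		stop := make(chan os.Signal, 1)
//		signal.Notify(stop, os.Interrupt)
//		<-stop
//
//		// Stop accepting new connections and give in-flight requests up to
//		// 30 seconds to finish; hijacked connections are not waited for.
//		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//		defer cancel()
//		if err := srv.Shutdown(ctx); err != nil {
//			log.Printf("shutdown: %v", err)
//		}
//	}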
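
// Editorial addition, not part of the upstream file: a sketch of TimeoutHandler
// wrapping a slow handler. The one-second limit and the simulated delay are
// assumptions made for the example.
//
//	package main
//
//	import (
//		"io"
//		"log"
//		"net/http"
//		"time"
//	)
//
//	func main() {
//		slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//			time.Sleep(3 * time.Second)
//			// By now the client has already received the 503 body below, and
//			// this write fails with ErrHandlerTimeout.
//			if _, err := io.WriteString(w, "finally done\n"); err != nil {
//				log.Printf("write after timeout: %v", err)
//			}
//		})
//
//		http.Handle("/slow", http.TimeoutHandler(slow, 1*time.Second, "request timed out"))
//		log.Fatal(http.ListenAndServe(":8080", nil))
//	}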
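
// Editorial addition, not part of the upstream file: a sketch of the Hijack
// method implemented above, taking over an HTTP/1.x connection and writing a
// raw response by hand. The path and the response bytes are assumptions made
// for the example.
//
//	package main
//
//	import (
//		"log"
//		"net/http"
//	)
//
//	func main() {
//		http.HandleFunc("/raw", func(w http.ResponseWriter, r *http.Request) {
//			hj, ok := w.(http.Hijacker)
//			if !ok {
//				// HTTP/2 connections and some ResponseWriter wrappers do not
//				// support hijacking.
//				http.Error(w, "hijacking not supported", http.StatusInternalServerError)
//				return
//			}
//			conn, bufrw, err := hj.Hijack()
//			if err != nil {
//				http.Error(w, err.Error(), http.StatusInternalServerError)
//				return
//			}
//			// From here on the server no longer manages the connection;
//			// this handler must write any response bytes and close it.
//			defer conn.Close()
//			bufrw.WriteString("HTTP/1.1 200 OK\r\nContent-Length: 6\r\nConnection: close\r\n\r\nhello\n")
//			bufrw.Flush()
//		})
//		log.Fatal(http.ListenAndServe(":8080", nil))
//	}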