github.com/sean-/go@v0.0.0-20151219100004-97f854cd7bb6/src/net/http/server.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// HTTP server. See RFC 2616.

package http

import (
	"bufio"
	"bytes"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net"
	"net/textproto"
	"net/url"
	"os"
	"path"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)

// Errors introduced by the HTTP server.
var (
	ErrWriteAfterFlush = errors.New("Conn.Write called after Flush")
	ErrBodyNotAllowed  = errors.New("http: request method or response status code does not allow body")
	ErrHijacked        = errors.New("Conn has been hijacked")
	ErrContentLength   = errors.New("Conn.Write wrote more than the declared Content-Length")
)

// A Handler responds to an HTTP request.
//
// ServeHTTP should write reply headers and data to the ResponseWriter
// and then return. Returning signals that the request is finished; it
// is not valid to use the ResponseWriter or read from the
// Request.Body after or concurrently with the completion of the
// ServeHTTP call.
//
// Depending on the HTTP client software, HTTP protocol version, and
// any intermediaries between the client and the Go server, it may not
// be possible to read from the Request.Body after writing to the
// ResponseWriter. Cautious handlers should read the Request.Body
// first, and then reply.
//
// If ServeHTTP panics, the server (the caller of ServeHTTP) assumes
// that the effect of the panic was isolated to the active request.
// It recovers the panic, logs a stack trace to the server error log,
// and hangs up the connection.
type Handler interface {
	ServeHTTP(ResponseWriter, *Request)
}

// A ResponseWriter interface is used by an HTTP handler to
// construct an HTTP response.
//
// A ResponseWriter may not be used after the Handler.ServeHTTP method
// has returned.
type ResponseWriter interface {
	// Header returns the header map that will be sent by
	// WriteHeader. Changing the header after a call to
	// WriteHeader (or Write) has no effect unless the modified
	// headers were declared as trailers by setting the
	// "Trailer" header before the call to WriteHeader (see example).
	// To suppress implicit response headers, set their value to nil.
	Header() Header

	// Write writes the data to the connection as part of an HTTP reply.
	// If WriteHeader has not yet been called, Write calls WriteHeader(http.StatusOK)
	// before writing the data. If the Header does not contain a
	// Content-Type line, Write adds a Content-Type set to the result of passing
	// the initial 512 bytes of written data to DetectContentType.
	Write([]byte) (int, error)

	// WriteHeader sends an HTTP response header with status code.
	// If WriteHeader is not called explicitly, the first call to Write
	// will trigger an implicit WriteHeader(http.StatusOK).
	// Thus explicit calls to WriteHeader are mainly used to
	// send error codes.
	WriteHeader(int)
}

// The Flusher interface is implemented by ResponseWriters that allow
// an HTTP handler to flush buffered data to the client.
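// Illustrative sketch (not part of the original source file): a minimal
// Handler that follows the contract documented above. Headers are set
// first, WriteHeader is called explicitly, and the body is written last;
// omitting WriteHeader would have produced an implicit 200 OK on the first
// Write. The type name and listen address are invented for the example.
//
//	package main
//
//	import (
//		"io"
//		"log"
//		"net/http"
//	)
//
//	type greeter struct{}
//
//	func (greeter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
//		// Changes to the header map after WriteHeader (or the first
//		// Write) are ignored unless declared as trailers.
//		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
//		w.WriteHeader(http.StatusOK)
//		io.WriteString(w, "hello\n")
//	}
//
//	func main() {
//		log.Fatal(http.ListenAndServe("127.0.0.1:8080", greeter{}))
//	}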
92 // 93 // Note that even for ResponseWriters that support Flush, 94 // if the client is connected through an HTTP proxy, 95 // the buffered data may not reach the client until the response 96 // completes. 97 type Flusher interface { 98 // Flush sends any buffered data to the client. 99 Flush() 100 } 101 102 // The Hijacker interface is implemented by ResponseWriters that allow 103 // an HTTP handler to take over the connection. 104 type Hijacker interface { 105 // Hijack lets the caller take over the connection. 106 // After a call to Hijack(), the HTTP server library 107 // will not do anything else with the connection. 108 // 109 // It becomes the caller's responsibility to manage 110 // and close the connection. 111 // 112 // The returned net.Conn may have read or write deadlines 113 // already set, depending on the configuration of the 114 // Server. It is the caller's responsibility to set 115 // or clear those deadlines as needed. 116 Hijack() (net.Conn, *bufio.ReadWriter, error) 117 } 118 119 // The CloseNotifier interface is implemented by ResponseWriters which 120 // allow detecting when the underlying connection has gone away. 121 // 122 // This mechanism can be used to cancel long operations on the server 123 // if the client has disconnected before the response is ready. 124 type CloseNotifier interface { 125 // CloseNotify returns a channel that receives at most a 126 // single value (true) when the client connection has gone 127 // away. 128 // 129 // CloseNotify is undefined before Request.Body has been 130 // fully read. 131 // 132 // After the Handler has returned, there is no guarantee 133 // that the channel receives a value. 134 // 135 // If the protocol is HTTP/1.1 and CloseNotify is called while 136 // processing an idempotent request (such a GET) while 137 // HTTP/1.1 pipelining is in use, the arrival of a subsequent 138 // pipelined request will cause a value to be sent on the 139 // returned channel. In practice HTTP/1.1 pipelining is not 140 // enabled in browsers and not seen often in the wild. If this 141 // is a problem, use HTTP/2 or only use CloseNotify on methods 142 // such as POST. 143 CloseNotify() <-chan bool 144 } 145 146 // A conn represents the server side of an HTTP connection. 147 type conn struct { 148 // server is the server on which the connection arrived. 149 // Immutable; never nil. 150 server *Server 151 152 // rwc is the underlying network connection. 153 // This is never wrapped by other types and is the value given out 154 // to CloseNotifier callers. It is usually of type *net.TCPConn or 155 // *tls.Conn. 156 rwc net.Conn 157 158 // remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously 159 // inside the Listener's Accept goroutine, as some implementations block. 160 // It is populated immediately inside the (*conn).serve goroutine. 161 // This is the value of a Handler's (*Request).RemoteAddr. 162 remoteAddr string 163 164 // tlsState is the TLS connection state when using TLS. 165 // nil means not TLS. 166 tlsState *tls.ConnectionState 167 168 // werr is set to the first write error to rwc. 169 // It is set via checkConnErrorWriter{w}, where bufw writes. 170 werr error 171 172 // r is bufr's read source. It's a wrapper around rwc that provides 173 // io.LimitedReader-style limiting (while reading request headers) 174 // and functionality to support CloseNotifier. See *connReader docs. 175 r *connReader 176 177 // bufr reads from r. 178 // Users of bufr must hold mu. 
179 bufr *bufio.Reader 180 181 // bufw writes to checkConnErrorWriter{c}, which populates werr on error. 182 bufw *bufio.Writer 183 184 // lastMethod is the method of the most recent request 185 // on this connection, if any. 186 lastMethod string 187 188 // mu guards hijackedv, use of bufr, (*response).closeNotifyCh. 189 mu sync.Mutex 190 191 // hijackedv is whether this connection has been hijacked 192 // by a Handler with the Hijacker interface. 193 // It is guarded by mu. 194 hijackedv bool 195 } 196 197 func (c *conn) hijacked() bool { 198 c.mu.Lock() 199 defer c.mu.Unlock() 200 return c.hijackedv 201 } 202 203 // c.mu must be held. 204 func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) { 205 if c.hijackedv { 206 return nil, nil, ErrHijacked 207 } 208 c.hijackedv = true 209 rwc = c.rwc 210 buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc)) 211 c.setState(rwc, StateHijacked) 212 return 213 } 214 215 // This should be >= 512 bytes for DetectContentType, 216 // but otherwise it's somewhat arbitrary. 217 const bufferBeforeChunkingSize = 2048 218 219 // chunkWriter writes to a response's conn buffer, and is the writer 220 // wrapped by the response.bufw buffered writer. 221 // 222 // chunkWriter also is responsible for finalizing the Header, including 223 // conditionally setting the Content-Type and setting a Content-Length 224 // in cases where the handler's final output is smaller than the buffer 225 // size. It also conditionally adds chunk headers, when in chunking mode. 226 // 227 // See the comment above (*response).Write for the entire write flow. 228 type chunkWriter struct { 229 res *response 230 231 // header is either nil or a deep clone of res.handlerHeader 232 // at the time of res.WriteHeader, if res.WriteHeader is 233 // called and extra buffering is being done to calculate 234 // Content-Type and/or Content-Length. 235 header Header 236 237 // wroteHeader tells whether the header's been written to "the 238 // wire" (or rather: w.conn.buf). this is unlike 239 // (*response).wroteHeader, which tells only whether it was 240 // logically written. 241 wroteHeader bool 242 243 // set by the writeHeader method: 244 chunking bool // using chunked transfer encoding for reply body 245 } 246 247 var ( 248 crlf = []byte("\r\n") 249 colonSpace = []byte(": ") 250 ) 251 252 func (cw *chunkWriter) Write(p []byte) (n int, err error) { 253 if !cw.wroteHeader { 254 cw.writeHeader(p) 255 } 256 if cw.res.req.Method == "HEAD" { 257 // Eat writes. 
258 return len(p), nil 259 } 260 if cw.chunking { 261 _, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p)) 262 if err != nil { 263 cw.res.conn.rwc.Close() 264 return 265 } 266 } 267 n, err = cw.res.conn.bufw.Write(p) 268 if cw.chunking && err == nil { 269 _, err = cw.res.conn.bufw.Write(crlf) 270 } 271 if err != nil { 272 cw.res.conn.rwc.Close() 273 } 274 return 275 } 276 277 func (cw *chunkWriter) flush() { 278 if !cw.wroteHeader { 279 cw.writeHeader(nil) 280 } 281 cw.res.conn.bufw.Flush() 282 } 283 284 func (cw *chunkWriter) close() { 285 if !cw.wroteHeader { 286 cw.writeHeader(nil) 287 } 288 if cw.chunking { 289 bw := cw.res.conn.bufw // conn's bufio writer 290 // zero chunk to mark EOF 291 bw.WriteString("0\r\n") 292 if len(cw.res.trailers) > 0 { 293 trailers := make(Header) 294 for _, h := range cw.res.trailers { 295 if vv := cw.res.handlerHeader[h]; len(vv) > 0 { 296 trailers[h] = vv 297 } 298 } 299 trailers.Write(bw) // the writer handles noting errors 300 } 301 // final blank line after the trailers (whether 302 // present or not) 303 bw.WriteString("\r\n") 304 } 305 } 306 307 // A response represents the server side of an HTTP response. 308 type response struct { 309 conn *conn 310 req *Request // request for this response 311 reqBody io.ReadCloser 312 wroteHeader bool // reply header has been (logically) written 313 wroteContinue bool // 100 Continue response was written 314 315 w *bufio.Writer // buffers output in chunks to chunkWriter 316 cw chunkWriter 317 318 // handlerHeader is the Header that Handlers get access to, 319 // which may be retained and mutated even after WriteHeader. 320 // handlerHeader is copied into cw.header at WriteHeader 321 // time, and privately mutated thereafter. 322 handlerHeader Header 323 calledHeader bool // handler accessed handlerHeader via Header 324 325 written int64 // number of bytes written in body 326 contentLength int64 // explicitly-declared Content-Length; or -1 327 status int // status code passed to WriteHeader 328 329 // close connection after this reply. set on request and 330 // updated after response from handler if there's a 331 // "Connection: keep-alive" response header and a 332 // Content-Length. 333 closeAfterReply bool 334 335 // requestBodyLimitHit is set by requestTooLarge when 336 // maxBytesReader hits its max size. It is checked in 337 // WriteHeader, to make sure we don't consume the 338 // remaining request body to try to advance to the next HTTP 339 // request. Instead, when this is set, we stop reading 340 // subsequent requests on this connection and stop reading 341 // input from it. 342 requestBodyLimitHit bool 343 344 // trailers are the headers to be sent after the handler 345 // finishes writing the body. This field is initialized from 346 // the Trailer response header when the response header is 347 // written. 348 trailers []string 349 350 handlerDone bool // set true when the handler exits 351 352 // Buffers for Date and Content-Length 353 dateBuf [len(TimeFormat)]byte 354 clenBuf [10]byte 355 356 closeNotifyCh <-chan bool // guarded by conn.mu 357 } 358 359 // declareTrailer is called for each Trailer header when the 360 // response header is written. It notes that a header will need to be 361 // written in the trailers at the end of the response. 362 func (w *response) declareTrailer(k string) { 363 k = CanonicalHeaderKey(k) 364 switch k { 365 case "Transfer-Encoding", "Content-Length", "Trailer": 366 // Forbidden by RFC 2616 14.40. 
367 return 368 } 369 w.trailers = append(w.trailers, k) 370 } 371 372 // requestTooLarge is called by maxBytesReader when too much input has 373 // been read from the client. 374 func (w *response) requestTooLarge() { 375 w.closeAfterReply = true 376 w.requestBodyLimitHit = true 377 if !w.wroteHeader { 378 w.Header().Set("Connection", "close") 379 } 380 } 381 382 // needsSniff reports whether a Content-Type still needs to be sniffed. 383 func (w *response) needsSniff() bool { 384 _, haveType := w.handlerHeader["Content-Type"] 385 return !w.cw.wroteHeader && !haveType && w.written < sniffLen 386 } 387 388 // writerOnly hides an io.Writer value's optional ReadFrom method 389 // from io.Copy. 390 type writerOnly struct { 391 io.Writer 392 } 393 394 func srcIsRegularFile(src io.Reader) (isRegular bool, err error) { 395 switch v := src.(type) { 396 case *os.File: 397 fi, err := v.Stat() 398 if err != nil { 399 return false, err 400 } 401 return fi.Mode().IsRegular(), nil 402 case *io.LimitedReader: 403 return srcIsRegularFile(v.R) 404 default: 405 return 406 } 407 } 408 409 // ReadFrom is here to optimize copying from an *os.File regular file 410 // to a *net.TCPConn with sendfile. 411 func (w *response) ReadFrom(src io.Reader) (n int64, err error) { 412 // Our underlying w.conn.rwc is usually a *TCPConn (with its 413 // own ReadFrom method). If not, or if our src isn't a regular 414 // file, just fall back to the normal copy method. 415 rf, ok := w.conn.rwc.(io.ReaderFrom) 416 regFile, err := srcIsRegularFile(src) 417 if err != nil { 418 return 0, err 419 } 420 if !ok || !regFile { 421 bufp := copyBufPool.Get().(*[]byte) 422 defer copyBufPool.Put(bufp) 423 return io.CopyBuffer(writerOnly{w}, src, *bufp) 424 } 425 426 // sendfile path: 427 428 if !w.wroteHeader { 429 w.WriteHeader(StatusOK) 430 } 431 432 if w.needsSniff() { 433 n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen)) 434 n += n0 435 if err != nil { 436 return n, err 437 } 438 } 439 440 w.w.Flush() // get rid of any previous writes 441 w.cw.flush() // make sure Header is written; flush data to rwc 442 443 // Now that cw has been flushed, its chunking field is guaranteed initialized. 444 if !w.cw.chunking && w.bodyAllowed() { 445 n0, err := rf.ReadFrom(src) 446 n += n0 447 w.written += n0 448 return n, err 449 } 450 451 n0, err := io.Copy(writerOnly{w}, src) 452 n += n0 453 return n, err 454 } 455 456 // debugServerConnections controls whether all server connections are wrapped 457 // with a verbose logging wrapper. 458 const debugServerConnections = false 459 460 // Create new connection from rwc. 461 func (srv *Server) newConn(rwc net.Conn) *conn { 462 c := &conn{ 463 server: srv, 464 rwc: rwc, 465 } 466 if debugServerConnections { 467 c.rwc = newLoggingConn("server", c.rwc) 468 } 469 return c 470 } 471 472 type readResult struct { 473 n int 474 err error 475 b byte // byte read, if n == 1 476 } 477 478 // connReader is the io.Reader wrapper used by *conn. It combines a 479 // selectively-activated io.LimitedReader (to bound request header 480 // read sizes) with support for selectively keeping an io.Reader.Read 481 // call blocked in a background goroutine to wait for activitiy and 482 // trigger a CloseNotifier channel. 483 type connReader struct { 484 r io.Reader 485 remain int64 // bytes remaining 486 487 // ch is non-nil if a background read is in progress. 488 // It is guarded by conn.mu. 
489 ch chan readResult 490 } 491 492 func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain } 493 func (cr *connReader) setInfiniteReadLimit() { cr.remain = 1<<63 - 1 } 494 func (cr *connReader) hitReadLimit() bool { return cr.remain <= 0 } 495 496 func (cr *connReader) Read(p []byte) (n int, err error) { 497 if cr.hitReadLimit() { 498 return 0, io.EOF 499 } 500 if len(p) == 0 { 501 return 502 } 503 if int64(len(p)) > cr.remain { 504 p = p[:cr.remain] 505 } 506 507 // Is a background read (started by CloseNotifier) already in 508 // flight? If so, wait for it and use its result. 509 ch := cr.ch 510 if ch != nil { 511 cr.ch = nil 512 res := <-ch 513 if res.n == 1 { 514 p[0] = res.b 515 cr.remain -= 1 516 } 517 return res.n, res.err 518 } 519 n, err = cr.r.Read(p) 520 cr.remain -= int64(n) 521 return 522 } 523 524 func (cr *connReader) startBackgroundRead(onReadComplete func()) { 525 if cr.ch != nil { 526 // Background read already started. 527 return 528 } 529 cr.ch = make(chan readResult, 1) 530 go cr.closeNotifyAwaitActivityRead(cr.ch, onReadComplete) 531 } 532 533 func (cr *connReader) closeNotifyAwaitActivityRead(ch chan<- readResult, onReadComplete func()) { 534 var buf [1]byte 535 n, err := cr.r.Read(buf[:1]) 536 onReadComplete() 537 ch <- readResult{n, err, buf[0]} 538 } 539 540 var ( 541 bufioReaderPool sync.Pool 542 bufioWriter2kPool sync.Pool 543 bufioWriter4kPool sync.Pool 544 ) 545 546 var copyBufPool = sync.Pool{ 547 New: func() interface{} { 548 b := make([]byte, 32*1024) 549 return &b 550 }, 551 } 552 553 func bufioWriterPool(size int) *sync.Pool { 554 switch size { 555 case 2 << 10: 556 return &bufioWriter2kPool 557 case 4 << 10: 558 return &bufioWriter4kPool 559 } 560 return nil 561 } 562 563 func newBufioReader(r io.Reader) *bufio.Reader { 564 if v := bufioReaderPool.Get(); v != nil { 565 br := v.(*bufio.Reader) 566 br.Reset(r) 567 return br 568 } 569 // Note: if this reader size is every changed, update 570 // TestHandlerBodyClose's assumptions. 571 return bufio.NewReader(r) 572 } 573 574 func putBufioReader(br *bufio.Reader) { 575 br.Reset(nil) 576 bufioReaderPool.Put(br) 577 } 578 579 func newBufioWriterSize(w io.Writer, size int) *bufio.Writer { 580 pool := bufioWriterPool(size) 581 if pool != nil { 582 if v := pool.Get(); v != nil { 583 bw := v.(*bufio.Writer) 584 bw.Reset(w) 585 return bw 586 } 587 } 588 return bufio.NewWriterSize(w, size) 589 } 590 591 func putBufioWriter(bw *bufio.Writer) { 592 bw.Reset(nil) 593 if pool := bufioWriterPool(bw.Available()); pool != nil { 594 pool.Put(bw) 595 } 596 } 597 598 // DefaultMaxHeaderBytes is the maximum permitted size of the headers 599 // in an HTTP request. 600 // This can be overridden by setting Server.MaxHeaderBytes. 
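// Illustrative sketch (not part of the original source file): the sync.Pool
// reuse pattern that newBufioReader and putBufioReader above rely on, shown
// standalone. Resetting the reader before returning it to the pool drops the
// reference to its source so the source can be collected. All names are
// invented for the example.
//
//	package main
//
//	import (
//		"bufio"
//		"fmt"
//		"io"
//		"strings"
//		"sync"
//	)
//
//	var readerPool = sync.Pool{
//		New: func() interface{} { return bufio.NewReader(nil) },
//	}
//
//	func borrowReader(r io.Reader) *bufio.Reader {
//		br := readerPool.Get().(*bufio.Reader)
//		br.Reset(r) // reuse the buffer, point it at the new source
//		return br
//	}
//
//	func releaseReader(br *bufio.Reader) {
//		br.Reset(nil) // drop the reference held by the pooled reader
//		readerPool.Put(br)
//	}
//
//	func main() {
//		br := borrowReader(strings.NewReader("GET / HTTP/1.1\r\n"))
//		line, _ := br.ReadString('\n')
//		fmt.Printf("%q\n", line)
//		releaseReader(br)
//	}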
601 const DefaultMaxHeaderBytes = 1 << 20 // 1 MB 602 603 func (srv *Server) maxHeaderBytes() int { 604 if srv.MaxHeaderBytes > 0 { 605 return srv.MaxHeaderBytes 606 } 607 return DefaultMaxHeaderBytes 608 } 609 610 func (srv *Server) initialReadLimitSize() int64 { 611 return int64(srv.maxHeaderBytes()) + 4096 // bufio slop 612 } 613 614 // wrapper around io.ReaderCloser which on first read, sends an 615 // HTTP/1.1 100 Continue header 616 type expectContinueReader struct { 617 resp *response 618 readCloser io.ReadCloser 619 closed bool 620 sawEOF bool 621 } 622 623 func (ecr *expectContinueReader) Read(p []byte) (n int, err error) { 624 if ecr.closed { 625 return 0, ErrBodyReadAfterClose 626 } 627 if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() { 628 ecr.resp.wroteContinue = true 629 ecr.resp.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n") 630 ecr.resp.conn.bufw.Flush() 631 } 632 n, err = ecr.readCloser.Read(p) 633 if err == io.EOF { 634 ecr.sawEOF = true 635 } 636 return 637 } 638 639 func (ecr *expectContinueReader) Close() error { 640 ecr.closed = true 641 return ecr.readCloser.Close() 642 } 643 644 // TimeFormat is the time format to use with 645 // time.Parse and time.Time.Format when parsing 646 // or generating times in HTTP headers. 647 // It is like time.RFC1123 but hard codes GMT as the time zone. 648 const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" 649 650 // appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat)) 651 func appendTime(b []byte, t time.Time) []byte { 652 const days = "SunMonTueWedThuFriSat" 653 const months = "JanFebMarAprMayJunJulAugSepOctNovDec" 654 655 t = t.UTC() 656 yy, mm, dd := t.Date() 657 hh, mn, ss := t.Clock() 658 day := days[3*t.Weekday():] 659 mon := months[3*(mm-1):] 660 661 return append(b, 662 day[0], day[1], day[2], ',', ' ', 663 byte('0'+dd/10), byte('0'+dd%10), ' ', 664 mon[0], mon[1], mon[2], ' ', 665 byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ', 666 byte('0'+hh/10), byte('0'+hh%10), ':', 667 byte('0'+mn/10), byte('0'+mn%10), ':', 668 byte('0'+ss/10), byte('0'+ss%10), ' ', 669 'G', 'M', 'T') 670 } 671 672 var errTooLarge = errors.New("http: request too large") 673 674 // Read next request from connection. 675 func (c *conn) readRequest() (w *response, err error) { 676 if c.hijacked() { 677 return nil, ErrHijacked 678 } 679 680 if d := c.server.ReadTimeout; d != 0 { 681 c.rwc.SetReadDeadline(time.Now().Add(d)) 682 } 683 if d := c.server.WriteTimeout; d != 0 { 684 defer func() { 685 c.rwc.SetWriteDeadline(time.Now().Add(d)) 686 }() 687 } 688 689 c.r.setReadLimit(c.server.initialReadLimitSize()) 690 c.mu.Lock() // while using bufr 691 if c.lastMethod == "POST" { 692 // RFC 2616 section 4.1 tolerance for old buggy clients. 
693 peek, _ := c.bufr.Peek(4) // ReadRequest will get err below 694 c.bufr.Discard(numLeadingCRorLF(peek)) 695 } 696 req, err := readRequest(c.bufr, false) 697 c.mu.Unlock() 698 if err != nil { 699 if c.r.hitReadLimit() { 700 return nil, errTooLarge 701 } 702 return nil, err 703 } 704 c.lastMethod = req.Method 705 c.r.setInfiniteReadLimit() 706 707 hosts, haveHost := req.Header["Host"] 708 if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) { 709 return nil, badRequestError("missing required Host header") 710 } 711 if len(hosts) > 1 { 712 return nil, badRequestError("too many Host headers") 713 } 714 if len(hosts) == 1 && !validHostHeader(hosts[0]) { 715 return nil, badRequestError("malformed Host header") 716 } 717 for k, vv := range req.Header { 718 if !validHeaderName(k) { 719 return nil, badRequestError("invalid header name") 720 } 721 for _, v := range vv { 722 if !validHeaderValue(v) { 723 return nil, badRequestError("invalid header value") 724 } 725 } 726 } 727 delete(req.Header, "Host") 728 729 req.RemoteAddr = c.remoteAddr 730 req.TLS = c.tlsState 731 if body, ok := req.Body.(*body); ok { 732 body.doEarlyClose = true 733 } 734 735 w = &response{ 736 conn: c, 737 req: req, 738 reqBody: req.Body, 739 handlerHeader: make(Header), 740 contentLength: -1, 741 } 742 w.cw.res = w 743 w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize) 744 return w, nil 745 } 746 747 func (w *response) Header() Header { 748 if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader { 749 // Accessing the header between logically writing it 750 // and physically writing it means we need to allocate 751 // a clone to snapshot the logically written state. 752 w.cw.header = w.handlerHeader.clone() 753 } 754 w.calledHeader = true 755 return w.handlerHeader 756 } 757 758 // maxPostHandlerReadBytes is the max number of Request.Body bytes not 759 // consumed by a handler that the server will read from the client 760 // in order to keep a connection alive. If there are more bytes than 761 // this then the server to be paranoid instead sends a "Connection: 762 // close" response. 763 // 764 // This number is approximately what a typical machine's TCP buffer 765 // size is anyway. (if we have the bytes on the machine, we might as 766 // well read them) 767 const maxPostHandlerReadBytes = 256 << 10 768 769 func (w *response) WriteHeader(code int) { 770 if w.conn.hijacked() { 771 w.conn.server.logf("http: response.WriteHeader on hijacked connection") 772 return 773 } 774 if w.wroteHeader { 775 w.conn.server.logf("http: multiple response.WriteHeader calls") 776 return 777 } 778 w.wroteHeader = true 779 w.status = code 780 781 if w.calledHeader && w.cw.header == nil { 782 w.cw.header = w.handlerHeader.clone() 783 } 784 785 if cl := w.handlerHeader.get("Content-Length"); cl != "" { 786 v, err := strconv.ParseInt(cl, 10, 64) 787 if err == nil && v >= 0 { 788 w.contentLength = v 789 } else { 790 w.conn.server.logf("http: invalid Content-Length of %q", cl) 791 w.handlerHeader.Del("Content-Length") 792 } 793 } 794 } 795 796 // extraHeader is the set of headers sometimes added by chunkWriter.writeHeader. 797 // This type is used to avoid extra allocations from cloning and/or populating 798 // the response Header map and all its 1-element slices. 799 type extraHeader struct { 800 contentType string 801 connection string 802 transferEncoding string 803 date []byte // written if not nil 804 contentLength []byte // written if not nil 805 } 806 807 // Sorted the same as extraHeader.Write's loop. 
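// Illustrative sketch (not part of the original source file): how the
// WriteHeader logic above sees an explicit Content-Length. The handler
// declares the length before calling WriteHeader; a malformed value would be
// logged ("http: invalid Content-Length ...") and dropped, and writing more
// than the declared length later fails with ErrContentLength (see
// (*response).write further down in this file). The handler name and address
// are invented for the example.
//
//	package main
//
//	import (
//		"net/http"
//		"strconv"
//	)
//
//	func fixedBody(w http.ResponseWriter, r *http.Request) {
//		body := []byte("hello, world\n")
//		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
//		w.Header().Set("Content-Length", strconv.Itoa(len(body)))
//		w.WriteHeader(http.StatusOK)
//		w.Write(body)
//	}
//
//	func main() {
//		http.HandleFunc("/", fixedBody)
//		http.ListenAndServe("127.0.0.1:8080", nil)
//	}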
808 var extraHeaderKeys = [][]byte{ 809 []byte("Content-Type"), 810 []byte("Connection"), 811 []byte("Transfer-Encoding"), 812 } 813 814 var ( 815 headerContentLength = []byte("Content-Length: ") 816 headerDate = []byte("Date: ") 817 ) 818 819 // Write writes the headers described in h to w. 820 // 821 // This method has a value receiver, despite the somewhat large size 822 // of h, because it prevents an allocation. The escape analysis isn't 823 // smart enough to realize this function doesn't mutate h. 824 func (h extraHeader) Write(w *bufio.Writer) { 825 if h.date != nil { 826 w.Write(headerDate) 827 w.Write(h.date) 828 w.Write(crlf) 829 } 830 if h.contentLength != nil { 831 w.Write(headerContentLength) 832 w.Write(h.contentLength) 833 w.Write(crlf) 834 } 835 for i, v := range []string{h.contentType, h.connection, h.transferEncoding} { 836 if v != "" { 837 w.Write(extraHeaderKeys[i]) 838 w.Write(colonSpace) 839 w.WriteString(v) 840 w.Write(crlf) 841 } 842 } 843 } 844 845 // writeHeader finalizes the header sent to the client and writes it 846 // to cw.res.conn.bufw. 847 // 848 // p is not written by writeHeader, but is the first chunk of the body 849 // that will be written. It is sniffed for a Content-Type if none is 850 // set explicitly. It's also used to set the Content-Length, if the 851 // total body size was small and the handler has already finished 852 // running. 853 func (cw *chunkWriter) writeHeader(p []byte) { 854 if cw.wroteHeader { 855 return 856 } 857 cw.wroteHeader = true 858 859 w := cw.res 860 keepAlivesEnabled := w.conn.server.doKeepAlives() 861 isHEAD := w.req.Method == "HEAD" 862 863 // header is written out to w.conn.buf below. Depending on the 864 // state of the handler, we either own the map or not. If we 865 // don't own it, the exclude map is created lazily for 866 // WriteSubset to remove headers. The setHeader struct holds 867 // headers we need to add. 868 header := cw.header 869 owned := header != nil 870 if !owned { 871 header = w.handlerHeader 872 } 873 var excludeHeader map[string]bool 874 delHeader := func(key string) { 875 if owned { 876 header.Del(key) 877 return 878 } 879 if _, ok := header[key]; !ok { 880 return 881 } 882 if excludeHeader == nil { 883 excludeHeader = make(map[string]bool) 884 } 885 excludeHeader[key] = true 886 } 887 var setHeader extraHeader 888 889 trailers := false 890 for _, v := range cw.header["Trailer"] { 891 trailers = true 892 foreachHeaderElement(v, cw.res.declareTrailer) 893 } 894 895 te := header.get("Transfer-Encoding") 896 hasTE := te != "" 897 898 // If the handler is done but never sent a Content-Length 899 // response header and this is our first (and last) write, set 900 // it, even to zero. This helps HTTP/1.0 clients keep their 901 // "keep-alive" connections alive. 902 // Exceptions: 304/204/1xx responses never get Content-Length, and if 903 // it was a HEAD request, we don't know the difference between 904 // 0 actual bytes and 0 bytes because the handler noticed it 905 // was a HEAD request and chose not to write anything. So for 906 // HEAD, the handler should either write the Content-Length or 907 // write non-zero bytes. If it's actually 0 bytes and the 908 // handler never looked at the Request.Method, we just don't 909 // send a Content-Length header. 910 // Further, we don't send an automatic Content-Length if they 911 // set a Transfer-Encoding, because they're generally incompatible. 
912 if w.handlerDone && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) { 913 w.contentLength = int64(len(p)) 914 setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10) 915 } 916 917 // If this was an HTTP/1.0 request with keep-alive and we sent a 918 // Content-Length back, we can make this a keep-alive response ... 919 if w.req.wantsHttp10KeepAlive() && keepAlivesEnabled { 920 sentLength := header.get("Content-Length") != "" 921 if sentLength && header.get("Connection") == "keep-alive" { 922 w.closeAfterReply = false 923 } 924 } 925 926 // Check for a explicit (and valid) Content-Length header. 927 hasCL := w.contentLength != -1 928 929 if w.req.wantsHttp10KeepAlive() && (isHEAD || hasCL) { 930 _, connectionHeaderSet := header["Connection"] 931 if !connectionHeaderSet { 932 setHeader.connection = "keep-alive" 933 } 934 } else if !w.req.ProtoAtLeast(1, 1) || w.req.wantsClose() { 935 w.closeAfterReply = true 936 } 937 938 if header.get("Connection") == "close" || !keepAlivesEnabled { 939 w.closeAfterReply = true 940 } 941 942 // If the client wanted a 100-continue but we never sent it to 943 // them (or, more strictly: we never finished reading their 944 // request body), don't reuse this connection because it's now 945 // in an unknown state: we might be sending this response at 946 // the same time the client is now sending its request body 947 // after a timeout. (Some HTTP clients send Expect: 948 // 100-continue but knowing that some servers don't support 949 // it, the clients set a timer and send the body later anyway) 950 // If we haven't seen EOF, we can't skip over the unread body 951 // because we don't know if the next bytes on the wire will be 952 // the body-following-the-timer or the subsequent request. 953 // See Issue 11549. 954 if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF { 955 w.closeAfterReply = true 956 } 957 958 // Per RFC 2616, we should consume the request body before 959 // replying, if the handler hasn't already done so. But we 960 // don't want to do an unbounded amount of reading here for 961 // DoS reasons, so we only try up to a threshold. 962 if w.req.ContentLength != 0 && !w.closeAfterReply { 963 var discard, tooBig bool 964 965 switch bdy := w.req.Body.(type) { 966 case *expectContinueReader: 967 if bdy.resp.wroteContinue { 968 discard = true 969 } 970 case *body: 971 bdy.mu.Lock() 972 switch { 973 case bdy.closed: 974 if !bdy.sawEOF { 975 // Body was closed in handler with non-EOF error. 976 w.closeAfterReply = true 977 } 978 case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes: 979 tooBig = true 980 default: 981 discard = true 982 } 983 bdy.mu.Unlock() 984 default: 985 discard = true 986 } 987 988 if discard { 989 _, err := io.CopyN(ioutil.Discard, w.req.Body, maxPostHandlerReadBytes+1) 990 switch err { 991 case nil: 992 // There must be even more data left over. 993 tooBig = true 994 case ErrBodyReadAfterClose: 995 // Body was already consumed and closed. 996 case io.EOF: 997 // The remaining body was just consumed, close it. 998 err = w.req.Body.Close() 999 if err != nil { 1000 w.closeAfterReply = true 1001 } 1002 default: 1003 // Some other kind of error occured, like a read timeout, or 1004 // corrupt chunked encoding. In any case, whatever remains 1005 // on the wire must not be parsed as another HTTP request. 
1006 w.closeAfterReply = true 1007 } 1008 } 1009 1010 if tooBig { 1011 w.requestTooLarge() 1012 delHeader("Connection") 1013 setHeader.connection = "close" 1014 } 1015 } 1016 1017 code := w.status 1018 if bodyAllowedForStatus(code) { 1019 // If no content type, apply sniffing algorithm to body. 1020 _, haveType := header["Content-Type"] 1021 if !haveType && !hasTE { 1022 setHeader.contentType = DetectContentType(p) 1023 } 1024 } else { 1025 for _, k := range suppressedHeaders(code) { 1026 delHeader(k) 1027 } 1028 } 1029 1030 if _, ok := header["Date"]; !ok { 1031 setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now()) 1032 } 1033 1034 if hasCL && hasTE && te != "identity" { 1035 // TODO: return an error if WriteHeader gets a return parameter 1036 // For now just ignore the Content-Length. 1037 w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d", 1038 te, w.contentLength) 1039 delHeader("Content-Length") 1040 hasCL = false 1041 } 1042 1043 if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) { 1044 // do nothing 1045 } else if code == StatusNoContent { 1046 delHeader("Transfer-Encoding") 1047 } else if hasCL { 1048 delHeader("Transfer-Encoding") 1049 } else if w.req.ProtoAtLeast(1, 1) { 1050 // HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no 1051 // content-length has been provided. The connection must be closed after the 1052 // reply is written, and no chunking is to be done. This is the setup 1053 // recommended in the Server-Sent Events candidate recommendation 11, 1054 // section 8. 1055 if hasTE && te == "identity" { 1056 cw.chunking = false 1057 w.closeAfterReply = true 1058 } else { 1059 // HTTP/1.1 or greater: use chunked transfer encoding 1060 // to avoid closing the connection at EOF. 1061 cw.chunking = true 1062 setHeader.transferEncoding = "chunked" 1063 } 1064 } else { 1065 // HTTP version < 1.1: cannot do chunked transfer 1066 // encoding and we don't know the Content-Length so 1067 // signal EOF by closing connection. 1068 w.closeAfterReply = true 1069 delHeader("Transfer-Encoding") // in case already set 1070 } 1071 1072 // Cannot use Content-Length with non-identity Transfer-Encoding. 1073 if cw.chunking { 1074 delHeader("Content-Length") 1075 } 1076 if !w.req.ProtoAtLeast(1, 0) { 1077 return 1078 } 1079 1080 if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) { 1081 delHeader("Connection") 1082 if w.req.ProtoAtLeast(1, 1) { 1083 setHeader.connection = "close" 1084 } 1085 } 1086 1087 w.conn.bufw.WriteString(statusLine(w.req, code)) 1088 cw.header.WriteSubset(w.conn.bufw, excludeHeader) 1089 setHeader.Write(w.conn.bufw) 1090 w.conn.bufw.Write(crlf) 1091 } 1092 1093 // foreachHeaderElement splits v according to the "#rule" construction 1094 // in RFC 2616 section 2.1 and calls fn for each non-empty element. 1095 func foreachHeaderElement(v string, fn func(string)) { 1096 v = textproto.TrimString(v) 1097 if v == "" { 1098 return 1099 } 1100 if !strings.Contains(v, ",") { 1101 fn(v) 1102 return 1103 } 1104 for _, f := range strings.Split(v, ",") { 1105 if f = textproto.TrimString(f); f != "" { 1106 fn(f) 1107 } 1108 } 1109 } 1110 1111 // statusLines is a cache of Status-Line strings, keyed by code (for 1112 // HTTP/1.1) or negative code (for HTTP/1.0). This is faster than a 1113 // map keyed by struct of two fields. 
This map's max size is bounded 1114 // by 2*len(statusText), two protocol types for each known official 1115 // status code in the statusText map. 1116 var ( 1117 statusMu sync.RWMutex 1118 statusLines = make(map[int]string) 1119 ) 1120 1121 // statusLine returns a response Status-Line (RFC 2616 Section 6.1) 1122 // for the given request and response status code. 1123 func statusLine(req *Request, code int) string { 1124 // Fast path: 1125 key := code 1126 proto11 := req.ProtoAtLeast(1, 1) 1127 if !proto11 { 1128 key = -key 1129 } 1130 statusMu.RLock() 1131 line, ok := statusLines[key] 1132 statusMu.RUnlock() 1133 if ok { 1134 return line 1135 } 1136 1137 // Slow path: 1138 proto := "HTTP/1.0" 1139 if proto11 { 1140 proto = "HTTP/1.1" 1141 } 1142 codestring := strconv.Itoa(code) 1143 text, ok := statusText[code] 1144 if !ok { 1145 text = "status code " + codestring 1146 } 1147 line = proto + " " + codestring + " " + text + "\r\n" 1148 if ok { 1149 statusMu.Lock() 1150 defer statusMu.Unlock() 1151 statusLines[key] = line 1152 } 1153 return line 1154 } 1155 1156 // bodyAllowed reports whether a Write is allowed for this response type. 1157 // It's illegal to call this before the header has been flushed. 1158 func (w *response) bodyAllowed() bool { 1159 if !w.wroteHeader { 1160 panic("") 1161 } 1162 return bodyAllowedForStatus(w.status) 1163 } 1164 1165 // The Life Of A Write is like this: 1166 // 1167 // Handler starts. No header has been sent. The handler can either 1168 // write a header, or just start writing. Writing before sending a header 1169 // sends an implicitly empty 200 OK header. 1170 // 1171 // If the handler didn't declare a Content-Length up front, we either 1172 // go into chunking mode or, if the handler finishes running before 1173 // the chunking buffer size, we compute a Content-Length and send that 1174 // in the header instead. 1175 // 1176 // Likewise, if the handler didn't set a Content-Type, we sniff that 1177 // from the initial chunk of output. 1178 // 1179 // The Writers are wired together like: 1180 // 1181 // 1. *response (the ResponseWriter) -> 1182 // 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes 1183 // 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type) 1184 // and which writes the chunk headers, if needed. 1185 // 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to -> 1186 // 5. checkConnErrorWriter{c}, which notes any non-nil error on Write 1187 // and populates c.werr with it if so. but otherwise writes to: 1188 // 6. the rwc, the net.Conn. 1189 // 1190 // TODO(bradfitz): short-circuit some of the buffering when the 1191 // initial header contains both a Content-Type and Content-Length. 1192 // Also short-circuit in (1) when the header's been sent and not in 1193 // chunking mode, writing directly to (4) instead, if (2) has no 1194 // buffered data. More generally, we could short-circuit from (1) to 1195 // (3) even in chunking mode if the write size from (1) is over some 1196 // threshold and nothing is in (2). The answer might be mostly making 1197 // bufferBeforeChunkingSize smaller and having bufio's fast-paths deal 1198 // with this instead. 1199 func (w *response) Write(data []byte) (n int, err error) { 1200 return w.write(len(data), data, "") 1201 } 1202 1203 func (w *response) WriteString(data string) (n int, err error) { 1204 return w.write(len(data), nil, data) 1205 } 1206 1207 // either dataB or dataS is non-zero. 
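// Illustrative sketch (not part of the original source file): the write flow
// described in "The Life Of A Write" above, as seen from a handler. A short
// reply that stays inside the pre-chunking buffer is sent with a computed
// Content-Length; flushing before the handler returns finalizes the header
// early and switches an HTTP/1.1 reply to chunked transfer encoding. Names,
// paths, and timings are invented for the example.
//
//	package main
//
//	import (
//		"fmt"
//		"net/http"
//		"time"
//	)
//
//	func small(w http.ResponseWriter, r *http.Request) {
//		fmt.Fprint(w, "ok\n") // buffered; Content-Length is computed for us
//	}
//
//	func stream(w http.ResponseWriter, r *http.Request) {
//		f, canFlush := w.(http.Flusher)
//		for i := 0; i < 3; i++ {
//			fmt.Fprintf(w, "tick %d\n", i)
//			if canFlush {
//				f.Flush() // header goes out now; body becomes chunked
//			}
//			time.Sleep(100 * time.Millisecond)
//		}
//	}
//
//	func main() {
//		http.HandleFunc("/small", small)
//		http.HandleFunc("/stream", stream)
//		http.ListenAndServe("127.0.0.1:8080", nil)
//	}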
1208 func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) { 1209 if w.conn.hijacked() { 1210 w.conn.server.logf("http: response.Write on hijacked connection") 1211 return 0, ErrHijacked 1212 } 1213 if !w.wroteHeader { 1214 w.WriteHeader(StatusOK) 1215 } 1216 if lenData == 0 { 1217 return 0, nil 1218 } 1219 if !w.bodyAllowed() { 1220 return 0, ErrBodyNotAllowed 1221 } 1222 1223 w.written += int64(lenData) // ignoring errors, for errorKludge 1224 if w.contentLength != -1 && w.written > w.contentLength { 1225 return 0, ErrContentLength 1226 } 1227 if dataB != nil { 1228 return w.w.Write(dataB) 1229 } else { 1230 return w.w.WriteString(dataS) 1231 } 1232 } 1233 1234 func (w *response) finishRequest() { 1235 w.handlerDone = true 1236 1237 if !w.wroteHeader { 1238 w.WriteHeader(StatusOK) 1239 } 1240 1241 w.w.Flush() 1242 putBufioWriter(w.w) 1243 w.cw.close() 1244 w.conn.bufw.Flush() 1245 1246 // Close the body (regardless of w.closeAfterReply) so we can 1247 // re-use its bufio.Reader later safely. 1248 w.reqBody.Close() 1249 1250 if w.req.MultipartForm != nil { 1251 w.req.MultipartForm.RemoveAll() 1252 } 1253 } 1254 1255 // shouldReuseConnection reports whether the underlying TCP connection can be reused. 1256 // It must only be called after the handler is done executing. 1257 func (w *response) shouldReuseConnection() bool { 1258 if w.closeAfterReply { 1259 // The request or something set while executing the 1260 // handler indicated we shouldn't reuse this 1261 // connection. 1262 return false 1263 } 1264 1265 if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written { 1266 // Did not write enough. Avoid getting out of sync. 1267 return false 1268 } 1269 1270 // There was some error writing to the underlying connection 1271 // during the request, so don't re-use this conn. 1272 if w.conn.werr != nil { 1273 return false 1274 } 1275 1276 if w.closedRequestBodyEarly() { 1277 return false 1278 } 1279 1280 return true 1281 } 1282 1283 func (w *response) closedRequestBodyEarly() bool { 1284 body, ok := w.req.Body.(*body) 1285 return ok && body.didEarlyClose() 1286 } 1287 1288 func (w *response) Flush() { 1289 if !w.wroteHeader { 1290 w.WriteHeader(StatusOK) 1291 } 1292 w.w.Flush() 1293 w.cw.flush() 1294 } 1295 1296 func (c *conn) finalFlush() { 1297 if c.bufr != nil { 1298 // Steal the bufio.Reader (~4KB worth of memory) and its associated 1299 // reader for a future connection. 1300 putBufioReader(c.bufr) 1301 c.bufr = nil 1302 } 1303 1304 if c.bufw != nil { 1305 c.bufw.Flush() 1306 // Steal the bufio.Writer (~4KB worth of memory) and its associated 1307 // writer for a future connection. 1308 putBufioWriter(c.bufw) 1309 c.bufw = nil 1310 } 1311 } 1312 1313 // Close the connection. 1314 func (c *conn) close() { 1315 c.finalFlush() 1316 c.rwc.Close() 1317 } 1318 1319 // rstAvoidanceDelay is the amount of time we sleep after closing the 1320 // write side of a TCP connection before closing the entire socket. 1321 // By sleeping, we increase the chances that the client sees our FIN 1322 // and processes its final data before they process the subsequent RST 1323 // from closing a connection with known unread data. 1324 // This RST seems to occur mostly on BSD systems. (And Windows?) 1325 // This timeout is somewhat arbitrary (~latency around the planet). 
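// Illustrative sketch (not part of the original source file): the
// FIN-before-close trick that closeWriteAndWait (below) applies, shown on a
// plain TCP client connection. Half-closing the write side sends a FIN while
// reads stay possible, and the short sleep gives the peer time to consume
// our data before a full Close could turn unread input into a RST. The
// address and delay are invented for the example.
//
//	package main
//
//	import (
//		"log"
//		"net"
//		"time"
//	)
//
//	func politeClose(c net.Conn) {
//		if tc, ok := c.(*net.TCPConn); ok {
//			tc.CloseWrite()                    // send FIN, keep the read side open
//			time.Sleep(500 * time.Millisecond) // let the peer drain what we sent
//		}
//		c.Close()
//	}
//
//	func main() {
//		c, err := net.Dial("tcp", "example.com:80")
//		if err != nil {
//			log.Fatal(err)
//		}
//		c.Write([]byte("GET / HTTP/1.0\r\nHost: example.com\r\n\r\n"))
//		politeClose(c)
//	}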
1326 const rstAvoidanceDelay = 500 * time.Millisecond 1327 1328 type closeWriter interface { 1329 CloseWrite() error 1330 } 1331 1332 var _ closeWriter = (*net.TCPConn)(nil) 1333 1334 // closeWrite flushes any outstanding data and sends a FIN packet (if 1335 // client is connected via TCP), signalling that we're done. We then 1336 // pause for a bit, hoping the client processes it before any 1337 // subsequent RST. 1338 // 1339 // See https://golang.org/issue/3595 1340 func (c *conn) closeWriteAndWait() { 1341 c.finalFlush() 1342 if tcp, ok := c.rwc.(closeWriter); ok { 1343 tcp.CloseWrite() 1344 } 1345 time.Sleep(rstAvoidanceDelay) 1346 } 1347 1348 // validNPN reports whether the proto is not a blacklisted Next 1349 // Protocol Negotiation protocol. Empty and built-in protocol types 1350 // are blacklisted and can't be overridden with alternate 1351 // implementations. 1352 func validNPN(proto string) bool { 1353 switch proto { 1354 case "", "http/1.1", "http/1.0": 1355 return false 1356 } 1357 return true 1358 } 1359 1360 func (c *conn) setState(nc net.Conn, state ConnState) { 1361 if hook := c.server.ConnState; hook != nil { 1362 hook(nc, state) 1363 } 1364 } 1365 1366 // badRequestError is a literal string (used by in the server in HTML, 1367 // unescaped) to tell the user why their request was bad. It should 1368 // be plain text without user info or other embeddded errors. 1369 type badRequestError string 1370 1371 func (e badRequestError) Error() string { return "Bad Request: " + string(e) } 1372 1373 // Serve a new connection. 1374 func (c *conn) serve() { 1375 c.remoteAddr = c.rwc.RemoteAddr().String() 1376 defer func() { 1377 if err := recover(); err != nil { 1378 const size = 64 << 10 1379 buf := make([]byte, size) 1380 buf = buf[:runtime.Stack(buf, false)] 1381 c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf) 1382 } 1383 if !c.hijacked() { 1384 c.close() 1385 c.setState(c.rwc, StateClosed) 1386 } 1387 }() 1388 1389 if tlsConn, ok := c.rwc.(*tls.Conn); ok { 1390 if d := c.server.ReadTimeout; d != 0 { 1391 c.rwc.SetReadDeadline(time.Now().Add(d)) 1392 } 1393 if d := c.server.WriteTimeout; d != 0 { 1394 c.rwc.SetWriteDeadline(time.Now().Add(d)) 1395 } 1396 if err := tlsConn.Handshake(); err != nil { 1397 c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err) 1398 return 1399 } 1400 c.tlsState = new(tls.ConnectionState) 1401 *c.tlsState = tlsConn.ConnectionState() 1402 if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) { 1403 if fn := c.server.TLSNextProto[proto]; fn != nil { 1404 h := initNPNRequest{tlsConn, serverHandler{c.server}} 1405 fn(c.server, tlsConn, h) 1406 } 1407 return 1408 } 1409 } 1410 1411 c.r = &connReader{r: c.rwc} 1412 c.bufr = newBufioReader(c.r) 1413 c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10) 1414 1415 for { 1416 w, err := c.readRequest() 1417 if c.r.remain != c.server.initialReadLimitSize() { 1418 // If we read any bytes off the wire, we're active. 1419 c.setState(c.rwc, StateActive) 1420 } 1421 if err != nil { 1422 if err == errTooLarge { 1423 // Their HTTP client may or may not be 1424 // able to read this if we're 1425 // responding to them and hanging up 1426 // while they're still writing their 1427 // request. Undefined behavior. 
1428 io.WriteString(c.rwc, "HTTP/1.1 431 Request Header Fields Too Large\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n431 Request Header Fields Too Large") 1429 c.closeWriteAndWait() 1430 return 1431 } 1432 if err == io.EOF { 1433 return // don't reply 1434 } 1435 if neterr, ok := err.(net.Error); ok && neterr.Timeout() { 1436 return // don't reply 1437 } 1438 var publicErr string 1439 if v, ok := err.(badRequestError); ok { 1440 publicErr = ": " + string(v) 1441 } 1442 io.WriteString(c.rwc, "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n400 Bad Request"+publicErr) 1443 return 1444 } 1445 1446 // Expect 100 Continue support 1447 req := w.req 1448 if req.expectsContinue() { 1449 if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 { 1450 // Wrap the Body reader with one that replies on the connection 1451 req.Body = &expectContinueReader{readCloser: req.Body, resp: w} 1452 } 1453 req.Header.Del("Expect") 1454 } else if req.Header.get("Expect") != "" { 1455 w.sendExpectationFailed() 1456 return 1457 } 1458 1459 // HTTP cannot have multiple simultaneous active requests.[*] 1460 // Until the server replies to this request, it can't read another, 1461 // so we might as well run the handler in this goroutine. 1462 // [*] Not strictly true: HTTP pipelining. We could let them all process 1463 // in parallel even if their responses need to be serialized. 1464 serverHandler{c.server}.ServeHTTP(w, w.req) 1465 if c.hijacked() { 1466 return 1467 } 1468 w.finishRequest() 1469 if !w.shouldReuseConnection() { 1470 if w.requestBodyLimitHit || w.closedRequestBodyEarly() { 1471 c.closeWriteAndWait() 1472 } 1473 return 1474 } 1475 c.setState(c.rwc, StateIdle) 1476 } 1477 } 1478 1479 func (w *response) sendExpectationFailed() { 1480 // TODO(bradfitz): let ServeHTTP handlers handle 1481 // requests with non-standard expectation[s]? Seems 1482 // theoretical at best, and doesn't fit into the 1483 // current ServeHTTP model anyway. We'd need to 1484 // make the ResponseWriter an optional 1485 // "ExpectReplier" interface or something. 1486 // 1487 // For now we'll just obey RFC 2616 14.20 which says 1488 // "If a server receives a request containing an 1489 // Expect field that includes an expectation- 1490 // extension that it does not support, it MUST 1491 // respond with a 417 (Expectation Failed) status." 1492 w.Header().Set("Connection", "close") 1493 w.WriteHeader(StatusExpectationFailed) 1494 w.finishRequest() 1495 } 1496 1497 // Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter 1498 // and a Hijacker. 1499 func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { 1500 if w.wroteHeader { 1501 w.cw.flush() 1502 } 1503 1504 c := w.conn 1505 c.mu.Lock() 1506 defer c.mu.Unlock() 1507 1508 if w.closeNotifyCh != nil { 1509 return nil, nil, errors.New("http: Hijack is incompatible with use of CloseNotifier in same ServeHTTP call") 1510 } 1511 1512 // Release the bufioWriter that writes to the chunk writer, it is not 1513 // used after a connection has been hijacked. 
1514 rwc, buf, err = c.hijackLocked() 1515 if err == nil { 1516 putBufioWriter(w.w) 1517 w.w = nil 1518 } 1519 return rwc, buf, err 1520 } 1521 1522 func (w *response) CloseNotify() <-chan bool { 1523 c := w.conn 1524 c.mu.Lock() 1525 defer c.mu.Unlock() 1526 1527 if w.closeNotifyCh != nil { 1528 return w.closeNotifyCh 1529 } 1530 ch := make(chan bool, 1) 1531 w.closeNotifyCh = ch 1532 1533 if w.conn.hijackedv { 1534 // CloseNotify is undefined after a hijack, but we have 1535 // no place to return an error, so just return a channel, 1536 // even though it'll never receive a value. 1537 return ch 1538 } 1539 1540 var once sync.Once 1541 notify := func() { once.Do(func() { ch <- true }) } 1542 1543 if c.bufr.Buffered() > 0 { 1544 // A pipelined request or unread request body data is available 1545 // unread. Per the CloseNotifier docs, fire immediately. 1546 notify() 1547 } else { 1548 c.r.startBackgroundRead(notify) 1549 } 1550 return ch 1551 } 1552 1553 // The HandlerFunc type is an adapter to allow the use of 1554 // ordinary functions as HTTP handlers. If f is a function 1555 // with the appropriate signature, HandlerFunc(f) is a 1556 // Handler that calls f. 1557 type HandlerFunc func(ResponseWriter, *Request) 1558 1559 // ServeHTTP calls f(w, r). 1560 func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) { 1561 f(w, r) 1562 } 1563 1564 // Helper handlers 1565 1566 // Error replies to the request with the specified error message and HTTP code. 1567 // The error message should be plain text. 1568 func Error(w ResponseWriter, error string, code int) { 1569 w.Header().Set("Content-Type", "text/plain; charset=utf-8") 1570 w.Header().Set("X-Content-Type-Options", "nosniff") 1571 w.WriteHeader(code) 1572 fmt.Fprintln(w, error) 1573 } 1574 1575 // NotFound replies to the request with an HTTP 404 not found error. 1576 func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) } 1577 1578 // NotFoundHandler returns a simple request handler 1579 // that replies to each request with a ``404 page not found'' reply. 1580 func NotFoundHandler() Handler { return HandlerFunc(NotFound) } 1581 1582 // StripPrefix returns a handler that serves HTTP requests 1583 // by removing the given prefix from the request URL's Path 1584 // and invoking the handler h. StripPrefix handles a 1585 // request for a path that doesn't begin with prefix by 1586 // replying with an HTTP 404 not found error. 1587 func StripPrefix(prefix string, h Handler) Handler { 1588 if prefix == "" { 1589 return h 1590 } 1591 return HandlerFunc(func(w ResponseWriter, r *Request) { 1592 if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) { 1593 r.URL.Path = p 1594 h.ServeHTTP(w, r) 1595 } else { 1596 NotFound(w, r) 1597 } 1598 }) 1599 } 1600 1601 // Redirect replies to the request with a redirect to url, 1602 // which may be a path relative to the request path. 1603 // 1604 // The provided code should be in the 3xx range and is usually 1605 // StatusMovedPermanently, StatusFound or StatusSeeOther. 1606 func Redirect(w ResponseWriter, r *Request, urlStr string, code int) { 1607 if u, err := url.Parse(urlStr); err == nil { 1608 // If url was relative, make absolute by 1609 // combining with request path. 1610 // The browser would probably do this for us, 1611 // but doing it ourselves is more reliable. 1612 1613 // NOTE(rsc): RFC 2616 says that the Location 1614 // line must be an absolute URI, like 1615 // "http://www.google.com/redirect/", 1616 // not a path like "/redirect/". 
		// Unfortunately, we don't know what to
		// put in the host name section to get the
		// client to connect to us again, so we can't
		// know the right absolute URI to send back.
		// Because of this problem, no one pays attention
		// to the RFC; they all send back just a new path.
		// So do we.
		oldpath := r.URL.Path
		if oldpath == "" { // should not happen, but avoid a crash if it does
			oldpath = "/"
		}
		if u.Scheme == "" {
			// no leading http://server
			if urlStr == "" || urlStr[0] != '/' {
				// make relative path absolute
				olddir, _ := path.Split(oldpath)
				urlStr = olddir + urlStr
			}

			var query string
			if i := strings.Index(urlStr, "?"); i != -1 {
				urlStr, query = urlStr[:i], urlStr[i:]
			}

			// clean up but preserve trailing slash
			trailing := strings.HasSuffix(urlStr, "/")
			urlStr = path.Clean(urlStr)
			if trailing && !strings.HasSuffix(urlStr, "/") {
				urlStr += "/"
			}
			urlStr += query
		}
	}

	w.Header().Set("Location", urlStr)
	w.WriteHeader(code)

	// RFC 2616 recommends that a short note "SHOULD" be included in the
	// response because older user agents may not understand 301/307.
	// Shouldn't send the response for POST or HEAD; that leaves GET.
	if r.Method == "GET" {
		note := "<a href=\"" + htmlEscape(urlStr) + "\">" + statusText[code] + "</a>.\n"
		fmt.Fprintln(w, note)
	}
}

var htmlReplacer = strings.NewReplacer(
	"&", "&amp;",
	"<", "&lt;",
	">", "&gt;",
	// "&#34;" is shorter than "&quot;".
	`"`, "&#34;",
	// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
	"'", "&#39;",
)

func htmlEscape(s string) string {
	return htmlReplacer.Replace(s)
}

// Redirect to a fixed URL
type redirectHandler struct {
	url  string
	code int
}

func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) {
	Redirect(w, r, rh.url, rh.code)
}

// RedirectHandler returns a request handler that redirects
// each request it receives to the given url using the given
// status code.
//
// The provided code should be in the 3xx range and is usually
// StatusMovedPermanently, StatusFound or StatusSeeOther.
func RedirectHandler(url string, code int) Handler {
	return &redirectHandler{url, code}
}

// ServeMux is an HTTP request multiplexer.
// It matches the URL of each incoming request against a list of registered
// patterns and calls the handler for the pattern that
// most closely matches the URL.
//
// Patterns name fixed, rooted paths, like "/favicon.ico",
// or rooted subtrees, like "/images/" (note the trailing slash).
// Longer patterns take precedence over shorter ones, so that
// if there are handlers registered for both "/images/"
// and "/images/thumbnails/", the latter handler will be
// called for paths beginning "/images/thumbnails/" and the
// former will receive requests for any other paths in the
// "/images/" subtree.
//
// Note that since a pattern ending in a slash names a rooted subtree,
// the pattern "/" matches all paths not matched by other registered
// patterns, not just the URL with Path == "/".
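// Illustrative sketch (not part of the original source file): Redirect and
// RedirectHandler as documented above. The relative target "details/" is
// resolved against the directory of the request path, so a request for
// /items/42/ is redirected to /items/42/details/. Paths and the address are
// invented for the example.
//
//	package main
//
//	import "net/http"
//
//	func main() {
//		mux := http.NewServeMux()
//		// Fixed redirect for a retired path.
//		mux.Handle("/old", http.RedirectHandler("/new", http.StatusMovedPermanently))
//		// Redirect computed relative to the request path.
//		mux.HandleFunc("/items/", func(w http.ResponseWriter, r *http.Request) {
//			http.Redirect(w, r, "details/", http.StatusFound)
//		})
//		http.ListenAndServe("127.0.0.1:8080", mux)
//	}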
1714 // 1715 // If a subtree has been registered and a request is received naming the 1716 // subtree root without its trailing slash, ServeMux redirects that 1717 // request to the subtree root (adding the trailing slash). This behavior can 1718 // be overridden with a separate registration for the path without 1719 // the trailing slash. For example, registering "/images/" causes ServeMux 1720 // to redirect a request for "/images" to "/images/", unless "/images" has 1721 // been registered separately. 1722 // 1723 // Patterns may optionally begin with a host name, restricting matches to 1724 // URLs on that host only. Host-specific patterns take precedence over 1725 // general patterns, so that a handler might register for the two patterns 1726 // "/codesearch" and "codesearch.google.com/" without also taking over 1727 // requests for "http://www.google.com/". 1728 // 1729 // ServeMux also takes care of sanitizing the URL request path, 1730 // redirecting any request containing . or .. elements or repeated slashes 1731 // to an equivalent, cleaner URL. 1732 type ServeMux struct { 1733 mu sync.RWMutex 1734 m map[string]muxEntry 1735 hosts bool // whether any patterns contain hostnames 1736 } 1737 1738 type muxEntry struct { 1739 explicit bool 1740 h Handler 1741 pattern string 1742 } 1743 1744 // NewServeMux allocates and returns a new ServeMux. 1745 func NewServeMux() *ServeMux { return &ServeMux{m: make(map[string]muxEntry)} } 1746 1747 // DefaultServeMux is the default ServeMux used by Serve. 1748 var DefaultServeMux = NewServeMux() 1749 1750 // Does path match pattern? 1751 func pathMatch(pattern, path string) bool { 1752 if len(pattern) == 0 { 1753 // should not happen 1754 return false 1755 } 1756 n := len(pattern) 1757 if pattern[n-1] != '/' { 1758 return pattern == path 1759 } 1760 return len(path) >= n && path[0:n] == pattern 1761 } 1762 1763 // Return the canonical path for p, eliminating . and .. elements. 1764 func cleanPath(p string) string { 1765 if p == "" { 1766 return "/" 1767 } 1768 if p[0] != '/' { 1769 p = "/" + p 1770 } 1771 np := path.Clean(p) 1772 // path.Clean removes trailing slash except for root; 1773 // put the trailing slash back if necessary. 1774 if p[len(p)-1] == '/' && np != "/" { 1775 np += "/" 1776 } 1777 return np 1778 } 1779 1780 // Find a handler on a handler map given a path string 1781 // Most-specific (longest) pattern wins 1782 func (mux *ServeMux) match(path string) (h Handler, pattern string) { 1783 var n = 0 1784 for k, v := range mux.m { 1785 if !pathMatch(k, path) { 1786 continue 1787 } 1788 if h == nil || len(k) > n { 1789 n = len(k) 1790 h = v.h 1791 pattern = v.pattern 1792 } 1793 } 1794 return 1795 } 1796 1797 // Handler returns the handler to use for the given request, 1798 // consulting r.Method, r.Host, and r.URL.Path. It always returns 1799 // a non-nil handler. If the path is not in its canonical form, the 1800 // handler will be an internally-generated handler that redirects 1801 // to the canonical path. 1802 // 1803 // Handler also returns the registered pattern that matches the 1804 // request or, in the case of internally-generated redirects, 1805 // the pattern that will match after following the redirect. 1806 // 1807 // If there is no registered handler that applies to the request, 1808 // Handler returns a ``page not found'' handler and an empty pattern. 
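// Illustrative sketch (not part of the original source file): the pattern
// rules from the ServeMux documentation above. "/images/" names a rooted
// subtree, "/images/thumbnails/" is a longer and therefore more specific
// subtree, "/favicon.ico" is a fixed path, and "/" catches everything else.
// Handlers, paths, and the address are invented for the example.
//
//	package main
//
//	import (
//		"fmt"
//		"net/http"
//	)
//
//	func say(msg string) http.Handler {
//		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//			fmt.Fprintf(w, "%s: %s\n", msg, r.URL.Path)
//		})
//	}
//
//	func main() {
//		mux := http.NewServeMux()
//		mux.Handle("/", say("root"))                         // everything not matched below
//		mux.Handle("/favicon.ico", say("favicon"))           // fixed, rooted path
//		mux.Handle("/images/", say("images"))                // rooted subtree
//		mux.Handle("/images/thumbnails/", say("thumbnails")) // longest pattern wins
//		http.ListenAndServe("127.0.0.1:8080", mux)
//	}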
1809 func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) { 1810 if r.Method != "CONNECT" { 1811 if p := cleanPath(r.URL.Path); p != r.URL.Path { 1812 _, pattern = mux.handler(r.Host, p) 1813 url := *r.URL 1814 url.Path = p 1815 return RedirectHandler(url.String(), StatusMovedPermanently), pattern 1816 } 1817 } 1818 1819 return mux.handler(r.Host, r.URL.Path) 1820 } 1821 1822 // handler is the main implementation of Handler. 1823 // The path is known to be in canonical form, except for CONNECT methods. 1824 func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) { 1825 mux.mu.RLock() 1826 defer mux.mu.RUnlock() 1827 1828 // Host-specific pattern takes precedence over generic ones 1829 if mux.hosts { 1830 h, pattern = mux.match(host + path) 1831 } 1832 if h == nil { 1833 h, pattern = mux.match(path) 1834 } 1835 if h == nil { 1836 h, pattern = NotFoundHandler(), "" 1837 } 1838 return 1839 } 1840 1841 // ServeHTTP dispatches the request to the handler whose 1842 // pattern most closely matches the request URL. 1843 func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) { 1844 if r.RequestURI == "*" { 1845 if r.ProtoAtLeast(1, 1) { 1846 w.Header().Set("Connection", "close") 1847 } 1848 w.WriteHeader(StatusBadRequest) 1849 return 1850 } 1851 h, _ := mux.Handler(r) 1852 h.ServeHTTP(w, r) 1853 } 1854 1855 // Handle registers the handler for the given pattern. 1856 // If a handler already exists for pattern, Handle panics. 1857 func (mux *ServeMux) Handle(pattern string, handler Handler) { 1858 mux.mu.Lock() 1859 defer mux.mu.Unlock() 1860 1861 if pattern == "" { 1862 panic("http: invalid pattern " + pattern) 1863 } 1864 if handler == nil { 1865 panic("http: nil handler") 1866 } 1867 if mux.m[pattern].explicit { 1868 panic("http: multiple registrations for " + pattern) 1869 } 1870 1871 mux.m[pattern] = muxEntry{explicit: true, h: handler, pattern: pattern} 1872 1873 if pattern[0] != '/' { 1874 mux.hosts = true 1875 } 1876 1877 // Helpful behavior: 1878 // If pattern is /tree/, insert an implicit permanent redirect for /tree. 1879 // It can be overridden by an explicit registration. 1880 n := len(pattern) 1881 if n > 0 && pattern[n-1] == '/' && !mux.m[pattern[0:n-1]].explicit { 1882 // If pattern contains a host name, strip it and use remaining 1883 // path for redirect. 1884 path := pattern 1885 if pattern[0] != '/' { 1886 // In pattern, at least the last character is a '/', so 1887 // strings.Index can't be -1. 1888 path = pattern[strings.Index(pattern, "/"):] 1889 } 1890 url := &url.URL{Path: path} 1891 mux.m[pattern[0:n-1]] = muxEntry{h: RedirectHandler(url.String(), StatusMovedPermanently), pattern: pattern} 1892 } 1893 } 1894 1895 // HandleFunc registers the handler function for the given pattern. 1896 func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { 1897 mux.Handle(pattern, HandlerFunc(handler)) 1898 } 1899 1900 // Handle registers the handler for the given pattern 1901 // in the DefaultServeMux. 1902 // The documentation for ServeMux explains how patterns are matched. 1903 func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } 1904 1905 // HandleFunc registers the handler function for the given pattern 1906 // in the DefaultServeMux. 1907 // The documentation for ServeMux explains how patterns are matched. 
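//
// For example (an illustrative sketch; the path and response body are arbitrary):
//
//	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
//		io.WriteString(w, "ok\n")
//	})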
1908 func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { 1909 DefaultServeMux.HandleFunc(pattern, handler) 1910 } 1911 1912 // Serve accepts incoming HTTP connections on the listener l, 1913 // creating a new service goroutine for each. The service goroutines 1914 // read requests and then call handler to reply to them. 1915 // Handler is typically nil, in which case the DefaultServeMux is used. 1916 func Serve(l net.Listener, handler Handler) error { 1917 srv := &Server{Handler: handler} 1918 return srv.Serve(l) 1919 } 1920 1921 // A Server defines parameters for running an HTTP server. 1922 // The zero value for Server is a valid configuration. 1923 type Server struct { 1924 Addr string // TCP address to listen on, ":http" if empty 1925 Handler Handler // handler to invoke, http.DefaultServeMux if nil 1926 ReadTimeout time.Duration // maximum duration before timing out read of the request 1927 WriteTimeout time.Duration // maximum duration before timing out write of the response 1928 MaxHeaderBytes int // maximum size of request headers, DefaultMaxHeaderBytes if 0 1929 TLSConfig *tls.Config // optional TLS config, used by ListenAndServeTLS 1930 1931 // TLSNextProto optionally specifies a function to take over 1932 // ownership of the provided TLS connection when an NPN 1933 // protocol upgrade has occurred. The map key is the protocol 1934 // name negotiated. The Handler argument should be used to 1935 // handle HTTP requests and will initialize the Request's TLS 1936 // and RemoteAddr if not already set. The connection is 1937 // automatically closed when the function returns. 1938 // If TLSNextProto is nil, HTTP/2 support is enabled automatically. 1939 TLSNextProto map[string]func(*Server, *tls.Conn, Handler) 1940 1941 // ConnState specifies an optional callback function that is 1942 // called when a client connection changes state. See the 1943 // ConnState type and associated constants for details. 1944 ConnState func(net.Conn, ConnState) 1945 1946 // ErrorLog specifies an optional logger for errors accepting 1947 // connections and unexpected behavior from handlers. 1948 // If nil, logging goes to os.Stderr via the log package's 1949 // standard logger. 1950 ErrorLog *log.Logger 1951 1952 disableKeepAlives int32 // accessed atomically. 1953 nextProtoOnce sync.Once // guards initialization of TLSNextProto in Serve 1954 nextProtoErr error 1955 } 1956 1957 // A ConnState represents the state of a client connection to a server. 1958 // It's used by the optional Server.ConnState hook. 1959 type ConnState int 1960 1961 const ( 1962 // StateNew represents a new connection that is expected to 1963 // send a request immediately. Connections begin at this 1964 // state and then transition to either StateActive or 1965 // StateClosed. 1966 StateNew ConnState = iota 1967 1968 // StateActive represents a connection that has read 1 or more 1969 // bytes of a request. The Server.ConnState hook for 1970 // StateActive fires before the request has entered a handler 1971 // and doesn't fire again until the request has been 1972 // handled. After the request is handled, the state 1973 // transitions to StateClosed, StateHijacked, or StateIdle. 1974 StateActive 1975 1976 // StateIdle represents a connection that has finished 1977 // handling a request and is in the keep-alive state, waiting 1978 // for a new request. Connections transition from StateIdle 1979 // to either StateActive or StateClosed. 1980 StateIdle 1981 1982 // StateHijacked represents a hijacked connection. 
1983 // This is a terminal state. It does not transition to StateClosed. 1984 StateHijacked 1985 1986 // StateClosed represents a closed connection. 1987 // This is a terminal state. Hijacked connections do not 1988 // transition to StateClosed. 1989 StateClosed 1990 ) 1991 1992 var stateName = map[ConnState]string{ 1993 StateNew: "new", 1994 StateActive: "active", 1995 StateIdle: "idle", 1996 StateHijacked: "hijacked", 1997 StateClosed: "closed", 1998 } 1999 2000 func (c ConnState) String() string { 2001 return stateName[c] 2002 } 2003 2004 // serverHandler delegates to either the server's Handler or 2005 // DefaultServeMux and also handles "OPTIONS *" requests. 2006 type serverHandler struct { 2007 srv *Server 2008 } 2009 2010 func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) { 2011 handler := sh.srv.Handler 2012 if handler == nil { 2013 handler = DefaultServeMux 2014 } 2015 if req.RequestURI == "*" && req.Method == "OPTIONS" { 2016 handler = globalOptionsHandler{} 2017 } 2018 handler.ServeHTTP(rw, req) 2019 } 2020 2021 // ListenAndServe listens on the TCP network address srv.Addr and then 2022 // calls Serve to handle requests on incoming connections. 2023 // Accepted connections are configured to enable TCP keep-alives. 2024 // If srv.Addr is blank, ":http" is used. 2025 // ListenAndServe always returns a non-nil error. 2026 func (srv *Server) ListenAndServe() error { 2027 addr := srv.Addr 2028 if addr == "" { 2029 addr = ":http" 2030 } 2031 ln, err := net.Listen("tcp", addr) 2032 if err != nil { 2033 return err 2034 } 2035 return srv.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)}) 2036 } 2037 2038 var testHookServerServe func(*Server, net.Listener) // used if non-nil 2039 2040 // Serve accepts incoming connections on the Listener l, creating a 2041 // new service goroutine for each. The service goroutines read requests and 2042 // then call srv.Handler to reply to them. 2043 // Serve always returns a non-nil error. 2044 func (srv *Server) Serve(l net.Listener) error { 2045 defer l.Close() 2046 if fn := testHookServerServe; fn != nil { 2047 fn(srv, l) 2048 } 2049 var tempDelay time.Duration // how long to sleep on accept failure 2050 if err := srv.setupHTTP2(); err != nil { 2051 return err 2052 } 2053 for { 2054 rw, e := l.Accept() 2055 if e != nil { 2056 if ne, ok := e.(net.Error); ok && ne.Temporary() { 2057 if tempDelay == 0 { 2058 tempDelay = 5 * time.Millisecond 2059 } else { 2060 tempDelay *= 2 2061 } 2062 if max := 1 * time.Second; tempDelay > max { 2063 tempDelay = max 2064 } 2065 srv.logf("http: Accept error: %v; retrying in %v", e, tempDelay) 2066 time.Sleep(tempDelay) 2067 continue 2068 } 2069 return e 2070 } 2071 tempDelay = 0 2072 c := srv.newConn(rw) 2073 c.setState(c.rwc, StateNew) // before Serve can return 2074 go c.serve() 2075 } 2076 } 2077 2078 func (s *Server) doKeepAlives() bool { 2079 return atomic.LoadInt32(&s.disableKeepAlives) == 0 2080 } 2081 2082 // SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled. 2083 // By default, keep-alives are always enabled. Only very 2084 // resource-constrained environments or servers in the process of 2085 // shutting down should disable them. 2086 func (srv *Server) SetKeepAlivesEnabled(v bool) { 2087 if v { 2088 atomic.StoreInt32(&srv.disableKeepAlives, 0) 2089 } else { 2090 atomic.StoreInt32(&srv.disableKeepAlives, 1) 2091 } 2092 } 2093 2094 func (s *Server) logf(format string, args ...interface{}) { 2095 if s.ErrorLog != nil { 2096 s.ErrorLog.Printf(format, args...) 
2097 } else { 2098 log.Printf(format, args...) 2099 } 2100 } 2101 2102 // ListenAndServe listens on the TCP network address addr 2103 // and then calls Serve with handler to handle requests 2104 // on incoming connections. 2105 // Accepted connections are configured to enable TCP keep-alives. 2106 // Handler is typically nil, in which case the DefaultServeMux is 2107 // used. 2108 // 2109 // A trivial example server is: 2110 // 2111 // package main 2112 // 2113 // import ( 2114 // "io" 2115 // "net/http" 2116 // "log" 2117 // ) 2118 // 2119 // // hello world, the web server 2120 // func HelloServer(w http.ResponseWriter, req *http.Request) { 2121 // io.WriteString(w, "hello, world!\n") 2122 // } 2123 // 2124 // func main() { 2125 // http.HandleFunc("/hello", HelloServer) 2126 // log.Fatal(http.ListenAndServe(":12345", nil)) 2127 // } 2128 // 2129 // ListenAndServe always returns a non-nil error. 2130 func ListenAndServe(addr string, handler Handler) error { 2131 server := &Server{Addr: addr, Handler: handler} 2132 return server.ListenAndServe() 2133 } 2134 2135 // ListenAndServeTLS acts identically to ListenAndServe, except that it 2136 // expects HTTPS connections. Additionally, files containing a certificate and 2137 // matching private key for the server must be provided. If the certificate 2138 // is signed by a certificate authority, the certFile should be the concatenation 2139 // of the server's certificate, any intermediates, and the CA's certificate. 2140 // 2141 // A trivial example server is: 2142 // 2143 // import ( 2144 // "log" 2145 // "net/http" 2146 // ) 2147 // 2148 // func handler(w http.ResponseWriter, req *http.Request) { 2149 // w.Header().Set("Content-Type", "text/plain") 2150 // w.Write([]byte("This is an example server.\n")) 2151 // } 2152 // 2153 // func main() { 2154 // http.HandleFunc("/", handler) 2155 // log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/") 2156 // err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil) 2157 // log.Fatal(err) 2158 // } 2159 // 2160 // One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem. 2161 // 2162 // ListenAndServeTLS always returns a non-nil error. 2163 func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { 2164 server := &Server{Addr: addr, Handler: handler} 2165 return server.ListenAndServeTLS(certFile, keyFile) 2166 } 2167 2168 // ListenAndServeTLS listens on the TCP network address srv.Addr and 2169 // then calls Serve to handle requests on incoming TLS connections. 2170 // Accepted connections are configured to enable TCP keep-alives. 2171 // 2172 // Filenames containing a certificate and matching private key for the 2173 // server must be provided if the Server's TLSConfig.Certificates is 2174 // not populated. If the certificate is signed by a certificate 2175 // authority, the certFile should be the concatenation of the server's 2176 // certificate, any intermediates, and the CA's certificate. 2177 // 2178 // If srv.Addr is blank, ":https" is used. 2179 // 2180 // ListenAndServeTLS always returns a non-nil error. 2181 func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error { 2182 addr := srv.Addr 2183 if addr == "" { 2184 addr = ":https" 2185 } 2186 2187 // Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig 2188 // before we clone it and create the TLS Listener. 
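	// (setupHTTP2 is guarded by srv.nextProtoOnce, so the one-time HTTP/2
	// configuration runs at most once per Server; any error it produced is
	// reported here through srv.nextProtoErr.)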
2189 if err := srv.setupHTTP2(); err != nil { 2190 return err 2191 } 2192 2193 config := cloneTLSConfig(srv.TLSConfig) 2194 if !strSliceContains(config.NextProtos, "http/1.1") { 2195 config.NextProtos = append(config.NextProtos, "http/1.1") 2196 } 2197 2198 if len(config.Certificates) == 0 || certFile != "" || keyFile != "" { 2199 var err error 2200 config.Certificates = make([]tls.Certificate, 1) 2201 config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) 2202 if err != nil { 2203 return err 2204 } 2205 } 2206 2207 ln, err := net.Listen("tcp", addr) 2208 if err != nil { 2209 return err 2210 } 2211 2212 tlsListener := tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, config) 2213 return srv.Serve(tlsListener) 2214 } 2215 2216 func (srv *Server) setupHTTP2() error { 2217 srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults) 2218 return srv.nextProtoErr 2219 } 2220 2221 // onceSetNextProtoDefaults configures HTTP/2, if the user hasn't 2222 // configured otherwise. (by setting srv.TLSNextProto non-nil) 2223 // It must only be called via srv.nextProtoOnce (use srv.setupHTTP2). 2224 func (srv *Server) onceSetNextProtoDefaults() { 2225 // Enable HTTP/2 by default if the user hasn't otherwise 2226 // configured their TLSNextProto map. 2227 if srv.TLSNextProto == nil { 2228 srv.nextProtoErr = http2ConfigureServer(srv, nil) 2229 } 2230 } 2231 2232 // TimeoutHandler returns a Handler that runs h with the given time limit. 2233 // 2234 // The new Handler calls h.ServeHTTP to handle each request, but if a 2235 // call runs for longer than its time limit, the handler responds with 2236 // a 503 Service Unavailable error and the given message in its body. 2237 // (If msg is empty, a suitable default message will be sent.) 2238 // After such a timeout, writes by h to its ResponseWriter will return 2239 // ErrHandlerTimeout. 2240 // 2241 // TimeoutHandler buffers all Handler writes to memory and does not 2242 // support the Hijacker or Flusher interfaces. 2243 func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler { 2244 t := time.NewTimer(dt) 2245 return &timeoutHandler{ 2246 handler: h, 2247 body: msg, 2248 2249 // Effectively storing a *time.Timer, but decomposed 2250 // for testing: 2251 timeout: func() <-chan time.Time { return t.C }, 2252 cancelTimer: t.Stop, 2253 } 2254 } 2255 2256 // ErrHandlerTimeout is returned on ResponseWriter Write calls 2257 // in handlers which have timed out. 2258 var ErrHandlerTimeout = errors.New("http: Handler timeout") 2259 2260 type timeoutHandler struct { 2261 handler Handler 2262 body string 2263 2264 // timeout returns the channel of a *time.Timer and 2265 // cancelTimer cancels it. They're stored separately for 2266 // testing purposes. 
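	// In TimeoutHandler they are wired to a single time.Timer's C channel
	// and Stop method.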
2267 timeout func() <-chan time.Time // returns channel producing a timeout 2268 cancelTimer func() bool // optional 2269 } 2270 2271 func (h *timeoutHandler) errorBody() string { 2272 if h.body != "" { 2273 return h.body 2274 } 2275 return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>" 2276 } 2277 2278 func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) { 2279 done := make(chan struct{}) 2280 tw := &timeoutWriter{ 2281 w: w, 2282 h: make(Header), 2283 } 2284 go func() { 2285 h.handler.ServeHTTP(tw, r) 2286 close(done) 2287 }() 2288 select { 2289 case <-done: 2290 tw.mu.Lock() 2291 defer tw.mu.Unlock() 2292 dst := w.Header() 2293 for k, vv := range tw.h { 2294 dst[k] = vv 2295 } 2296 w.WriteHeader(tw.code) 2297 w.Write(tw.wbuf.Bytes()) 2298 if h.cancelTimer != nil { 2299 h.cancelTimer() 2300 } 2301 case <-h.timeout(): 2302 tw.mu.Lock() 2303 defer tw.mu.Unlock() 2304 w.WriteHeader(StatusServiceUnavailable) 2305 io.WriteString(w, h.errorBody()) 2306 tw.timedOut = true 2307 return 2308 } 2309 } 2310 2311 type timeoutWriter struct { 2312 w ResponseWriter 2313 h Header 2314 wbuf bytes.Buffer 2315 2316 mu sync.Mutex 2317 timedOut bool 2318 wroteHeader bool 2319 code int 2320 } 2321 2322 func (tw *timeoutWriter) Header() Header { return tw.h } 2323 2324 func (tw *timeoutWriter) Write(p []byte) (int, error) { 2325 tw.mu.Lock() 2326 defer tw.mu.Unlock() 2327 if tw.timedOut { 2328 return 0, ErrHandlerTimeout 2329 } 2330 if !tw.wroteHeader { 2331 tw.writeHeader(StatusOK) 2332 } 2333 return tw.wbuf.Write(p) 2334 } 2335 2336 func (tw *timeoutWriter) WriteHeader(code int) { 2337 tw.mu.Lock() 2338 defer tw.mu.Unlock() 2339 if tw.timedOut || tw.wroteHeader { 2340 return 2341 } 2342 tw.writeHeader(code) 2343 } 2344 2345 func (tw *timeoutWriter) writeHeader(code int) { 2346 tw.wroteHeader = true 2347 tw.code = code 2348 } 2349 2350 // tcpKeepAliveListener sets TCP keep-alive timeouts on accepted 2351 // connections. It's used by ListenAndServe and ListenAndServeTLS so 2352 // dead TCP connections (e.g. closing laptop mid-download) eventually 2353 // go away. 2354 type tcpKeepAliveListener struct { 2355 *net.TCPListener 2356 } 2357 2358 func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { 2359 tc, err := ln.AcceptTCP() 2360 if err != nil { 2361 return 2362 } 2363 tc.SetKeepAlive(true) 2364 tc.SetKeepAlivePeriod(3 * time.Minute) 2365 return tc, nil 2366 } 2367 2368 // globalOptionsHandler responds to "OPTIONS *" requests. 2369 type globalOptionsHandler struct{} 2370 2371 func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) { 2372 w.Header().Set("Content-Length", "0") 2373 if r.ContentLength != 0 { 2374 // Read up to 4KB of OPTIONS body (as mentioned in the 2375 // spec as being reserved for future use), but anything 2376 // over that is considered a waste of server resources 2377 // (or an attack) and we abort and close the connection, 2378 // courtesy of MaxBytesReader's EOF behavior. 2379 mb := MaxBytesReader(w, r.Body, 4<<10) 2380 io.Copy(ioutil.Discard, mb) 2381 } 2382 } 2383 2384 type eofReaderWithWriteTo struct{} 2385 2386 func (eofReaderWithWriteTo) WriteTo(io.Writer) (int64, error) { return 0, nil } 2387 func (eofReaderWithWriteTo) Read([]byte) (int, error) { return 0, io.EOF } 2388 2389 // eofReader is a non-nil io.ReadCloser that always returns EOF. 2390 // It has a WriteTo method so io.Copy won't need a buffer. 
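// Close is supplied by the embedded ioutil.NopCloser(nil) and is a no-op;
// the nil Reader inside it is never used, since Read comes from
// eofReaderWithWriteTo.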
2391 var eofReader = &struct { 2392 eofReaderWithWriteTo 2393 io.Closer 2394 }{ 2395 eofReaderWithWriteTo{}, 2396 ioutil.NopCloser(nil), 2397 } 2398 2399 // Verify that an io.Copy from an eofReader won't require a buffer. 2400 var _ io.WriterTo = eofReader 2401 2402 // initNPNRequest is an HTTP handler that initializes certain 2403 // uninitialized fields in its *Request. Such partially-initialized 2404 // Requests come from NPN protocol handlers. 2405 type initNPNRequest struct { 2406 c *tls.Conn 2407 h serverHandler 2408 } 2409 2410 func (h initNPNRequest) ServeHTTP(rw ResponseWriter, req *Request) { 2411 if req.TLS == nil { 2412 req.TLS = &tls.ConnectionState{} 2413 *req.TLS = h.c.ConnectionState() 2414 } 2415 if req.Body == nil { 2416 req.Body = eofReader 2417 } 2418 if req.RemoteAddr == "" { 2419 req.RemoteAddr = h.c.RemoteAddr().String() 2420 } 2421 h.h.ServeHTTP(rw, req) 2422 } 2423 2424 // loggingConn is used for debugging. 2425 type loggingConn struct { 2426 name string 2427 net.Conn 2428 } 2429 2430 var ( 2431 uniqNameMu sync.Mutex 2432 uniqNameNext = make(map[string]int) 2433 ) 2434 2435 func newLoggingConn(baseName string, c net.Conn) net.Conn { 2436 uniqNameMu.Lock() 2437 defer uniqNameMu.Unlock() 2438 uniqNameNext[baseName]++ 2439 return &loggingConn{ 2440 name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]), 2441 Conn: c, 2442 } 2443 } 2444 2445 func (c *loggingConn) Write(p []byte) (n int, err error) { 2446 log.Printf("%s.Write(%d) = ....", c.name, len(p)) 2447 n, err = c.Conn.Write(p) 2448 log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err) 2449 return 2450 } 2451 2452 func (c *loggingConn) Read(p []byte) (n int, err error) { 2453 log.Printf("%s.Read(%d) = ....", c.name, len(p)) 2454 n, err = c.Conn.Read(p) 2455 log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err) 2456 return 2457 } 2458 2459 func (c *loggingConn) Close() (err error) { 2460 log.Printf("%s.Close() = ...", c.name) 2461 err = c.Conn.Close() 2462 log.Printf("%s.Close() = %v", c.name, err) 2463 return 2464 } 2465 2466 // checkConnErrorWriter writes to c.rwc and records any write errors to c.werr. 2467 // It only contains one field (and a pointer field at that), so it 2468 // fits in an interface value without an extra allocation. 2469 type checkConnErrorWriter struct { 2470 c *conn 2471 } 2472 2473 func (w checkConnErrorWriter) Write(p []byte) (n int, err error) { 2474 n, err = w.c.rwc.Write(p) 2475 if err != nil && w.c.werr == nil { 2476 w.c.werr = err 2477 } 2478 return 2479 } 2480 2481 func numLeadingCRorLF(v []byte) (n int) { 2482 for _, b := range v { 2483 if b == '\r' || b == '\n' { 2484 n++ 2485 continue 2486 } 2487 break 2488 } 2489 return 2490 2491 } 2492 2493 func strSliceContains(ss []string, s string) bool { 2494 for _, v := range ss { 2495 if v == s { 2496 return true 2497 } 2498 } 2499 return false 2500 }
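// An illustrative sketch (not part of this file) tying together the Server
// fields, the ConnState hook, and TimeoutHandler documented above. All names,
// addresses, and durations below are arbitrary; the handler simply sleeps and
// then writes a fixed string, so the one-second TimeoutHandler limit produces
// a 503 with the given message.
//
//	package main
//
//	import (
//		"io"
//		"log"
//		"net"
//		"net/http"
//		"time"
//	)
//
//	func main() {
//		slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//			time.Sleep(2 * time.Second) // pretend to do work
//			io.WriteString(w, "done\n")
//		})
//
//		srv := &http.Server{
//			Addr:           ":8080",
//			Handler:        http.TimeoutHandler(slow, time.Second, "request timed out"),
//			ReadTimeout:    10 * time.Second,
//			WriteTimeout:   10 * time.Second,
//			MaxHeaderBytes: 1 << 20,
//			ConnState: func(c net.Conn, state http.ConnState) {
//				log.Printf("conn %v: %v", c.RemoteAddr(), state)
//			},
//		}
//		// srv.SetKeepAlivesEnabled(false) // e.g. while shutting down
//		log.Fatal(srv.ListenAndServe())
//	}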