github.com/gocuntian/go@v0.0.0-20160610041250-fee02d270bf8/src/net/http/server.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// HTTP server. See RFC 2616.

package http

import (
	"bufio"
	"bytes"
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net"
	"net/textproto"
	"net/url"
	"os"
	"path"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/net/lex/httplex"
)

// Errors used by the HTTP server.
var (
	// ErrBodyNotAllowed is returned by ResponseWriter.Write calls
	// when the HTTP method or response code does not permit a
	// body.
	ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body")

	// ErrHijacked is returned by ResponseWriter.Write calls when
	// the underlying connection has been hijacked using the
	// Hijacker interface.
	ErrHijacked = errors.New("http: connection has been hijacked")

	// ErrContentLength is returned by ResponseWriter.Write calls
	// when a Handler set a Content-Length response header with a
	// declared size and then attempted to write more bytes than
	// declared.
	ErrContentLength = errors.New("http: wrote more than the declared Content-Length")

	// Deprecated: ErrWriteAfterFlush is no longer used.
	ErrWriteAfterFlush = errors.New("unused")
)

// A Handler responds to an HTTP request.
//
// ServeHTTP should write reply headers and data to the ResponseWriter
// and then return. Returning signals that the request is finished; it
// is not valid to use the ResponseWriter or read from the
// Request.Body after or concurrently with the completion of the
// ServeHTTP call.
//
// Depending on the HTTP client software, HTTP protocol version, and
// any intermediaries between the client and the Go server, it may not
// be possible to read from the Request.Body after writing to the
// ResponseWriter. Cautious handlers should read the Request.Body
// first, and then reply.
//
// Except for reading the body, handlers should not modify the
// provided Request.
//
// If ServeHTTP panics, the server (the caller of ServeHTTP) assumes
// that the effect of the panic was isolated to the active request.
// It recovers the panic, logs a stack trace to the server error log,
// and hangs up the connection.
type Handler interface {
	ServeHTTP(ResponseWriter, *Request)
}
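
// Illustrative sketch (editor's addition, not part of the original file):
// a minimal Handler implementation following the guidance above. It reads
// the request body before replying, as cautious handlers should. The type
// name exampleEchoHandler is hypothetical.
type exampleEchoHandler struct{}

func (exampleEchoHandler) ServeHTTP(w ResponseWriter, r *Request) {
	body, err := ioutil.ReadAll(r.Body) // read the body first, then reply
	if err != nil {
		Error(w, "error reading request body", StatusBadRequest)
		return
	}
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	w.WriteHeader(StatusOK)
	w.Write(body)
}
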
// A ResponseWriter interface is used by an HTTP handler to
// construct an HTTP response.
//
// A ResponseWriter may not be used after the Handler.ServeHTTP method
// has returned.
type ResponseWriter interface {
	// Header returns the header map that will be sent by
	// WriteHeader. Changing the header after a call to
	// WriteHeader (or Write) has no effect unless the modified
	// headers were declared as trailers by setting the
	// "Trailer" header before the call to WriteHeader (see example).
	// To suppress implicit response headers, set their value to nil.
	Header() Header

	// Write writes the data to the connection as part of an HTTP reply.
	//
	// If WriteHeader has not yet been called, Write calls
	// WriteHeader(http.StatusOK) before writing the data. If the Header
	// does not contain a Content-Type line, Write adds a Content-Type set
	// to the result of passing the initial 512 bytes of written data to
	// DetectContentType.
	//
	// Depending on the HTTP protocol version and the client, calling
	// Write or WriteHeader may prevent future reads on the
	// Request.Body. For HTTP/1.x requests, handlers should read any
	// needed request body data before writing the response. Once the
	// headers have been flushed (due to either an explicit Flusher.Flush
	// call or writing enough data to trigger a flush), the request body
	// may be unavailable. For HTTP/2 requests, the Go HTTP server permits
	// handlers to continue to read the request body while concurrently
	// writing the response. However, such behavior may not be supported
	// by all HTTP/2 clients. Handlers should read before writing if
	// possible to maximize compatibility.
	Write([]byte) (int, error)

	// WriteHeader sends an HTTP response header with status code.
	// If WriteHeader is not called explicitly, the first call to Write
	// will trigger an implicit WriteHeader(http.StatusOK).
	// Thus explicit calls to WriteHeader are mainly used to
	// send error codes.
	WriteHeader(int)
}

// The Flusher interface is implemented by ResponseWriters that allow
// an HTTP handler to flush buffered data to the client.
//
// The default HTTP/1.x and HTTP/2 ResponseWriter implementations
// support Flusher, but ResponseWriter wrappers may not. Handlers
// should always test for this ability at runtime.
//
// Note that even for ResponseWriters that support Flush,
// if the client is connected through an HTTP proxy,
// the buffered data may not reach the client until the response
// completes.
type Flusher interface {
	// Flush sends any buffered data to the client.
	Flush()
}

// The Hijacker interface is implemented by ResponseWriters that allow
// an HTTP handler to take over the connection.
//
// The default ResponseWriter for HTTP/1.x connections supports
// Hijacker, but HTTP/2 connections intentionally do not.
// ResponseWriter wrappers may also not support Hijacker. Handlers
// should always test for this ability at runtime.
type Hijacker interface {
	// Hijack lets the caller take over the connection.
	// After a call to Hijack(), the HTTP server library
	// will not do anything else with the connection.
	//
	// It becomes the caller's responsibility to manage
	// and close the connection.
	//
	// The returned net.Conn may have read or write deadlines
	// already set, depending on the configuration of the
	// Server. It is the caller's responsibility to set
	// or clear those deadlines as needed.
	Hijack() (net.Conn, *bufio.ReadWriter, error)
}

// The CloseNotifier interface is implemented by ResponseWriters which
// allow detecting when the underlying connection has gone away.
//
// This mechanism can be used to cancel long operations on the server
// if the client has disconnected before the response is ready.
type CloseNotifier interface {
	// CloseNotify returns a channel that receives at most a
	// single value (true) when the client connection has gone
	// away.
	//
	// CloseNotify may wait to notify until Request.Body has been
	// fully read.
	//
	// After the Handler has returned, there is no guarantee
	// that the channel receives a value.
	//
	// If the protocol is HTTP/1.1 and CloseNotify is called while
	// processing an idempotent request (such as a GET) while
	// HTTP/1.1 pipelining is in use, the arrival of a subsequent
	// pipelined request may cause a value to be sent on the
	// returned channel. In practice HTTP/1.1 pipelining is not
	// enabled in browsers and not seen often in the wild. If this
	// is a problem, use HTTP/2 or only use CloseNotify on methods
	// such as POST.
	CloseNotify() <-chan bool
}
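
// Illustrative sketch (editor's addition, not part of the original file):
// using CloseNotifier to abandon work when the client goes away. The
// doSlowWork parameter stands in for a hypothetical long-running operation.
func exampleCloseNotify(w ResponseWriter, r *Request, doSlowWork func() []byte) {
	cn, ok := w.(CloseNotifier)
	if !ok {
		// The ResponseWriter (or a wrapper) doesn't support CloseNotifier.
		w.Write(doSlowWork())
		return
	}
	done := make(chan []byte, 1)
	go func() { done <- doSlowWork() }()
	select {
	case <-cn.CloseNotify():
		// Client disconnected before the result was ready; give up.
	case b := <-done:
		w.Write(b)
	}
}
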
var (
	// ServerContextKey is a context key. It can be used in HTTP
	// handlers with context.WithValue to access the server that
	// started the handler. The associated value will be of
	// type *Server.
	ServerContextKey = &contextKey{"http-server"}

	// LocalAddrContextKey is a context key. It can be used in
	// HTTP handlers with context.WithValue to access the local
	// address the connection arrived on.
	// The associated value will be of type net.Addr.
	LocalAddrContextKey = &contextKey{"local-addr"}
)

// A conn represents the server side of an HTTP connection.
type conn struct {
	// server is the server on which the connection arrived.
	// Immutable; never nil.
	server *Server

	// rwc is the underlying network connection.
	// This is never wrapped by other types and is the value given out
	// to CloseNotifier callers. It is usually of type *net.TCPConn or
	// *tls.Conn.
	rwc net.Conn

	// remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously
	// inside the Listener's Accept goroutine, as some implementations block.
	// It is populated immediately inside the (*conn).serve goroutine.
	// This is the value of a Handler's (*Request).RemoteAddr.
	remoteAddr string

	// tlsState is the TLS connection state when using TLS.
	// nil means not TLS.
	tlsState *tls.ConnectionState

	// werr is set to the first write error to rwc.
	// It is set via checkConnErrorWriter{w}, where bufw writes.
	werr error

	// r is bufr's read source. It's a wrapper around rwc that provides
	// io.LimitedReader-style limiting (while reading request headers)
	// and functionality to support CloseNotifier. See *connReader docs.
	r *connReader

	// bufr reads from r.
	// Users of bufr must hold mu.
	bufr *bufio.Reader

	// bufw writes to checkConnErrorWriter{c}, which populates werr on error.
	bufw *bufio.Writer

	// lastMethod is the method of the most recent request
	// on this connection, if any.
	lastMethod string

	// mu guards hijackedv, use of bufr, (*response).closeNotifyCh.
	mu sync.Mutex

	// hijackedv is whether this connection has been hijacked
	// by a Handler with the Hijacker interface.
	// It is guarded by mu.
	hijackedv bool
}

func (c *conn) hijacked() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.hijackedv
}

// c.mu must be held.
261 func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) { 262 if c.hijackedv { 263 return nil, nil, ErrHijacked 264 } 265 c.hijackedv = true 266 rwc = c.rwc 267 buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc)) 268 c.setState(rwc, StateHijacked) 269 return 270 } 271 272 // This should be >= 512 bytes for DetectContentType, 273 // but otherwise it's somewhat arbitrary. 274 const bufferBeforeChunkingSize = 2048 275 276 // chunkWriter writes to a response's conn buffer, and is the writer 277 // wrapped by the response.bufw buffered writer. 278 // 279 // chunkWriter also is responsible for finalizing the Header, including 280 // conditionally setting the Content-Type and setting a Content-Length 281 // in cases where the handler's final output is smaller than the buffer 282 // size. It also conditionally adds chunk headers, when in chunking mode. 283 // 284 // See the comment above (*response).Write for the entire write flow. 285 type chunkWriter struct { 286 res *response 287 288 // header is either nil or a deep clone of res.handlerHeader 289 // at the time of res.WriteHeader, if res.WriteHeader is 290 // called and extra buffering is being done to calculate 291 // Content-Type and/or Content-Length. 292 header Header 293 294 // wroteHeader tells whether the header's been written to "the 295 // wire" (or rather: w.conn.buf). this is unlike 296 // (*response).wroteHeader, which tells only whether it was 297 // logically written. 298 wroteHeader bool 299 300 // set by the writeHeader method: 301 chunking bool // using chunked transfer encoding for reply body 302 } 303 304 var ( 305 crlf = []byte("\r\n") 306 colonSpace = []byte(": ") 307 ) 308 309 func (cw *chunkWriter) Write(p []byte) (n int, err error) { 310 if !cw.wroteHeader { 311 cw.writeHeader(p) 312 } 313 if cw.res.req.Method == "HEAD" { 314 // Eat writes. 315 return len(p), nil 316 } 317 if cw.chunking { 318 _, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p)) 319 if err != nil { 320 cw.res.conn.rwc.Close() 321 return 322 } 323 } 324 n, err = cw.res.conn.bufw.Write(p) 325 if cw.chunking && err == nil { 326 _, err = cw.res.conn.bufw.Write(crlf) 327 } 328 if err != nil { 329 cw.res.conn.rwc.Close() 330 } 331 return 332 } 333 334 func (cw *chunkWriter) flush() { 335 if !cw.wroteHeader { 336 cw.writeHeader(nil) 337 } 338 cw.res.conn.bufw.Flush() 339 } 340 341 func (cw *chunkWriter) close() { 342 if !cw.wroteHeader { 343 cw.writeHeader(nil) 344 } 345 if cw.chunking { 346 bw := cw.res.conn.bufw // conn's bufio writer 347 // zero chunk to mark EOF 348 bw.WriteString("0\r\n") 349 if len(cw.res.trailers) > 0 { 350 trailers := make(Header) 351 for _, h := range cw.res.trailers { 352 if vv := cw.res.handlerHeader[h]; len(vv) > 0 { 353 trailers[h] = vv 354 } 355 } 356 trailers.Write(bw) // the writer handles noting errors 357 } 358 // final blank line after the trailers (whether 359 // present or not) 360 bw.WriteString("\r\n") 361 } 362 } 363 364 // A response represents the server side of an HTTP response. 
365 type response struct { 366 conn *conn 367 req *Request // request for this response 368 reqBody io.ReadCloser 369 cancelCtx context.CancelFunc // when ServeHTTP exits 370 wroteHeader bool // reply header has been (logically) written 371 wroteContinue bool // 100 Continue response was written 372 wants10KeepAlive bool // HTTP/1.0 w/ Connection "keep-alive" 373 wantsClose bool // HTTP request has Connection "close" 374 375 w *bufio.Writer // buffers output in chunks to chunkWriter 376 cw chunkWriter 377 378 // handlerHeader is the Header that Handlers get access to, 379 // which may be retained and mutated even after WriteHeader. 380 // handlerHeader is copied into cw.header at WriteHeader 381 // time, and privately mutated thereafter. 382 handlerHeader Header 383 calledHeader bool // handler accessed handlerHeader via Header 384 385 written int64 // number of bytes written in body 386 contentLength int64 // explicitly-declared Content-Length; or -1 387 status int // status code passed to WriteHeader 388 389 // close connection after this reply. set on request and 390 // updated after response from handler if there's a 391 // "Connection: keep-alive" response header and a 392 // Content-Length. 393 closeAfterReply bool 394 395 // requestBodyLimitHit is set by requestTooLarge when 396 // maxBytesReader hits its max size. It is checked in 397 // WriteHeader, to make sure we don't consume the 398 // remaining request body to try to advance to the next HTTP 399 // request. Instead, when this is set, we stop reading 400 // subsequent requests on this connection and stop reading 401 // input from it. 402 requestBodyLimitHit bool 403 404 // trailers are the headers to be sent after the handler 405 // finishes writing the body. This field is initialized from 406 // the Trailer response header when the response header is 407 // written. 408 trailers []string 409 410 handlerDone atomicBool // set true when the handler exits 411 412 // Buffers for Date and Content-Length 413 dateBuf [len(TimeFormat)]byte 414 clenBuf [10]byte 415 416 // closeNotifyCh is non-nil once CloseNotify is called. 417 // Guarded by conn.mu 418 closeNotifyCh <-chan bool 419 } 420 421 type atomicBool int32 422 423 func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } 424 func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } 425 426 // declareTrailer is called for each Trailer header when the 427 // response header is written. It notes that a header will need to be 428 // written in the trailers at the end of the response. 429 func (w *response) declareTrailer(k string) { 430 k = CanonicalHeaderKey(k) 431 switch k { 432 case "Transfer-Encoding", "Content-Length", "Trailer": 433 // Forbidden by RFC 2616 14.40. 434 return 435 } 436 w.trailers = append(w.trailers, k) 437 } 438 439 // requestTooLarge is called by maxBytesReader when too much input has 440 // been read from the client. 441 func (w *response) requestTooLarge() { 442 w.closeAfterReply = true 443 w.requestBodyLimitHit = true 444 if !w.wroteHeader { 445 w.Header().Set("Connection", "close") 446 } 447 } 448 449 // needsSniff reports whether a Content-Type still needs to be sniffed. 450 func (w *response) needsSniff() bool { 451 _, haveType := w.handlerHeader["Content-Type"] 452 return !w.cw.wroteHeader && !haveType && w.written < sniffLen 453 } 454 455 // writerOnly hides an io.Writer value's optional ReadFrom method 456 // from io.Copy. 
457 type writerOnly struct { 458 io.Writer 459 } 460 461 func srcIsRegularFile(src io.Reader) (isRegular bool, err error) { 462 switch v := src.(type) { 463 case *os.File: 464 fi, err := v.Stat() 465 if err != nil { 466 return false, err 467 } 468 return fi.Mode().IsRegular(), nil 469 case *io.LimitedReader: 470 return srcIsRegularFile(v.R) 471 default: 472 return 473 } 474 } 475 476 // ReadFrom is here to optimize copying from an *os.File regular file 477 // to a *net.TCPConn with sendfile. 478 func (w *response) ReadFrom(src io.Reader) (n int64, err error) { 479 // Our underlying w.conn.rwc is usually a *TCPConn (with its 480 // own ReadFrom method). If not, or if our src isn't a regular 481 // file, just fall back to the normal copy method. 482 rf, ok := w.conn.rwc.(io.ReaderFrom) 483 regFile, err := srcIsRegularFile(src) 484 if err != nil { 485 return 0, err 486 } 487 if !ok || !regFile { 488 bufp := copyBufPool.Get().(*[]byte) 489 defer copyBufPool.Put(bufp) 490 return io.CopyBuffer(writerOnly{w}, src, *bufp) 491 } 492 493 // sendfile path: 494 495 if !w.wroteHeader { 496 w.WriteHeader(StatusOK) 497 } 498 499 if w.needsSniff() { 500 n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen)) 501 n += n0 502 if err != nil { 503 return n, err 504 } 505 } 506 507 w.w.Flush() // get rid of any previous writes 508 w.cw.flush() // make sure Header is written; flush data to rwc 509 510 // Now that cw has been flushed, its chunking field is guaranteed initialized. 511 if !w.cw.chunking && w.bodyAllowed() { 512 n0, err := rf.ReadFrom(src) 513 n += n0 514 w.written += n0 515 return n, err 516 } 517 518 n0, err := io.Copy(writerOnly{w}, src) 519 n += n0 520 return n, err 521 } 522 523 // debugServerConnections controls whether all server connections are wrapped 524 // with a verbose logging wrapper. 525 const debugServerConnections = false 526 527 // Create new connection from rwc. 528 func (srv *Server) newConn(rwc net.Conn) *conn { 529 c := &conn{ 530 server: srv, 531 rwc: rwc, 532 } 533 if debugServerConnections { 534 c.rwc = newLoggingConn("server", c.rwc) 535 } 536 return c 537 } 538 539 type readResult struct { 540 n int 541 err error 542 b byte // byte read, if n == 1 543 } 544 545 // connReader is the io.Reader wrapper used by *conn. It combines a 546 // selectively-activated io.LimitedReader (to bound request header 547 // read sizes) with support for selectively keeping an io.Reader.Read 548 // call blocked in a background goroutine to wait for activity and 549 // trigger a CloseNotifier channel. 550 type connReader struct { 551 r io.Reader 552 remain int64 // bytes remaining 553 554 // ch is non-nil if a background read is in progress. 555 // It is guarded by conn.mu. 556 ch chan readResult 557 } 558 559 func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain } 560 func (cr *connReader) setInfiniteReadLimit() { cr.remain = maxInt64 } 561 func (cr *connReader) hitReadLimit() bool { return cr.remain <= 0 } 562 563 func (cr *connReader) Read(p []byte) (n int, err error) { 564 if cr.hitReadLimit() { 565 return 0, io.EOF 566 } 567 if len(p) == 0 { 568 return 569 } 570 if int64(len(p)) > cr.remain { 571 p = p[:cr.remain] 572 } 573 574 // Is a background read (started by CloseNotifier) already in 575 // flight? If so, wait for it and use its result. 
	ch := cr.ch
	if ch != nil {
		cr.ch = nil
		res := <-ch
		if res.n == 1 {
			p[0] = res.b
			cr.remain -= 1
		}
		return res.n, res.err
	}
	n, err = cr.r.Read(p)
	cr.remain -= int64(n)
	return
}

func (cr *connReader) startBackgroundRead(onReadComplete func()) {
	if cr.ch != nil {
		// Background read already started.
		return
	}
	cr.ch = make(chan readResult, 1)
	go cr.closeNotifyAwaitActivityRead(cr.ch, onReadComplete)
}

func (cr *connReader) closeNotifyAwaitActivityRead(ch chan<- readResult, onReadComplete func()) {
	var buf [1]byte
	n, err := cr.r.Read(buf[:1])
	onReadComplete()
	ch <- readResult{n, err, buf[0]}
}

var (
	bufioReaderPool   sync.Pool
	bufioWriter2kPool sync.Pool
	bufioWriter4kPool sync.Pool
)

var copyBufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 32*1024)
		return &b
	},
}

func bufioWriterPool(size int) *sync.Pool {
	switch size {
	case 2 << 10:
		return &bufioWriter2kPool
	case 4 << 10:
		return &bufioWriter4kPool
	}
	return nil
}

func newBufioReader(r io.Reader) *bufio.Reader {
	if v := bufioReaderPool.Get(); v != nil {
		br := v.(*bufio.Reader)
		br.Reset(r)
		return br
	}
	// Note: if this reader size is ever changed, update
	// TestHandlerBodyClose's assumptions.
	return bufio.NewReader(r)
}

func putBufioReader(br *bufio.Reader) {
	br.Reset(nil)
	bufioReaderPool.Put(br)
}

func newBufioWriterSize(w io.Writer, size int) *bufio.Writer {
	pool := bufioWriterPool(size)
	if pool != nil {
		if v := pool.Get(); v != nil {
			bw := v.(*bufio.Writer)
			bw.Reset(w)
			return bw
		}
	}
	return bufio.NewWriterSize(w, size)
}

func putBufioWriter(bw *bufio.Writer) {
	bw.Reset(nil)
	if pool := bufioWriterPool(bw.Available()); pool != nil {
		pool.Put(bw)
	}
}

// DefaultMaxHeaderBytes is the maximum permitted size of the headers
// in an HTTP request.
// This can be overridden by setting Server.MaxHeaderBytes.
const DefaultMaxHeaderBytes = 1 << 20 // 1 MB

func (srv *Server) maxHeaderBytes() int {
	if srv.MaxHeaderBytes > 0 {
		return srv.MaxHeaderBytes
	}
	return DefaultMaxHeaderBytes
}

func (srv *Server) initialReadLimitSize() int64 {
	return int64(srv.maxHeaderBytes()) + 4096 // bufio slop
}

// wrapper around io.ReadCloser which, on first read, sends an
// HTTP/1.1 100 Continue header
type expectContinueReader struct {
	resp       *response
	readCloser io.ReadCloser
	closed     bool
	sawEOF     bool
}

func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
	if ecr.closed {
		return 0, ErrBodyReadAfterClose
	}
	if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() {
		ecr.resp.wroteContinue = true
		ecr.resp.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
		ecr.resp.conn.bufw.Flush()
	}
	n, err = ecr.readCloser.Read(p)
	if err == io.EOF {
		ecr.sawEOF = true
	}
	return
}

func (ecr *expectContinueReader) Close() error {
	ecr.closed = true
	return ecr.readCloser.Close()
}

// TimeFormat is the time format to use when generating times in HTTP
// headers. It is like time.RFC1123 but hard-codes GMT as the time
// zone. The time being formatted must be in UTC for Format to
// generate the correct format.
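//
// Illustrative example (editor's addition): a handler setting a Date
// header by hand with this layout would write
//
//	w.Header().Set("Date", time.Now().UTC().Format(TimeFormat))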
715 // 716 // For parsing this time format, see ParseTime. 717 const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" 718 719 // appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat)) 720 func appendTime(b []byte, t time.Time) []byte { 721 const days = "SunMonTueWedThuFriSat" 722 const months = "JanFebMarAprMayJunJulAugSepOctNovDec" 723 724 t = t.UTC() 725 yy, mm, dd := t.Date() 726 hh, mn, ss := t.Clock() 727 day := days[3*t.Weekday():] 728 mon := months[3*(mm-1):] 729 730 return append(b, 731 day[0], day[1], day[2], ',', ' ', 732 byte('0'+dd/10), byte('0'+dd%10), ' ', 733 mon[0], mon[1], mon[2], ' ', 734 byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ', 735 byte('0'+hh/10), byte('0'+hh%10), ':', 736 byte('0'+mn/10), byte('0'+mn%10), ':', 737 byte('0'+ss/10), byte('0'+ss%10), ' ', 738 'G', 'M', 'T') 739 } 740 741 var errTooLarge = errors.New("http: request too large") 742 743 // Read next request from connection. 744 func (c *conn) readRequest(ctx context.Context) (w *response, err error) { 745 if c.hijacked() { 746 return nil, ErrHijacked 747 } 748 749 if d := c.server.ReadTimeout; d != 0 { 750 c.rwc.SetReadDeadline(time.Now().Add(d)) 751 } 752 if d := c.server.WriteTimeout; d != 0 { 753 defer func() { 754 c.rwc.SetWriteDeadline(time.Now().Add(d)) 755 }() 756 } 757 758 c.r.setReadLimit(c.server.initialReadLimitSize()) 759 c.mu.Lock() // while using bufr 760 if c.lastMethod == "POST" { 761 // RFC 2616 section 4.1 tolerance for old buggy clients. 762 peek, _ := c.bufr.Peek(4) // ReadRequest will get err below 763 c.bufr.Discard(numLeadingCRorLF(peek)) 764 } 765 req, err := readRequest(c.bufr, keepHostHeader) 766 c.mu.Unlock() 767 if err != nil { 768 if c.r.hitReadLimit() { 769 return nil, errTooLarge 770 } 771 return nil, err 772 } 773 774 ctx, cancelCtx := context.WithCancel(ctx) 775 req.ctx = ctx 776 777 c.lastMethod = req.Method 778 c.r.setInfiniteReadLimit() 779 780 hosts, haveHost := req.Header["Host"] 781 isH2Upgrade := req.isH2Upgrade() 782 if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade { 783 return nil, badRequestError("missing required Host header") 784 } 785 if len(hosts) > 1 { 786 return nil, badRequestError("too many Host headers") 787 } 788 if len(hosts) == 1 && !httplex.ValidHostHeader(hosts[0]) { 789 return nil, badRequestError("malformed Host header") 790 } 791 for k, vv := range req.Header { 792 if !httplex.ValidHeaderFieldName(k) { 793 return nil, badRequestError("invalid header name") 794 } 795 for _, v := range vv { 796 if !httplex.ValidHeaderFieldValue(v) { 797 return nil, badRequestError("invalid header value") 798 } 799 } 800 } 801 delete(req.Header, "Host") 802 803 req.RemoteAddr = c.remoteAddr 804 req.TLS = c.tlsState 805 if body, ok := req.Body.(*body); ok { 806 body.doEarlyClose = true 807 } 808 809 w = &response{ 810 conn: c, 811 cancelCtx: cancelCtx, 812 req: req, 813 reqBody: req.Body, 814 handlerHeader: make(Header), 815 contentLength: -1, 816 817 // We populate these ahead of time so we're not 818 // reading from req.Header after their Handler starts 819 // and maybe mutates it (Issue 14940) 820 wants10KeepAlive: req.wantsHttp10KeepAlive(), 821 wantsClose: req.wantsClose(), 822 } 823 if isH2Upgrade { 824 w.closeAfterReply = true 825 } 826 w.cw.res = w 827 w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize) 828 return w, nil 829 } 830 831 func (w *response) Header() Header { 832 if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader { 833 // Accessing the header 
between logically writing it 834 // and physically writing it means we need to allocate 835 // a clone to snapshot the logically written state. 836 w.cw.header = w.handlerHeader.clone() 837 } 838 w.calledHeader = true 839 return w.handlerHeader 840 } 841 842 // maxPostHandlerReadBytes is the max number of Request.Body bytes not 843 // consumed by a handler that the server will read from the client 844 // in order to keep a connection alive. If there are more bytes than 845 // this then the server to be paranoid instead sends a "Connection: 846 // close" response. 847 // 848 // This number is approximately what a typical machine's TCP buffer 849 // size is anyway. (if we have the bytes on the machine, we might as 850 // well read them) 851 const maxPostHandlerReadBytes = 256 << 10 852 853 func (w *response) WriteHeader(code int) { 854 if w.conn.hijacked() { 855 w.conn.server.logf("http: response.WriteHeader on hijacked connection") 856 return 857 } 858 if w.wroteHeader { 859 w.conn.server.logf("http: multiple response.WriteHeader calls") 860 return 861 } 862 w.wroteHeader = true 863 w.status = code 864 865 if w.calledHeader && w.cw.header == nil { 866 w.cw.header = w.handlerHeader.clone() 867 } 868 869 if cl := w.handlerHeader.get("Content-Length"); cl != "" { 870 v, err := strconv.ParseInt(cl, 10, 64) 871 if err == nil && v >= 0 { 872 w.contentLength = v 873 } else { 874 w.conn.server.logf("http: invalid Content-Length of %q", cl) 875 w.handlerHeader.Del("Content-Length") 876 } 877 } 878 } 879 880 // extraHeader is the set of headers sometimes added by chunkWriter.writeHeader. 881 // This type is used to avoid extra allocations from cloning and/or populating 882 // the response Header map and all its 1-element slices. 883 type extraHeader struct { 884 contentType string 885 connection string 886 transferEncoding string 887 date []byte // written if not nil 888 contentLength []byte // written if not nil 889 } 890 891 // Sorted the same as extraHeader.Write's loop. 892 var extraHeaderKeys = [][]byte{ 893 []byte("Content-Type"), 894 []byte("Connection"), 895 []byte("Transfer-Encoding"), 896 } 897 898 var ( 899 headerContentLength = []byte("Content-Length: ") 900 headerDate = []byte("Date: ") 901 ) 902 903 // Write writes the headers described in h to w. 904 // 905 // This method has a value receiver, despite the somewhat large size 906 // of h, because it prevents an allocation. The escape analysis isn't 907 // smart enough to realize this function doesn't mutate h. 908 func (h extraHeader) Write(w *bufio.Writer) { 909 if h.date != nil { 910 w.Write(headerDate) 911 w.Write(h.date) 912 w.Write(crlf) 913 } 914 if h.contentLength != nil { 915 w.Write(headerContentLength) 916 w.Write(h.contentLength) 917 w.Write(crlf) 918 } 919 for i, v := range []string{h.contentType, h.connection, h.transferEncoding} { 920 if v != "" { 921 w.Write(extraHeaderKeys[i]) 922 w.Write(colonSpace) 923 w.WriteString(v) 924 w.Write(crlf) 925 } 926 } 927 } 928 929 // writeHeader finalizes the header sent to the client and writes it 930 // to cw.res.conn.bufw. 931 // 932 // p is not written by writeHeader, but is the first chunk of the body 933 // that will be written. It is sniffed for a Content-Type if none is 934 // set explicitly. It's also used to set the Content-Length, if the 935 // total body size was small and the handler has already finished 936 // running. 
func (cw *chunkWriter) writeHeader(p []byte) {
	if cw.wroteHeader {
		return
	}
	cw.wroteHeader = true

	w := cw.res
	keepAlivesEnabled := w.conn.server.doKeepAlives()
	isHEAD := w.req.Method == "HEAD"

	// header is written out to w.conn.buf below. Depending on the
	// state of the handler, we either own the map or not. If we
	// don't own it, the exclude map is created lazily for
	// WriteSubset to remove headers. The setHeader struct holds
	// headers we need to add.
	header := cw.header
	owned := header != nil
	if !owned {
		header = w.handlerHeader
	}
	var excludeHeader map[string]bool
	delHeader := func(key string) {
		if owned {
			header.Del(key)
			return
		}
		if _, ok := header[key]; !ok {
			return
		}
		if excludeHeader == nil {
			excludeHeader = make(map[string]bool)
		}
		excludeHeader[key] = true
	}
	var setHeader extraHeader

	trailers := false
	for _, v := range cw.header["Trailer"] {
		trailers = true
		foreachHeaderElement(v, cw.res.declareTrailer)
	}

	te := header.get("Transfer-Encoding")
	hasTE := te != ""

	// If the handler is done but never sent a Content-Length
	// response header and this is our first (and last) write, set
	// it, even to zero. This helps HTTP/1.0 clients keep their
	// "keep-alive" connections alive.
	// Exceptions: 304/204/1xx responses never get Content-Length, and if
	// it was a HEAD request, we don't know the difference between
	// 0 actual bytes and 0 bytes because the handler noticed it
	// was a HEAD request and chose not to write anything. So for
	// HEAD, the handler should either write the Content-Length or
	// write non-zero bytes. If it's actually 0 bytes and the
	// handler never looked at the Request.Method, we just don't
	// send a Content-Length header.
	// Further, we don't send an automatic Content-Length if they
	// set a Transfer-Encoding, because they're generally incompatible.
	if w.handlerDone.isSet() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
		w.contentLength = int64(len(p))
		setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
	}

	// If this was an HTTP/1.0 request with keep-alive and we sent a
	// Content-Length back, we can make this a keep-alive response ...
	if w.wants10KeepAlive && keepAlivesEnabled {
		sentLength := header.get("Content-Length") != ""
		if sentLength && header.get("Connection") == "keep-alive" {
			w.closeAfterReply = false
		}
	}

	// Check for an explicit (and valid) Content-Length header.
1011 hasCL := w.contentLength != -1 1012 1013 if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) { 1014 _, connectionHeaderSet := header["Connection"] 1015 if !connectionHeaderSet { 1016 setHeader.connection = "keep-alive" 1017 } 1018 } else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose { 1019 w.closeAfterReply = true 1020 } 1021 1022 if header.get("Connection") == "close" || !keepAlivesEnabled { 1023 w.closeAfterReply = true 1024 } 1025 1026 // If the client wanted a 100-continue but we never sent it to 1027 // them (or, more strictly: we never finished reading their 1028 // request body), don't reuse this connection because it's now 1029 // in an unknown state: we might be sending this response at 1030 // the same time the client is now sending its request body 1031 // after a timeout. (Some HTTP clients send Expect: 1032 // 100-continue but knowing that some servers don't support 1033 // it, the clients set a timer and send the body later anyway) 1034 // If we haven't seen EOF, we can't skip over the unread body 1035 // because we don't know if the next bytes on the wire will be 1036 // the body-following-the-timer or the subsequent request. 1037 // See Issue 11549. 1038 if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF { 1039 w.closeAfterReply = true 1040 } 1041 1042 // Per RFC 2616, we should consume the request body before 1043 // replying, if the handler hasn't already done so. But we 1044 // don't want to do an unbounded amount of reading here for 1045 // DoS reasons, so we only try up to a threshold. 1046 // TODO(bradfitz): where does RFC 2616 say that? See Issue 15527 1047 // about HTTP/1.x Handlers concurrently reading and writing, like 1048 // HTTP/2 handlers can do. Maybe this code should be relaxed? 1049 if w.req.ContentLength != 0 && !w.closeAfterReply { 1050 var discard, tooBig bool 1051 1052 switch bdy := w.req.Body.(type) { 1053 case *expectContinueReader: 1054 if bdy.resp.wroteContinue { 1055 discard = true 1056 } 1057 case *body: 1058 bdy.mu.Lock() 1059 switch { 1060 case bdy.closed: 1061 if !bdy.sawEOF { 1062 // Body was closed in handler with non-EOF error. 1063 w.closeAfterReply = true 1064 } 1065 case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes: 1066 tooBig = true 1067 default: 1068 discard = true 1069 } 1070 bdy.mu.Unlock() 1071 default: 1072 discard = true 1073 } 1074 1075 if discard { 1076 _, err := io.CopyN(ioutil.Discard, w.reqBody, maxPostHandlerReadBytes+1) 1077 switch err { 1078 case nil: 1079 // There must be even more data left over. 1080 tooBig = true 1081 case ErrBodyReadAfterClose: 1082 // Body was already consumed and closed. 1083 case io.EOF: 1084 // The remaining body was just consumed, close it. 1085 err = w.reqBody.Close() 1086 if err != nil { 1087 w.closeAfterReply = true 1088 } 1089 default: 1090 // Some other kind of error occurred, like a read timeout, or 1091 // corrupt chunked encoding. In any case, whatever remains 1092 // on the wire must not be parsed as another HTTP request. 1093 w.closeAfterReply = true 1094 } 1095 } 1096 1097 if tooBig { 1098 w.requestTooLarge() 1099 delHeader("Connection") 1100 setHeader.connection = "close" 1101 } 1102 } 1103 1104 code := w.status 1105 if bodyAllowedForStatus(code) { 1106 // If no content type, apply sniffing algorithm to body. 
1107 _, haveType := header["Content-Type"] 1108 if !haveType && !hasTE { 1109 setHeader.contentType = DetectContentType(p) 1110 } 1111 } else { 1112 for _, k := range suppressedHeaders(code) { 1113 delHeader(k) 1114 } 1115 } 1116 1117 if _, ok := header["Date"]; !ok { 1118 setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now()) 1119 } 1120 1121 if hasCL && hasTE && te != "identity" { 1122 // TODO: return an error if WriteHeader gets a return parameter 1123 // For now just ignore the Content-Length. 1124 w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d", 1125 te, w.contentLength) 1126 delHeader("Content-Length") 1127 hasCL = false 1128 } 1129 1130 if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) { 1131 // do nothing 1132 } else if code == StatusNoContent { 1133 delHeader("Transfer-Encoding") 1134 } else if hasCL { 1135 delHeader("Transfer-Encoding") 1136 } else if w.req.ProtoAtLeast(1, 1) { 1137 // HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no 1138 // content-length has been provided. The connection must be closed after the 1139 // reply is written, and no chunking is to be done. This is the setup 1140 // recommended in the Server-Sent Events candidate recommendation 11, 1141 // section 8. 1142 if hasTE && te == "identity" { 1143 cw.chunking = false 1144 w.closeAfterReply = true 1145 } else { 1146 // HTTP/1.1 or greater: use chunked transfer encoding 1147 // to avoid closing the connection at EOF. 1148 cw.chunking = true 1149 setHeader.transferEncoding = "chunked" 1150 delHeader("Transfer-Encoding") 1151 } 1152 } else { 1153 // HTTP version < 1.1: cannot do chunked transfer 1154 // encoding and we don't know the Content-Length so 1155 // signal EOF by closing connection. 1156 w.closeAfterReply = true 1157 delHeader("Transfer-Encoding") // in case already set 1158 } 1159 1160 // Cannot use Content-Length with non-identity Transfer-Encoding. 1161 if cw.chunking { 1162 delHeader("Content-Length") 1163 } 1164 if !w.req.ProtoAtLeast(1, 0) { 1165 return 1166 } 1167 1168 if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) { 1169 delHeader("Connection") 1170 if w.req.ProtoAtLeast(1, 1) { 1171 setHeader.connection = "close" 1172 } 1173 } 1174 1175 w.conn.bufw.WriteString(statusLine(w.req, code)) 1176 cw.header.WriteSubset(w.conn.bufw, excludeHeader) 1177 setHeader.Write(w.conn.bufw) 1178 w.conn.bufw.Write(crlf) 1179 } 1180 1181 // foreachHeaderElement splits v according to the "#rule" construction 1182 // in RFC 2616 section 2.1 and calls fn for each non-empty element. 1183 func foreachHeaderElement(v string, fn func(string)) { 1184 v = textproto.TrimString(v) 1185 if v == "" { 1186 return 1187 } 1188 if !strings.Contains(v, ",") { 1189 fn(v) 1190 return 1191 } 1192 for _, f := range strings.Split(v, ",") { 1193 if f = textproto.TrimString(f); f != "" { 1194 fn(f) 1195 } 1196 } 1197 } 1198 1199 // statusLines is a cache of Status-Line strings, keyed by code (for 1200 // HTTP/1.1) or negative code (for HTTP/1.0). This is faster than a 1201 // map keyed by struct of two fields. This map's max size is bounded 1202 // by 2*len(statusText), two protocol types for each known official 1203 // status code in the statusText map. 1204 var ( 1205 statusMu sync.RWMutex 1206 statusLines = make(map[int]string) 1207 ) 1208 1209 // statusLine returns a response Status-Line (RFC 2616 Section 6.1) 1210 // for the given request and response status code. 
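// For example (editor's illustration), an HTTP/1.1 request with code 404
// yields the line "HTTP/1.1 404 Not Found\r\n".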
1211 func statusLine(req *Request, code int) string { 1212 // Fast path: 1213 key := code 1214 proto11 := req.ProtoAtLeast(1, 1) 1215 if !proto11 { 1216 key = -key 1217 } 1218 statusMu.RLock() 1219 line, ok := statusLines[key] 1220 statusMu.RUnlock() 1221 if ok { 1222 return line 1223 } 1224 1225 // Slow path: 1226 proto := "HTTP/1.0" 1227 if proto11 { 1228 proto = "HTTP/1.1" 1229 } 1230 codestring := fmt.Sprintf("%03d", code) 1231 text, ok := statusText[code] 1232 if !ok { 1233 text = "status code " + codestring 1234 } 1235 line = proto + " " + codestring + " " + text + "\r\n" 1236 if ok { 1237 statusMu.Lock() 1238 defer statusMu.Unlock() 1239 statusLines[key] = line 1240 } 1241 return line 1242 } 1243 1244 // bodyAllowed reports whether a Write is allowed for this response type. 1245 // It's illegal to call this before the header has been flushed. 1246 func (w *response) bodyAllowed() bool { 1247 if !w.wroteHeader { 1248 panic("") 1249 } 1250 return bodyAllowedForStatus(w.status) 1251 } 1252 1253 // The Life Of A Write is like this: 1254 // 1255 // Handler starts. No header has been sent. The handler can either 1256 // write a header, or just start writing. Writing before sending a header 1257 // sends an implicitly empty 200 OK header. 1258 // 1259 // If the handler didn't declare a Content-Length up front, we either 1260 // go into chunking mode or, if the handler finishes running before 1261 // the chunking buffer size, we compute a Content-Length and send that 1262 // in the header instead. 1263 // 1264 // Likewise, if the handler didn't set a Content-Type, we sniff that 1265 // from the initial chunk of output. 1266 // 1267 // The Writers are wired together like: 1268 // 1269 // 1. *response (the ResponseWriter) -> 1270 // 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes 1271 // 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type) 1272 // and which writes the chunk headers, if needed. 1273 // 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to -> 1274 // 5. checkConnErrorWriter{c}, which notes any non-nil error on Write 1275 // and populates c.werr with it if so. but otherwise writes to: 1276 // 6. the rwc, the net.Conn. 1277 // 1278 // TODO(bradfitz): short-circuit some of the buffering when the 1279 // initial header contains both a Content-Type and Content-Length. 1280 // Also short-circuit in (1) when the header's been sent and not in 1281 // chunking mode, writing directly to (4) instead, if (2) has no 1282 // buffered data. More generally, we could short-circuit from (1) to 1283 // (3) even in chunking mode if the write size from (1) is over some 1284 // threshold and nothing is in (2). The answer might be mostly making 1285 // bufferBeforeChunkingSize smaller and having bufio's fast-paths deal 1286 // with this instead. 1287 func (w *response) Write(data []byte) (n int, err error) { 1288 return w.write(len(data), data, "") 1289 } 1290 1291 func (w *response) WriteString(data string) (n int, err error) { 1292 return w.write(len(data), nil, data) 1293 } 1294 1295 // either dataB or dataS is non-zero. 
1296 func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) { 1297 if w.conn.hijacked() { 1298 w.conn.server.logf("http: response.Write on hijacked connection") 1299 return 0, ErrHijacked 1300 } 1301 if !w.wroteHeader { 1302 w.WriteHeader(StatusOK) 1303 } 1304 if lenData == 0 { 1305 return 0, nil 1306 } 1307 if !w.bodyAllowed() { 1308 return 0, ErrBodyNotAllowed 1309 } 1310 1311 w.written += int64(lenData) // ignoring errors, for errorKludge 1312 if w.contentLength != -1 && w.written > w.contentLength { 1313 return 0, ErrContentLength 1314 } 1315 if dataB != nil { 1316 return w.w.Write(dataB) 1317 } else { 1318 return w.w.WriteString(dataS) 1319 } 1320 } 1321 1322 func (w *response) finishRequest() { 1323 w.handlerDone.setTrue() 1324 1325 if !w.wroteHeader { 1326 w.WriteHeader(StatusOK) 1327 } 1328 1329 w.w.Flush() 1330 putBufioWriter(w.w) 1331 w.cw.close() 1332 w.conn.bufw.Flush() 1333 1334 // Close the body (regardless of w.closeAfterReply) so we can 1335 // re-use its bufio.Reader later safely. 1336 w.reqBody.Close() 1337 1338 if w.req.MultipartForm != nil { 1339 w.req.MultipartForm.RemoveAll() 1340 } 1341 } 1342 1343 // shouldReuseConnection reports whether the underlying TCP connection can be reused. 1344 // It must only be called after the handler is done executing. 1345 func (w *response) shouldReuseConnection() bool { 1346 if w.closeAfterReply { 1347 // The request or something set while executing the 1348 // handler indicated we shouldn't reuse this 1349 // connection. 1350 return false 1351 } 1352 1353 if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written { 1354 // Did not write enough. Avoid getting out of sync. 1355 return false 1356 } 1357 1358 // There was some error writing to the underlying connection 1359 // during the request, so don't re-use this conn. 1360 if w.conn.werr != nil { 1361 return false 1362 } 1363 1364 if w.closedRequestBodyEarly() { 1365 return false 1366 } 1367 1368 return true 1369 } 1370 1371 func (w *response) closedRequestBodyEarly() bool { 1372 body, ok := w.req.Body.(*body) 1373 return ok && body.didEarlyClose() 1374 } 1375 1376 func (w *response) Flush() { 1377 if !w.wroteHeader { 1378 w.WriteHeader(StatusOK) 1379 } 1380 w.w.Flush() 1381 w.cw.flush() 1382 } 1383 1384 func (c *conn) finalFlush() { 1385 if c.bufr != nil { 1386 // Steal the bufio.Reader (~4KB worth of memory) and its associated 1387 // reader for a future connection. 1388 putBufioReader(c.bufr) 1389 c.bufr = nil 1390 } 1391 1392 if c.bufw != nil { 1393 c.bufw.Flush() 1394 // Steal the bufio.Writer (~4KB worth of memory) and its associated 1395 // writer for a future connection. 1396 putBufioWriter(c.bufw) 1397 c.bufw = nil 1398 } 1399 } 1400 1401 // Close the connection. 1402 func (c *conn) close() { 1403 c.finalFlush() 1404 c.rwc.Close() 1405 } 1406 1407 // rstAvoidanceDelay is the amount of time we sleep after closing the 1408 // write side of a TCP connection before closing the entire socket. 1409 // By sleeping, we increase the chances that the client sees our FIN 1410 // and processes its final data before they process the subsequent RST 1411 // from closing a connection with known unread data. 1412 // This RST seems to occur mostly on BSD systems. (And Windows?) 1413 // This timeout is somewhat arbitrary (~latency around the planet). 
const rstAvoidanceDelay = 500 * time.Millisecond

type closeWriter interface {
	CloseWrite() error
}

var _ closeWriter = (*net.TCPConn)(nil)

// closeWrite flushes any outstanding data and sends a FIN packet (if
// client is connected via TCP), signalling that we're done. We then
// pause for a bit, hoping the client processes it before any
// subsequent RST.
//
// See https://golang.org/issue/3595
func (c *conn) closeWriteAndWait() {
	c.finalFlush()
	if tcp, ok := c.rwc.(closeWriter); ok {
		tcp.CloseWrite()
	}
	time.Sleep(rstAvoidanceDelay)
}

// validNPN reports whether the proto is not a blacklisted Next
// Protocol Negotiation protocol. Empty and built-in protocol types
// are blacklisted and can't be overridden with alternate
// implementations.
func validNPN(proto string) bool {
	switch proto {
	case "", "http/1.1", "http/1.0":
		return false
	}
	return true
}

func (c *conn) setState(nc net.Conn, state ConnState) {
	if hook := c.server.ConnState; hook != nil {
		hook(nc, state)
	}
}

// badRequestError is a literal string (used by the server in HTML,
// unescaped) to tell the user why their request was bad. It should
// be plain text without user info or other embedded errors.
type badRequestError string

func (e badRequestError) Error() string { return "Bad Request: " + string(e) }

// Serve a new connection.
func (c *conn) serve(ctx context.Context) {
	c.remoteAddr = c.rwc.RemoteAddr().String()
	defer func() {
		if err := recover(); err != nil {
			const size = 64 << 10
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf)
		}
		if !c.hijacked() {
			c.close()
			c.setState(c.rwc, StateClosed)
		}
	}()

	if tlsConn, ok := c.rwc.(*tls.Conn); ok {
		if d := c.server.ReadTimeout; d != 0 {
			c.rwc.SetReadDeadline(time.Now().Add(d))
		}
		if d := c.server.WriteTimeout; d != 0 {
			c.rwc.SetWriteDeadline(time.Now().Add(d))
		}
		if err := tlsConn.Handshake(); err != nil {
			c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err)
			return
		}
		c.tlsState = new(tls.ConnectionState)
		*c.tlsState = tlsConn.ConnectionState()
		if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) {
			if fn := c.server.TLSNextProto[proto]; fn != nil {
				h := initNPNRequest{tlsConn, serverHandler{c.server}}
				fn(c.server, tlsConn, h)
			}
			return
		}
	}

	// HTTP/1.x from here on.

	c.r = &connReader{r: c.rwc}
	c.bufr = newBufioReader(c.r)
	c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10)

	ctx, cancelCtx := context.WithCancel(ctx)
	defer cancelCtx()

	for {
		w, err := c.readRequest(ctx)
		if c.r.remain != c.server.initialReadLimitSize() {
			// If we read any bytes off the wire, we're active.
			c.setState(c.rwc, StateActive)
		}
		if err != nil {
			if err == errTooLarge {
				// Their HTTP client may or may not be
				// able to read this if we're
				// responding to them and hanging up
				// while they're still writing their
				// request. Undefined behavior.
1521 io.WriteString(c.rwc, "HTTP/1.1 431 Request Header Fields Too Large\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n431 Request Header Fields Too Large") 1522 c.closeWriteAndWait() 1523 return 1524 } 1525 if err == io.EOF { 1526 return // don't reply 1527 } 1528 if neterr, ok := err.(net.Error); ok && neterr.Timeout() { 1529 return // don't reply 1530 } 1531 var publicErr string 1532 if v, ok := err.(badRequestError); ok { 1533 publicErr = ": " + string(v) 1534 } 1535 io.WriteString(c.rwc, "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n400 Bad Request"+publicErr) 1536 return 1537 } 1538 1539 // Expect 100 Continue support 1540 req := w.req 1541 if req.expectsContinue() { 1542 if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 { 1543 // Wrap the Body reader with one that replies on the connection 1544 req.Body = &expectContinueReader{readCloser: req.Body, resp: w} 1545 } 1546 } else if req.Header.get("Expect") != "" { 1547 w.sendExpectationFailed() 1548 return 1549 } 1550 1551 // HTTP cannot have multiple simultaneous active requests.[*] 1552 // Until the server replies to this request, it can't read another, 1553 // so we might as well run the handler in this goroutine. 1554 // [*] Not strictly true: HTTP pipelining. We could let them all process 1555 // in parallel even if their responses need to be serialized. 1556 serverHandler{c.server}.ServeHTTP(w, w.req) 1557 w.cancelCtx() 1558 if c.hijacked() { 1559 return 1560 } 1561 w.finishRequest() 1562 if !w.shouldReuseConnection() { 1563 if w.requestBodyLimitHit || w.closedRequestBodyEarly() { 1564 c.closeWriteAndWait() 1565 } 1566 return 1567 } 1568 c.setState(c.rwc, StateIdle) 1569 } 1570 } 1571 1572 func (w *response) sendExpectationFailed() { 1573 // TODO(bradfitz): let ServeHTTP handlers handle 1574 // requests with non-standard expectation[s]? Seems 1575 // theoretical at best, and doesn't fit into the 1576 // current ServeHTTP model anyway. We'd need to 1577 // make the ResponseWriter an optional 1578 // "ExpectReplier" interface or something. 1579 // 1580 // For now we'll just obey RFC 2616 14.20 which says 1581 // "If a server receives a request containing an 1582 // Expect field that includes an expectation- 1583 // extension that it does not support, it MUST 1584 // respond with a 417 (Expectation Failed) status." 1585 w.Header().Set("Connection", "close") 1586 w.WriteHeader(StatusExpectationFailed) 1587 w.finishRequest() 1588 } 1589 1590 // Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter 1591 // and a Hijacker. 1592 func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { 1593 if w.handlerDone.isSet() { 1594 panic("net/http: Hijack called after ServeHTTP finished") 1595 } 1596 if w.wroteHeader { 1597 w.cw.flush() 1598 } 1599 1600 c := w.conn 1601 c.mu.Lock() 1602 defer c.mu.Unlock() 1603 1604 if w.closeNotifyCh != nil { 1605 return nil, nil, errors.New("http: Hijack is incompatible with use of CloseNotifier in same ServeHTTP call") 1606 } 1607 1608 // Release the bufioWriter that writes to the chunk writer, it is not 1609 // used after a connection has been hijacked. 
1610 rwc, buf, err = c.hijackLocked() 1611 if err == nil { 1612 putBufioWriter(w.w) 1613 w.w = nil 1614 } 1615 return rwc, buf, err 1616 } 1617 1618 func (w *response) CloseNotify() <-chan bool { 1619 if w.handlerDone.isSet() { 1620 panic("net/http: CloseNotify called after ServeHTTP finished") 1621 } 1622 c := w.conn 1623 c.mu.Lock() 1624 defer c.mu.Unlock() 1625 1626 if w.closeNotifyCh != nil { 1627 return w.closeNotifyCh 1628 } 1629 ch := make(chan bool, 1) 1630 w.closeNotifyCh = ch 1631 1632 if w.conn.hijackedv { 1633 // CloseNotify is undefined after a hijack, but we have 1634 // no place to return an error, so just return a channel, 1635 // even though it'll never receive a value. 1636 return ch 1637 } 1638 1639 var once sync.Once 1640 notify := func() { once.Do(func() { ch <- true }) } 1641 1642 if requestBodyRemains(w.reqBody) { 1643 // They're still consuming the request body, so we 1644 // shouldn't notify yet. 1645 registerOnHitEOF(w.reqBody, func() { 1646 c.mu.Lock() 1647 defer c.mu.Unlock() 1648 startCloseNotifyBackgroundRead(c, notify) 1649 }) 1650 } else { 1651 startCloseNotifyBackgroundRead(c, notify) 1652 } 1653 return ch 1654 } 1655 1656 // c.mu must be held. 1657 func startCloseNotifyBackgroundRead(c *conn, notify func()) { 1658 if c.bufr.Buffered() > 0 { 1659 // They've consumed the request body, so anything 1660 // remaining is a pipelined request, which we 1661 // document as firing on. 1662 notify() 1663 } else { 1664 c.r.startBackgroundRead(notify) 1665 } 1666 } 1667 1668 func registerOnHitEOF(rc io.ReadCloser, fn func()) { 1669 switch v := rc.(type) { 1670 case *expectContinueReader: 1671 registerOnHitEOF(v.readCloser, fn) 1672 case *body: 1673 v.registerOnHitEOF(fn) 1674 default: 1675 panic("unexpected type " + fmt.Sprintf("%T", rc)) 1676 } 1677 } 1678 1679 // requestBodyRemains reports whether future calls to Read 1680 // on rc might yield more data. 1681 func requestBodyRemains(rc io.ReadCloser) bool { 1682 if rc == eofReader { 1683 return false 1684 } 1685 switch v := rc.(type) { 1686 case *expectContinueReader: 1687 return requestBodyRemains(v.readCloser) 1688 case *body: 1689 return v.bodyRemains() 1690 default: 1691 panic("unexpected type " + fmt.Sprintf("%T", rc)) 1692 } 1693 } 1694 1695 // The HandlerFunc type is an adapter to allow the use of 1696 // ordinary functions as HTTP handlers. If f is a function 1697 // with the appropriate signature, HandlerFunc(f) is a 1698 // Handler that calls f. 1699 type HandlerFunc func(ResponseWriter, *Request) 1700 1701 // ServeHTTP calls f(w, r). 1702 func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) { 1703 f(w, r) 1704 } 1705 1706 // Helper handlers 1707 1708 // Error replies to the request with the specified error message and HTTP code. 1709 // It does not otherwise end the request; the caller should ensure no further 1710 // writes are done to w. 1711 // The error message should be plain text. 1712 func Error(w ResponseWriter, error string, code int) { 1713 w.Header().Set("Content-Type", "text/plain; charset=utf-8") 1714 w.Header().Set("X-Content-Type-Options", "nosniff") 1715 w.WriteHeader(code) 1716 fmt.Fprintln(w, error) 1717 } 1718 1719 // NotFound replies to the request with an HTTP 404 not found error. 1720 func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) } 1721 1722 // NotFoundHandler returns a simple request handler 1723 // that replies to each request with a ``404 page not found'' reply. 
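//
// Illustrative usage (editor's addition), e.g. reserving a retired path
// on a hypothetical mux:
//
//	mux.Handle("/old-api/", NotFoundHandler())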
func NotFoundHandler() Handler { return HandlerFunc(NotFound) }

// StripPrefix returns a handler that serves HTTP requests
// by removing the given prefix from the request URL's Path
// and invoking the handler h. StripPrefix handles a
// request for a path that doesn't begin with prefix by
// replying with an HTTP 404 not found error.
func StripPrefix(prefix string, h Handler) Handler {
	if prefix == "" {
		return h
	}
	return HandlerFunc(func(w ResponseWriter, r *Request) {
		if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) {
			r.URL.Path = p
			h.ServeHTTP(w, r)
		} else {
			NotFound(w, r)
		}
	})
}

// Redirect replies to the request with a redirect to url,
// which may be a path relative to the request path.
//
// The provided code should be in the 3xx range and is usually
// StatusMovedPermanently, StatusFound or StatusSeeOther.
func Redirect(w ResponseWriter, r *Request, urlStr string, code int) {
	if u, err := url.Parse(urlStr); err == nil {
		// If url was relative, make absolute by
		// combining with request path.
		// The browser would probably do this for us,
		// but doing it ourselves is more reliable.

		// NOTE(rsc): RFC 2616 says that the Location
		// line must be an absolute URI, like
		// "http://www.google.com/redirect/",
		// not a path like "/redirect/".
		// Unfortunately, we don't know what to
		// put in the host name section to get the
		// client to connect to us again, so we can't
		// know the right absolute URI to send back.
		// Because of this problem, no one pays attention
		// to the RFC; they all send back just a new path.
		// So do we.
		if u.Scheme == "" && u.Host == "" {
			oldpath := r.URL.Path
			if oldpath == "" { // should not happen, but avoid a crash if it does
				oldpath = "/"
			}

			// no leading http://server
			if urlStr == "" || urlStr[0] != '/' {
				// make relative path absolute
				olddir, _ := path.Split(oldpath)
				urlStr = olddir + urlStr
			}

			var query string
			if i := strings.Index(urlStr, "?"); i != -1 {
				urlStr, query = urlStr[:i], urlStr[i:]
			}

			// clean up but preserve trailing slash
			trailing := strings.HasSuffix(urlStr, "/")
			urlStr = path.Clean(urlStr)
			if trailing && !strings.HasSuffix(urlStr, "/") {
				urlStr += "/"
			}
			urlStr += query
		}
	}

	w.Header().Set("Location", urlStr)
	w.WriteHeader(code)

	// RFC 2616 recommends that a short note "SHOULD" be included in the
	// response because older user agents may not understand 301/307.
	// Shouldn't send the response for POST or HEAD; that leaves GET.
	if r.Method == "GET" {
		note := "<a href=\"" + htmlEscape(urlStr) + "\">" + statusText[code] + "</a>.\n"
		fmt.Fprintln(w, note)
	}
}
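
// Illustrative sketch (editor's addition, not part of the original file):
// combining the helpers above into one handler. The "/docs/" prefix and
// the directory path are hypothetical.
var exampleDocsHandler Handler = HandlerFunc(func(w ResponseWriter, r *Request) {
	if r.URL.Path == "/docs" {
		// Add the trailing slash, the same way ServeMux does for subtrees.
		Redirect(w, r, "/docs/", StatusMovedPermanently)
		return
	}
	StripPrefix("/docs/", FileServer(Dir("/usr/share/doc"))).ServeHTTP(w, r)
})
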
1815 "'", "&#39;", 1816 ) 1817 1818 func htmlEscape(s string) string { 1819 return htmlReplacer.Replace(s) 1820 } 1821 1822 // Redirect to a fixed URL 1823 type redirectHandler struct { 1824 url string 1825 code int 1826 } 1827 1828 func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) { 1829 Redirect(w, r, rh.url, rh.code) 1830 } 1831 1832 // RedirectHandler returns a request handler that redirects 1833 // each request it receives to the given url using the given 1834 // status code. 1835 // 1836 // The provided code should be in the 3xx range and is usually 1837 // StatusMovedPermanently, StatusFound or StatusSeeOther. 1838 func RedirectHandler(url string, code int) Handler { 1839 return &redirectHandler{url, code} 1840 } 1841 1842 // ServeMux is an HTTP request multiplexer. 1843 // It matches the URL of each incoming request against a list of registered 1844 // patterns and calls the handler for the pattern that 1845 // most closely matches the URL. 1846 // 1847 // Patterns name fixed, rooted paths, like "/favicon.ico", 1848 // or rooted subtrees, like "/images/" (note the trailing slash). 1849 // Longer patterns take precedence over shorter ones, so that 1850 // if there are handlers registered for both "/images/" 1851 // and "/images/thumbnails/", the latter handler will be 1852 // called for paths beginning "/images/thumbnails/" and the 1853 // former will receive requests for any other paths in the 1854 // "/images/" subtree. 1855 // 1856 // Note that since a pattern ending in a slash names a rooted subtree, 1857 // the pattern "/" matches all paths not matched by other registered 1858 // patterns, not just the URL with Path == "/". 1859 // 1860 // If a subtree has been registered and a request is received naming the 1861 // subtree root without its trailing slash, ServeMux redirects that 1862 // request to the subtree root (adding the trailing slash). This behavior can 1863 // be overridden with a separate registration for the path without 1864 // the trailing slash. For example, registering "/images/" causes ServeMux 1865 // to redirect a request for "/images" to "/images/", unless "/images" has 1866 // been registered separately. 1867 // 1868 // Patterns may optionally begin with a host name, restricting matches to 1869 // URLs on that host only. Host-specific patterns take precedence over 1870 // general patterns, so that a handler might register for the two patterns 1871 // "/codesearch" and "codesearch.google.com/" without also taking over 1872 // requests for "http://www.google.com/". 1873 // 1874 // ServeMux also takes care of sanitizing the URL request path, 1875 // redirecting any request containing . or .. elements or repeated slashes 1876 // to an equivalent, cleaner URL. 1877 type ServeMux struct { 1878 mu sync.RWMutex 1879 m map[string]muxEntry 1880 hosts bool // whether any patterns contain hostnames 1881 } 1882 1883 type muxEntry struct { 1884 explicit bool 1885 h Handler 1886 pattern string 1887 } 1888 1889 // NewServeMux allocates and returns a new ServeMux. 1890 func NewServeMux() *ServeMux { return new(ServeMux) } 1891 1892 // DefaultServeMux is the default ServeMux used by Serve. 1893 var DefaultServeMux = &defaultServeMux 1894 1895 var defaultServeMux ServeMux 1896 1897 // Does path match pattern?
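// Editorial sketch (not part of the original source): how the ServeMux pattern
// rules documented above, and implemented by pathMatch and ServeMux.match
// below, look from a caller's point of view. The assets handler and the
// concrete routes are hypothetical, and the usual imports (io, net/http) are
// assumed.
//
//	mux := http.NewServeMux()
//	// "/static/" names a rooted subtree, so it also matches
//	// "/static/css/site.css"; StripPrefix removes "/static" before the
//	// wrapped handler sees the path.
//	mux.Handle("/static/", http.StripPrefix("/static", assets))
//	// A pattern without a trailing slash matches only that exact path.
//	mux.HandleFunc("/version", func(w http.ResponseWriter, r *http.Request) {
//		io.WriteString(w, "v1\n")
//	})
//	// "/" is itself a subtree pattern; it catches everything not matched above.
//	mux.Handle("/", http.RedirectHandler("/static/", http.StatusFound))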
1898 func pathMatch(pattern, path string) bool { 1899 if len(pattern) == 0 { 1900 // should not happen 1901 return false 1902 } 1903 n := len(pattern) 1904 if pattern[n-1] != '/' { 1905 return pattern == path 1906 } 1907 return len(path) >= n && path[0:n] == pattern 1908 } 1909 1910 // Return the canonical path for p, eliminating . and .. elements. 1911 func cleanPath(p string) string { 1912 if p == "" { 1913 return "/" 1914 } 1915 if p[0] != '/' { 1916 p = "/" + p 1917 } 1918 np := path.Clean(p) 1919 // path.Clean removes trailing slash except for root; 1920 // put the trailing slash back if necessary. 1921 if p[len(p)-1] == '/' && np != "/" { 1922 np += "/" 1923 } 1924 return np 1925 } 1926 1927 // Find a handler on a handler map given a path string 1928 // Most-specific (longest) pattern wins 1929 func (mux *ServeMux) match(path string) (h Handler, pattern string) { 1930 var n = 0 1931 for k, v := range mux.m { 1932 if !pathMatch(k, path) { 1933 continue 1934 } 1935 if h == nil || len(k) > n { 1936 n = len(k) 1937 h = v.h 1938 pattern = v.pattern 1939 } 1940 } 1941 return 1942 } 1943 1944 // Handler returns the handler to use for the given request, 1945 // consulting r.Method, r.Host, and r.URL.Path. It always returns 1946 // a non-nil handler. If the path is not in its canonical form, the 1947 // handler will be an internally-generated handler that redirects 1948 // to the canonical path. 1949 // 1950 // Handler also returns the registered pattern that matches the 1951 // request or, in the case of internally-generated redirects, 1952 // the pattern that will match after following the redirect. 1953 // 1954 // If there is no registered handler that applies to the request, 1955 // Handler returns a ``page not found'' handler and an empty pattern. 1956 func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) { 1957 if r.Method != "CONNECT" { 1958 if p := cleanPath(r.URL.Path); p != r.URL.Path { 1959 _, pattern = mux.handler(r.Host, p) 1960 url := *r.URL 1961 url.Path = p 1962 return RedirectHandler(url.String(), StatusMovedPermanently), pattern 1963 } 1964 } 1965 1966 return mux.handler(r.Host, r.URL.Path) 1967 } 1968 1969 // handler is the main implementation of Handler. 1970 // The path is known to be in canonical form, except for CONNECT methods. 1971 func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) { 1972 mux.mu.RLock() 1973 defer mux.mu.RUnlock() 1974 1975 // Host-specific pattern takes precedence over generic ones 1976 if mux.hosts { 1977 h, pattern = mux.match(host + path) 1978 } 1979 if h == nil { 1980 h, pattern = mux.match(path) 1981 } 1982 if h == nil { 1983 h, pattern = NotFoundHandler(), "" 1984 } 1985 return 1986 } 1987 1988 // ServeHTTP dispatches the request to the handler whose 1989 // pattern most closely matches the request URL. 1990 func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) { 1991 if r.RequestURI == "*" { 1992 if r.ProtoAtLeast(1, 1) { 1993 w.Header().Set("Connection", "close") 1994 } 1995 w.WriteHeader(StatusBadRequest) 1996 return 1997 } 1998 h, _ := mux.Handler(r) 1999 h.ServeHTTP(w, r) 2000 } 2001 2002 // Handle registers the handler for the given pattern. 2003 // If a handler already exists for pattern, Handle panics. 
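// Editorial example (not part of the original source): a sketch of the
// registration behavior described above, including the implicit redirect that
// Handle installs for a subtree pattern. imagesHandler and thumbsHandler are
// hypothetical handlers.
//
//	mux := http.NewServeMux()
//	mux.Handle("/images/", imagesHandler)            // subtree pattern
//	mux.Handle("/images/thumbnails/", thumbsHandler) // longer pattern wins for its subtree
//	// Registering "/images/" also makes a request for "/images" redirect
//	// permanently to "/images/", unless "/images" is registered explicitly.
//	// Registering the same pattern twice panics:
//	//	mux.Handle("/images/", imagesHandler) // panics: multiple registrations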
2004 func (mux *ServeMux) Handle(pattern string, handler Handler) { 2005 mux.mu.Lock() 2006 defer mux.mu.Unlock() 2007 2008 if pattern == "" { 2009 panic("http: invalid pattern " + pattern) 2010 } 2011 if handler == nil { 2012 panic("http: nil handler") 2013 } 2014 if mux.m[pattern].explicit { 2015 panic("http: multiple registrations for " + pattern) 2016 } 2017 2018 if mux.m == nil { 2019 mux.m = make(map[string]muxEntry) 2020 } 2021 mux.m[pattern] = muxEntry{explicit: true, h: handler, pattern: pattern} 2022 2023 if pattern[0] != '/' { 2024 mux.hosts = true 2025 } 2026 2027 // Helpful behavior: 2028 // If pattern is /tree/, insert an implicit permanent redirect for /tree. 2029 // It can be overridden by an explicit registration. 2030 n := len(pattern) 2031 if n > 0 && pattern[n-1] == '/' && !mux.m[pattern[0:n-1]].explicit { 2032 // If pattern contains a host name, strip it and use remaining 2033 // path for redirect. 2034 path := pattern 2035 if pattern[0] != '/' { 2036 // In pattern, at least the last character is a '/', so 2037 // strings.Index can't be -1. 2038 path = pattern[strings.Index(pattern, "/"):] 2039 } 2040 url := &url.URL{Path: path} 2041 mux.m[pattern[0:n-1]] = muxEntry{h: RedirectHandler(url.String(), StatusMovedPermanently), pattern: pattern} 2042 } 2043 } 2044 2045 // HandleFunc registers the handler function for the given pattern. 2046 func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { 2047 mux.Handle(pattern, HandlerFunc(handler)) 2048 } 2049 2050 // Handle registers the handler for the given pattern 2051 // in the DefaultServeMux. 2052 // The documentation for ServeMux explains how patterns are matched. 2053 func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } 2054 2055 // HandleFunc registers the handler function for the given pattern 2056 // in the DefaultServeMux. 2057 // The documentation for ServeMux explains how patterns are matched. 2058 func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { 2059 DefaultServeMux.HandleFunc(pattern, handler) 2060 } 2061 2062 // Serve accepts incoming HTTP connections on the listener l, 2063 // creating a new service goroutine for each. The service goroutines 2064 // read requests and then call handler to reply to them. 2065 // Handler is typically nil, in which case the DefaultServeMux is used. 2066 func Serve(l net.Listener, handler Handler) error { 2067 srv := &Server{Handler: handler} 2068 return srv.Serve(l) 2069 } 2070 2071 // A Server defines parameters for running an HTTP server. 2072 // The zero value for Server is a valid configuration. 2073 type Server struct { 2074 Addr string // TCP address to listen on, ":http" if empty 2075 Handler Handler // handler to invoke, http.DefaultServeMux if nil 2076 ReadTimeout time.Duration // maximum duration before timing out read of the request 2077 WriteTimeout time.Duration // maximum duration before timing out write of the response 2078 TLSConfig *tls.Config // optional TLS config, used by ListenAndServeTLS 2079 2080 // MaxHeaderBytes controls the maximum number of bytes the 2081 // server will read parsing the request header's keys and 2082 // values, including the request line. It does not limit the 2083 // size of the request body. 2084 // If zero, DefaultMaxHeaderBytes is used. 2085 MaxHeaderBytes int 2086 2087 // TLSNextProto optionally specifies a function to take over 2088 // ownership of the provided TLS connection when an NPN/ALPN 2089 // protocol upgrade has occurred. 
The map key is the protocol 2090 // name negotiated. The Handler argument should be used to 2091 // handle HTTP requests and will initialize the Request's TLS 2092 // and RemoteAddr if not already set. The connection is 2093 // automatically closed when the function returns. 2094 // If TLSNextProto is nil, HTTP/2 support is enabled automatically. 2095 TLSNextProto map[string]func(*Server, *tls.Conn, Handler) 2096 2097 // ConnState specifies an optional callback function that is 2098 // called when a client connection changes state. See the 2099 // ConnState type and associated constants for details. 2100 ConnState func(net.Conn, ConnState) 2101 2102 // ErrorLog specifies an optional logger for errors accepting 2103 // connections and unexpected behavior from handlers. 2104 // If nil, logging goes to os.Stderr via the log package's 2105 // standard logger. 2106 ErrorLog *log.Logger 2107 2108 disableKeepAlives int32 // accessed atomically. 2109 nextProtoOnce sync.Once // guards initialization of TLSNextProto in Serve 2110 nextProtoErr error 2111 } 2112 2113 // A ConnState represents the state of a client connection to a server. 2114 // It's used by the optional Server.ConnState hook. 2115 type ConnState int 2116 2117 const ( 2118 // StateNew represents a new connection that is expected to 2119 // send a request immediately. Connections begin at this 2120 // state and then transition to either StateActive or 2121 // StateClosed. 2122 StateNew ConnState = iota 2123 2124 // StateActive represents a connection that has read 1 or more 2125 // bytes of a request. The Server.ConnState hook for 2126 // StateActive fires before the request has entered a handler 2127 // and doesn't fire again until the request has been 2128 // handled. After the request is handled, the state 2129 // transitions to StateClosed, StateHijacked, or StateIdle. 2130 // For HTTP/2, StateActive fires on the transition from zero 2131 // to one active request, and only transitions away once all 2132 // active requests are complete. That means that ConnState 2133 // cannot be used to do per-request work; ConnState only notes 2134 // the overall state of the connection. 2135 StateActive 2136 2137 // StateIdle represents a connection that has finished 2138 // handling a request and is in the keep-alive state, waiting 2139 // for a new request. Connections transition from StateIdle 2140 // to either StateActive or StateClosed. 2141 StateIdle 2142 2143 // StateHijacked represents a hijacked connection. 2144 // This is a terminal state. It does not transition to StateClosed. 2145 StateHijacked 2146 2147 // StateClosed represents a closed connection. 2148 // This is a terminal state. Hijacked connections do not 2149 // transition to StateClosed. 2150 StateClosed 2151 ) 2152 2153 var stateName = map[ConnState]string{ 2154 StateNew: "new", 2155 StateActive: "active", 2156 StateIdle: "idle", 2157 StateHijacked: "hijacked", 2158 StateClosed: "closed", 2159 } 2160 2161 func (c ConnState) String() string { 2162 return stateName[c] 2163 } 2164 2165 // serverHandler delegates to either the server's Handler or 2166 // DefaultServeMux and also handles "OPTIONS *" requests. 
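// Editorial sketch (not part of the original source): one way to wire up the
// Server fields and ConnState hook described above. The mux variable and the
// chosen timeouts are hypothetical, and the usual imports (log, net, net/http,
// time) are assumed.
//
//	srv := &http.Server{
//		Addr:           ":8080",
//		Handler:        mux, // nil means DefaultServeMux
//		ReadTimeout:    10 * time.Second,
//		WriteTimeout:   10 * time.Second,
//		MaxHeaderBytes: 1 << 20,
//		ConnState: func(c net.Conn, state http.ConnState) {
//			log.Printf("%v: %v", c.RemoteAddr(), state) // "new", "active", "idle", ...
//		},
//	}
//	log.Fatal(srv.ListenAndServe())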
2167 type serverHandler struct { 2168 srv *Server 2169 } 2170 2171 func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) { 2172 handler := sh.srv.Handler 2173 if handler == nil { 2174 handler = DefaultServeMux 2175 } 2176 if req.RequestURI == "*" && req.Method == "OPTIONS" { 2177 handler = globalOptionsHandler{} 2178 } 2179 handler.ServeHTTP(rw, req) 2180 } 2181 2182 // ListenAndServe listens on the TCP network address srv.Addr and then 2183 // calls Serve to handle requests on incoming connections. 2184 // Accepted connections are configured to enable TCP keep-alives. 2185 // If srv.Addr is blank, ":http" is used. 2186 // ListenAndServe always returns a non-nil error. 2187 func (srv *Server) ListenAndServe() error { 2188 addr := srv.Addr 2189 if addr == "" { 2190 addr = ":http" 2191 } 2192 ln, err := net.Listen("tcp", addr) 2193 if err != nil { 2194 return err 2195 } 2196 return srv.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)}) 2197 } 2198 2199 var testHookServerServe func(*Server, net.Listener) // used if non-nil 2200 2201 // Serve accepts incoming connections on the Listener l, creating a 2202 // new service goroutine for each. The service goroutines read requests and 2203 // then call srv.Handler to reply to them. 2204 // Serve always returns a non-nil error. 2205 func (srv *Server) Serve(l net.Listener) error { 2206 defer l.Close() 2207 if fn := testHookServerServe; fn != nil { 2208 fn(srv, l) 2209 } 2210 var tempDelay time.Duration // how long to sleep on accept failure 2211 if err := srv.setupHTTP2(); err != nil { 2212 return err 2213 } 2214 // TODO: allow changing base context? can't imagine concrete 2215 // use cases yet. 2216 baseCtx := context.Background() 2217 ctx := context.WithValue(baseCtx, ServerContextKey, srv) 2218 ctx = context.WithValue(ctx, LocalAddrContextKey, l.Addr()) 2219 for { 2220 rw, e := l.Accept() 2221 if e != nil { 2222 if ne, ok := e.(net.Error); ok && ne.Temporary() { 2223 if tempDelay == 0 { 2224 tempDelay = 5 * time.Millisecond 2225 } else { 2226 tempDelay *= 2 2227 } 2228 if max := 1 * time.Second; tempDelay > max { 2229 tempDelay = max 2230 } 2231 srv.logf("http: Accept error: %v; retrying in %v", e, tempDelay) 2232 time.Sleep(tempDelay) 2233 continue 2234 } 2235 return e 2236 } 2237 tempDelay = 0 2238 c := srv.newConn(rw) 2239 c.setState(c.rwc, StateNew) // before Serve can return 2240 go c.serve(ctx) 2241 } 2242 } 2243 2244 func (s *Server) doKeepAlives() bool { 2245 return atomic.LoadInt32(&s.disableKeepAlives) == 0 2246 } 2247 2248 // SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled. 2249 // By default, keep-alives are always enabled. Only very 2250 // resource-constrained environments or servers in the process of 2251 // shutting down should disable them. 2252 func (srv *Server) SetKeepAlivesEnabled(v bool) { 2253 if v { 2254 atomic.StoreInt32(&srv.disableKeepAlives, 0) 2255 } else { 2256 atomic.StoreInt32(&srv.disableKeepAlives, 1) 2257 } 2258 } 2259 2260 func (s *Server) logf(format string, args ...interface{}) { 2261 if s.ErrorLog != nil { 2262 s.ErrorLog.Printf(format, args...) 2263 } else { 2264 log.Printf(format, args...) 2265 } 2266 } 2267 2268 // ListenAndServe listens on the TCP network address addr 2269 // and then calls Serve with handler to handle requests 2270 // on incoming connections. 2271 // Accepted connections are configured to enable TCP keep-alives. 2272 // Handler is typically nil, in which case the DefaultServeMux is 2273 // used. 
2274 // 2275 // A trivial example server is: 2276 // 2277 // package main 2278 // 2279 // import ( 2280 // "io" 2281 // "net/http" 2282 // "log" 2283 // ) 2284 // 2285 // // hello world, the web server 2286 // func HelloServer(w http.ResponseWriter, req *http.Request) { 2287 // io.WriteString(w, "hello, world!\n") 2288 // } 2289 // 2290 // func main() { 2291 // http.HandleFunc("/hello", HelloServer) 2292 // log.Fatal(http.ListenAndServe(":12345", nil)) 2293 // } 2294 // 2295 // ListenAndServe always returns a non-nil error. 2296 func ListenAndServe(addr string, handler Handler) error { 2297 server := &Server{Addr: addr, Handler: handler} 2298 return server.ListenAndServe() 2299 } 2300 2301 // ListenAndServeTLS acts identically to ListenAndServe, except that it 2302 // expects HTTPS connections. Additionally, files containing a certificate and 2303 // matching private key for the server must be provided. If the certificate 2304 // is signed by a certificate authority, the certFile should be the concatenation 2305 // of the server's certificate, any intermediates, and the CA's certificate. 2306 // 2307 // A trivial example server is: 2308 // 2309 // import ( 2310 // "log" 2311 // "net/http" 2312 // ) 2313 // 2314 // func handler(w http.ResponseWriter, req *http.Request) { 2315 // w.Header().Set("Content-Type", "text/plain") 2316 // w.Write([]byte("This is an example server.\n")) 2317 // } 2318 // 2319 // func main() { 2320 // http.HandleFunc("/", handler) 2321 // log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/") 2322 // err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil) 2323 // log.Fatal(err) 2324 // } 2325 // 2326 // One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem. 2327 // 2328 // ListenAndServeTLS always returns a non-nil error. 2329 func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { 2330 server := &Server{Addr: addr, Handler: handler} 2331 return server.ListenAndServeTLS(certFile, keyFile) 2332 } 2333 2334 // ListenAndServeTLS listens on the TCP network address srv.Addr and 2335 // then calls Serve to handle requests on incoming TLS connections. 2336 // Accepted connections are configured to enable TCP keep-alives. 2337 // 2338 // Filenames containing a certificate and matching private key for the 2339 // server must be provided if neither the Server's TLSConfig.Certificates 2340 // nor TLSConfig.GetCertificate are populated. If the certificate is 2341 // signed by a certificate authority, the certFile should be the 2342 // concatenation of the server's certificate, any intermediates, and 2343 // the CA's certificate. 2344 // 2345 // If srv.Addr is blank, ":https" is used. 2346 // 2347 // ListenAndServeTLS always returns a non-nil error. 2348 func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error { 2349 addr := srv.Addr 2350 if addr == "" { 2351 addr = ":https" 2352 } 2353 2354 // Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig 2355 // before we clone it and create the TLS Listener. 
2356 if err := srv.setupHTTP2(); err != nil { 2357 return err 2358 } 2359 2360 config := cloneTLSConfig(srv.TLSConfig) 2361 if !strSliceContains(config.NextProtos, "http/1.1") { 2362 config.NextProtos = append(config.NextProtos, "http/1.1") 2363 } 2364 2365 configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil 2366 if !configHasCert || certFile != "" || keyFile != "" { 2367 var err error 2368 config.Certificates = make([]tls.Certificate, 1) 2369 config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) 2370 if err != nil { 2371 return err 2372 } 2373 } 2374 2375 ln, err := net.Listen("tcp", addr) 2376 if err != nil { 2377 return err 2378 } 2379 2380 tlsListener := tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, config) 2381 return srv.Serve(tlsListener) 2382 } 2383 2384 func (srv *Server) setupHTTP2() error { 2385 srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults) 2386 return srv.nextProtoErr 2387 } 2388 2389 // onceSetNextProtoDefaults configures HTTP/2, if the user hasn't 2390 // configured otherwise. (by setting srv.TLSNextProto non-nil) 2391 // It must only be called via srv.nextProtoOnce (use srv.setupHTTP2). 2392 func (srv *Server) onceSetNextProtoDefaults() { 2393 if strings.Contains(os.Getenv("GODEBUG"), "http2server=0") { 2394 return 2395 } 2396 // Enable HTTP/2 by default if the user hasn't otherwise 2397 // configured their TLSNextProto map. 2398 if srv.TLSNextProto == nil { 2399 srv.nextProtoErr = http2ConfigureServer(srv, nil) 2400 } 2401 } 2402 2403 // TimeoutHandler returns a Handler that runs h with the given time limit. 2404 // 2405 // The new Handler calls h.ServeHTTP to handle each request, but if a 2406 // call runs for longer than its time limit, the handler responds with 2407 // a 503 Service Unavailable error and the given message in its body. 2408 // (If msg is empty, a suitable default message will be sent.) 2409 // After such a timeout, writes by h to its ResponseWriter will return 2410 // ErrHandlerTimeout. 2411 // 2412 // TimeoutHandler buffers all Handler writes to memory and does not 2413 // support the Hijacker or Flusher interfaces. 2414 func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler { 2415 return &timeoutHandler{ 2416 handler: h, 2417 body: msg, 2418 dt: dt, 2419 } 2420 } 2421 2422 // ErrHandlerTimeout is returned on ResponseWriter Write calls 2423 // in handlers which have timed out. 2424 var ErrHandlerTimeout = errors.New("http: Handler timeout") 2425 2426 type timeoutHandler struct { 2427 handler Handler 2428 body string 2429 dt time.Duration 2430 2431 // When set, no timer will be created and this channel will 2432 // be used instead. 
2433 testTimeout <-chan time.Time 2434 } 2435 2436 func (h *timeoutHandler) errorBody() string { 2437 if h.body != "" { 2438 return h.body 2439 } 2440 return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>" 2441 } 2442 2443 func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) { 2444 var t *time.Timer 2445 timeout := h.testTimeout 2446 if timeout == nil { 2447 t = time.NewTimer(h.dt) 2448 timeout = t.C 2449 } 2450 done := make(chan struct{}) 2451 tw := &timeoutWriter{ 2452 w: w, 2453 h: make(Header), 2454 } 2455 go func() { 2456 h.handler.ServeHTTP(tw, r) 2457 close(done) 2458 }() 2459 select { 2460 case <-done: 2461 tw.mu.Lock() 2462 defer tw.mu.Unlock() 2463 dst := w.Header() 2464 for k, vv := range tw.h { 2465 dst[k] = vv 2466 } 2467 if !tw.wroteHeader { 2468 tw.code = StatusOK 2469 } 2470 w.WriteHeader(tw.code) 2471 w.Write(tw.wbuf.Bytes()) 2472 if t != nil { 2473 t.Stop() 2474 } 2475 case <-timeout: 2476 tw.mu.Lock() 2477 defer tw.mu.Unlock() 2478 w.WriteHeader(StatusServiceUnavailable) 2479 io.WriteString(w, h.errorBody()) 2480 tw.timedOut = true 2481 return 2482 } 2483 } 2484 2485 type timeoutWriter struct { 2486 w ResponseWriter 2487 h Header 2488 wbuf bytes.Buffer 2489 2490 mu sync.Mutex 2491 timedOut bool 2492 wroteHeader bool 2493 code int 2494 } 2495 2496 func (tw *timeoutWriter) Header() Header { return tw.h } 2497 2498 func (tw *timeoutWriter) Write(p []byte) (int, error) { 2499 tw.mu.Lock() 2500 defer tw.mu.Unlock() 2501 if tw.timedOut { 2502 return 0, ErrHandlerTimeout 2503 } 2504 if !tw.wroteHeader { 2505 tw.writeHeader(StatusOK) 2506 } 2507 return tw.wbuf.Write(p) 2508 } 2509 2510 func (tw *timeoutWriter) WriteHeader(code int) { 2511 tw.mu.Lock() 2512 defer tw.mu.Unlock() 2513 if tw.timedOut || tw.wroteHeader { 2514 return 2515 } 2516 tw.writeHeader(code) 2517 } 2518 2519 func (tw *timeoutWriter) writeHeader(code int) { 2520 tw.wroteHeader = true 2521 tw.code = code 2522 } 2523 2524 // tcpKeepAliveListener sets TCP keep-alive timeouts on accepted 2525 // connections. It's used by ListenAndServe and ListenAndServeTLS so 2526 // dead TCP connections (e.g. closing laptop mid-download) eventually 2527 // go away. 2528 type tcpKeepAliveListener struct { 2529 *net.TCPListener 2530 } 2531 2532 func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { 2533 tc, err := ln.AcceptTCP() 2534 if err != nil { 2535 return 2536 } 2537 tc.SetKeepAlive(true) 2538 tc.SetKeepAlivePeriod(3 * time.Minute) 2539 return tc, nil 2540 } 2541 2542 // globalOptionsHandler responds to "OPTIONS *" requests. 2543 type globalOptionsHandler struct{} 2544 2545 func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) { 2546 w.Header().Set("Content-Length", "0") 2547 if r.ContentLength != 0 { 2548 // Read up to 4KB of OPTIONS body (as mentioned in the 2549 // spec as being reserved for future use), but anything 2550 // over that is considered a waste of server resources 2551 // (or an attack) and we abort and close the connection, 2552 // courtesy of MaxBytesReader's EOF behavior. 2553 mb := MaxBytesReader(w, r.Body, 4<<10) 2554 io.Copy(ioutil.Discard, mb) 2555 } 2556 } 2557 2558 type eofReaderWithWriteTo struct{} 2559 2560 func (eofReaderWithWriteTo) WriteTo(io.Writer) (int64, error) { return 0, nil } 2561 func (eofReaderWithWriteTo) Read([]byte) (int, error) { return 0, io.EOF } 2562 2563 // eofReader is a non-nil io.ReadCloser that always returns EOF. 2564 // It has a WriteTo method so io.Copy won't need a buffer. 
2565 var eofReader = &struct { 2566 eofReaderWithWriteTo 2567 io.Closer 2568 }{ 2569 eofReaderWithWriteTo{}, 2570 ioutil.NopCloser(nil), 2571 } 2572 2573 // Verify that an io.Copy from an eofReader won't require a buffer. 2574 var _ io.WriterTo = eofReader 2575 2576 // initNPNRequest is an HTTP handler that initializes certain 2577 // uninitialized fields in its *Request. Such partially-initialized 2578 // Requests come from NPN protocol handlers. 2579 type initNPNRequest struct { 2580 c *tls.Conn 2581 h serverHandler 2582 } 2583 2584 func (h initNPNRequest) ServeHTTP(rw ResponseWriter, req *Request) { 2585 if req.TLS == nil { 2586 req.TLS = &tls.ConnectionState{} 2587 *req.TLS = h.c.ConnectionState() 2588 } 2589 if req.Body == nil { 2590 req.Body = eofReader 2591 } 2592 if req.RemoteAddr == "" { 2593 req.RemoteAddr = h.c.RemoteAddr().String() 2594 } 2595 h.h.ServeHTTP(rw, req) 2596 } 2597 2598 // loggingConn is used for debugging. 2599 type loggingConn struct { 2600 name string 2601 net.Conn 2602 } 2603 2604 var ( 2605 uniqNameMu sync.Mutex 2606 uniqNameNext = make(map[string]int) 2607 ) 2608 2609 func newLoggingConn(baseName string, c net.Conn) net.Conn { 2610 uniqNameMu.Lock() 2611 defer uniqNameMu.Unlock() 2612 uniqNameNext[baseName]++ 2613 return &loggingConn{ 2614 name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]), 2615 Conn: c, 2616 } 2617 } 2618 2619 func (c *loggingConn) Write(p []byte) (n int, err error) { 2620 log.Printf("%s.Write(%d) = ....", c.name, len(p)) 2621 n, err = c.Conn.Write(p) 2622 log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err) 2623 return 2624 } 2625 2626 func (c *loggingConn) Read(p []byte) (n int, err error) { 2627 log.Printf("%s.Read(%d) = ....", c.name, len(p)) 2628 n, err = c.Conn.Read(p) 2629 log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err) 2630 return 2631 } 2632 2633 func (c *loggingConn) Close() (err error) { 2634 log.Printf("%s.Close() = ...", c.name) 2635 err = c.Conn.Close() 2636 log.Printf("%s.Close() = %v", c.name, err) 2637 return 2638 } 2639 2640 // checkConnErrorWriter writes to c.rwc and records any write errors to c.werr. 2641 // It only contains one field (and a pointer field at that), so it 2642 // fits in an interface value without an extra allocation. 2643 type checkConnErrorWriter struct { 2644 c *conn 2645 } 2646 2647 func (w checkConnErrorWriter) Write(p []byte) (n int, err error) { 2648 n, err = w.c.rwc.Write(p) 2649 if err != nil && w.c.werr == nil { 2650 w.c.werr = err 2651 } 2652 return 2653 } 2654 2655 func numLeadingCRorLF(v []byte) (n int) { 2656 for _, b := range v { 2657 if b == '\r' || b == '\n' { 2658 n++ 2659 continue 2660 } 2661 break 2662 } 2663 return 2664 2665 } 2666 2667 func strSliceContains(ss []string, s string) bool { 2668 for _, v := range ss { 2669 if v == s { 2670 return true 2671 } 2672 } 2673 return false 2674 }
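// Editorial example (not part of the original source): a short sketch of
// TimeoutHandler, which buffers the wrapped handler's output and replies with
// 503 Service Unavailable when the time limit is exceeded. The slow handler,
// the one-second limit, and the ":8080" address are hypothetical; the usual
// imports (io, log, net/http, time) are assumed.
//
//	slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//		time.Sleep(2 * time.Second) // pretend to do expensive work
//		io.WriteString(w, "finally done\n")
//	})
//	// Requests that run longer than one second receive a 503 with the given
//	// message, and later writes by slow return ErrHandlerTimeout.
//	http.Handle("/slow", http.TimeoutHandler(slow, 1*time.Second, "processing took too long"))
//	log.Fatal(http.ListenAndServe(":8080", nil))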