github.com/dannin/go@v0.0.0-20161031215817-d35dfd405eaa/src/net/http/server.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// HTTP server. See RFC 2616.

package http

import (
	"bufio"
	"bytes"
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net"
	"net/textproto"
	"net/url"
	"os"
	"path"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"golang_org/x/net/lex/httplex"
)

// Errors used by the HTTP server.
var (
	// ErrBodyNotAllowed is returned by ResponseWriter.Write calls
	// when the HTTP method or response code does not permit a
	// body.
	ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body")

	// ErrHijacked is returned by ResponseWriter.Write calls when
	// the underlying connection has been hijacked using the
	// Hijacker interface. A zero-byte write on a hijacked
	// connection will return ErrHijacked without any other side
	// effects.
	ErrHijacked = errors.New("http: connection has been hijacked")

	// ErrContentLength is returned by ResponseWriter.Write calls
	// when a Handler set a Content-Length response header with a
	// declared size and then attempted to write more bytes than
	// declared.
	ErrContentLength = errors.New("http: wrote more than the declared Content-Length")

	// Deprecated: ErrWriteAfterFlush is no longer used.
	ErrWriteAfterFlush = errors.New("unused")
)

// A Handler responds to an HTTP request.
//
// ServeHTTP should write reply headers and data to the ResponseWriter
// and then return. Returning signals that the request is finished; it
// is not valid to use the ResponseWriter or read from the
// Request.Body after or concurrently with the completion of the
// ServeHTTP call.
//
// Depending on the HTTP client software, HTTP protocol version, and
// any intermediaries between the client and the Go server, it may not
// be possible to read from the Request.Body after writing to the
// ResponseWriter. Cautious handlers should read the Request.Body
// first, and then reply.
//
// Except for reading the body, handlers should not modify the
// provided Request.
//
// If ServeHTTP panics, the server (the caller of ServeHTTP) assumes
// that the effect of the panic was isolated to the active request.
// It recovers the panic, logs a stack trace to the server error log,
// and hangs up the connection.
type Handler interface {
	ServeHTTP(ResponseWriter, *Request)
}
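
// A minimal sketch of a Handler implementation following the guidance
// above: it reads the request body before replying, and guards its own
// state with a mutex because ServeHTTP may be called concurrently for
// different requests. The countingHandler name is hypothetical and shown
// for illustration only; it is not part of the package's API.
type countingHandler struct {
	mu   sync.Mutex
	hits int
}

func (h *countingHandler) ServeHTTP(w ResponseWriter, r *Request) {
	// Consume the body first; once the reply has started, the body
	// may no longer be readable (see the Write documentation below).
	io.Copy(ioutil.Discard, r.Body)

	h.mu.Lock()
	h.hits++
	n := h.hits
	h.mu.Unlock()

	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	fmt.Fprintf(w, "request %d\n", n)
}
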
// A ResponseWriter interface is used by an HTTP handler to
// construct an HTTP response.
//
// A ResponseWriter may not be used after the Handler.ServeHTTP method
// has returned.
type ResponseWriter interface {
	// Header returns the header map that will be sent by
	// WriteHeader. Changing the header after a call to
	// WriteHeader (or Write) has no effect unless the modified
	// headers were declared as trailers by setting the
	// "Trailer" header before the call to WriteHeader (see example).
	// To suppress implicit response headers, set their value to nil.
	Header() Header

	// Write writes the data to the connection as part of an HTTP reply.
	//
	// If WriteHeader has not yet been called, Write calls
	// WriteHeader(http.StatusOK) before writing the data. If the Header
	// does not contain a Content-Type line, Write adds a Content-Type set
	// to the result of passing the initial 512 bytes of written data to
	// DetectContentType.
	//
	// Depending on the HTTP protocol version and the client, calling
	// Write or WriteHeader may prevent future reads on the
	// Request.Body. For HTTP/1.x requests, handlers should read any
	// needed request body data before writing the response. Once the
	// headers have been flushed (due to either an explicit Flusher.Flush
	// call or writing enough data to trigger a flush), the request body
	// may be unavailable. For HTTP/2 requests, the Go HTTP server permits
	// handlers to continue to read the request body while concurrently
	// writing the response. However, such behavior may not be supported
	// by all HTTP/2 clients. Handlers should read before writing if
	// possible to maximize compatibility.
	Write([]byte) (int, error)

	// WriteHeader sends an HTTP response header with status code.
	// If WriteHeader is not called explicitly, the first call to Write
	// will trigger an implicit WriteHeader(http.StatusOK).
	// Thus explicit calls to WriteHeader are mainly used to
	// send error codes.
	WriteHeader(int)
}

// The Flusher interface is implemented by ResponseWriters that allow
// an HTTP handler to flush buffered data to the client.
//
// The default HTTP/1.x and HTTP/2 ResponseWriter implementations
// support Flusher, but ResponseWriter wrappers may not. Handlers
// should always test for this ability at runtime.
//
// Note that even for ResponseWriters that support Flush,
// if the client is connected through an HTTP proxy,
// the buffered data may not reach the client until the response
// completes.
type Flusher interface {
	// Flush sends any buffered data to the client.
	Flush()
}

// The Hijacker interface is implemented by ResponseWriters that allow
// an HTTP handler to take over the connection.
//
// The default ResponseWriter for HTTP/1.x connections supports
// Hijacker, but HTTP/2 connections intentionally do not.
// ResponseWriter wrappers may also not support Hijacker. Handlers
// should always test for this ability at runtime.
type Hijacker interface {
	// Hijack lets the caller take over the connection.
	// After a call to Hijack(), the HTTP server library
	// will not do anything else with the connection.
	//
	// It becomes the caller's responsibility to manage
	// and close the connection.
	//
	// The returned net.Conn may have read or write deadlines
	// already set, depending on the configuration of the
	// Server. It is the caller's responsibility to set
	// or clear those deadlines as needed.
	Hijack() (net.Conn, *bufio.ReadWriter, error)
}

// The CloseNotifier interface is implemented by ResponseWriters which
// allow detecting when the underlying connection has gone away.
//
// This mechanism can be used to cancel long operations on the server
// if the client has disconnected before the response is ready.
type CloseNotifier interface {
	// CloseNotify returns a channel that receives at most a
	// single value (true) when the client connection has gone
	// away.
	//
	// CloseNotify may wait to notify until Request.Body has been
	// fully read.
	//
	// After the Handler has returned, there is no guarantee
	// that the channel receives a value.
	//
	// If the protocol is HTTP/1.1 and CloseNotify is called while
	// processing an idempotent request (such as a GET) while
	// HTTP/1.1 pipelining is in use, the arrival of a subsequent
	// pipelined request may cause a value to be sent on the
	// returned channel. In practice HTTP/1.1 pipelining is not
	// enabled in browsers and not seen often in the wild. If this
	// is a problem, use HTTP/2 or only use CloseNotify on methods
	// such as POST.
	CloseNotify() <-chan bool
}
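
// Minimal sketches of the runtime capability checks recommended above.
// Both function names are hypothetical illustrations, not package API;
// the pattern is simply a type assertion on the ResponseWriter, since
// wrapper types may hide Flusher or Hijacker support.
func flushAfterEachWrite(w ResponseWriter, msgs []string) {
	f, canFlush := w.(Flusher) // illustrative: test for Flusher at runtime
	for _, m := range msgs {
		io.WriteString(w, m)
		if canFlush {
			f.Flush() // push buffered data to the client now
		}
	}
}

func takeOverConnection(w ResponseWriter) (net.Conn, *bufio.ReadWriter, error) {
	hj, ok := w.(Hijacker) // illustrative: HTTP/2 and wrappers may not support this
	if !ok {
		return nil, nil, errors.New("http: ResponseWriter does not support hijacking")
	}
	return hj.Hijack()
}
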
var (
	// ServerContextKey is a context key. It can be used in HTTP
	// handlers with context.WithValue to access the server that
	// started the handler. The associated value will be of
	// type *Server.
	ServerContextKey = &contextKey{"http-server"}

	// LocalAddrContextKey is a context key. It can be used in
	// HTTP handlers with context.WithValue to access the local
	// address the connection arrived on.
	// The associated value will be of type net.Addr.
	LocalAddrContextKey = &contextKey{"local-addr"}
)

// A conn represents the server side of an HTTP connection.
type conn struct {
	// server is the server on which the connection arrived.
	// Immutable; never nil.
	server *Server

	// cancelCtx cancels the connection-level context.
	cancelCtx context.CancelFunc

	// rwc is the underlying network connection.
	// This is never wrapped by other types and is the value given out
	// to CloseNotifier callers. It is usually of type *net.TCPConn or
	// *tls.Conn.
	rwc net.Conn

	// remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously
	// inside the Listener's Accept goroutine, as some implementations block.
	// It is populated immediately inside the (*conn).serve goroutine.
	// This is the value of a Handler's (*Request).RemoteAddr.
	remoteAddr string

	// tlsState is the TLS connection state when using TLS.
	// nil means not TLS.
	tlsState *tls.ConnectionState

	// werr is set to the first write error to rwc.
	// It is set via checkConnErrorWriter{w}, where bufw writes.
	werr error

	// r is bufr's read source. It's a wrapper around rwc that provides
	// io.LimitedReader-style limiting (while reading request headers)
	// and functionality to support CloseNotifier. See *connReader docs.
	r *connReader

	// bufr reads from r.
	bufr *bufio.Reader

	// bufw writes to checkConnErrorWriter{c}, which populates werr on error.
	bufw *bufio.Writer

	// lastMethod is the method of the most recent request
	// on this connection, if any.
	lastMethod string

	curReq atomic.Value // of *response (which has a Request in it)

	// mu guards hijackedv
	mu sync.Mutex

	// hijackedv is whether this connection has been hijacked
	// by a Handler with the Hijacker interface.
	// It is guarded by mu.
	hijackedv bool
}

func (c *conn) hijacked() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.hijackedv
}

// c.mu must be held.
267 func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) { 268 if c.hijackedv { 269 return nil, nil, ErrHijacked 270 } 271 c.r.abortPendingRead() 272 273 c.hijackedv = true 274 rwc = c.rwc 275 rwc.SetDeadline(time.Time{}) 276 277 buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc)) 278 c.setState(rwc, StateHijacked) 279 return 280 } 281 282 // This should be >= 512 bytes for DetectContentType, 283 // but otherwise it's somewhat arbitrary. 284 const bufferBeforeChunkingSize = 2048 285 286 // chunkWriter writes to a response's conn buffer, and is the writer 287 // wrapped by the response.bufw buffered writer. 288 // 289 // chunkWriter also is responsible for finalizing the Header, including 290 // conditionally setting the Content-Type and setting a Content-Length 291 // in cases where the handler's final output is smaller than the buffer 292 // size. It also conditionally adds chunk headers, when in chunking mode. 293 // 294 // See the comment above (*response).Write for the entire write flow. 295 type chunkWriter struct { 296 res *response 297 298 // header is either nil or a deep clone of res.handlerHeader 299 // at the time of res.WriteHeader, if res.WriteHeader is 300 // called and extra buffering is being done to calculate 301 // Content-Type and/or Content-Length. 302 header Header 303 304 // wroteHeader tells whether the header's been written to "the 305 // wire" (or rather: w.conn.buf). this is unlike 306 // (*response).wroteHeader, which tells only whether it was 307 // logically written. 308 wroteHeader bool 309 310 // set by the writeHeader method: 311 chunking bool // using chunked transfer encoding for reply body 312 } 313 314 var ( 315 crlf = []byte("\r\n") 316 colonSpace = []byte(": ") 317 ) 318 319 func (cw *chunkWriter) Write(p []byte) (n int, err error) { 320 if !cw.wroteHeader { 321 cw.writeHeader(p) 322 } 323 if cw.res.req.Method == "HEAD" { 324 // Eat writes. 325 return len(p), nil 326 } 327 if cw.chunking { 328 _, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p)) 329 if err != nil { 330 cw.res.conn.rwc.Close() 331 return 332 } 333 } 334 n, err = cw.res.conn.bufw.Write(p) 335 if cw.chunking && err == nil { 336 _, err = cw.res.conn.bufw.Write(crlf) 337 } 338 if err != nil { 339 cw.res.conn.rwc.Close() 340 } 341 return 342 } 343 344 func (cw *chunkWriter) flush() { 345 if !cw.wroteHeader { 346 cw.writeHeader(nil) 347 } 348 cw.res.conn.bufw.Flush() 349 } 350 351 func (cw *chunkWriter) close() { 352 if !cw.wroteHeader { 353 cw.writeHeader(nil) 354 } 355 if cw.chunking { 356 bw := cw.res.conn.bufw // conn's bufio writer 357 // zero chunk to mark EOF 358 bw.WriteString("0\r\n") 359 if len(cw.res.trailers) > 0 { 360 trailers := make(Header) 361 for _, h := range cw.res.trailers { 362 if vv := cw.res.handlerHeader[h]; len(vv) > 0 { 363 trailers[h] = vv 364 } 365 } 366 trailers.Write(bw) // the writer handles noting errors 367 } 368 // final blank line after the trailers (whether 369 // present or not) 370 bw.WriteString("\r\n") 371 } 372 } 373 374 // A response represents the server side of an HTTP response. 
375 type response struct { 376 conn *conn 377 req *Request // request for this response 378 reqBody io.ReadCloser 379 cancelCtx context.CancelFunc // when ServeHTTP exits 380 wroteHeader bool // reply header has been (logically) written 381 wroteContinue bool // 100 Continue response was written 382 wants10KeepAlive bool // HTTP/1.0 w/ Connection "keep-alive" 383 wantsClose bool // HTTP request has Connection "close" 384 385 w *bufio.Writer // buffers output in chunks to chunkWriter 386 cw chunkWriter 387 388 // handlerHeader is the Header that Handlers get access to, 389 // which may be retained and mutated even after WriteHeader. 390 // handlerHeader is copied into cw.header at WriteHeader 391 // time, and privately mutated thereafter. 392 handlerHeader Header 393 calledHeader bool // handler accessed handlerHeader via Header 394 395 written int64 // number of bytes written in body 396 contentLength int64 // explicitly-declared Content-Length; or -1 397 status int // status code passed to WriteHeader 398 399 // close connection after this reply. set on request and 400 // updated after response from handler if there's a 401 // "Connection: keep-alive" response header and a 402 // Content-Length. 403 closeAfterReply bool 404 405 // requestBodyLimitHit is set by requestTooLarge when 406 // maxBytesReader hits its max size. It is checked in 407 // WriteHeader, to make sure we don't consume the 408 // remaining request body to try to advance to the next HTTP 409 // request. Instead, when this is set, we stop reading 410 // subsequent requests on this connection and stop reading 411 // input from it. 412 requestBodyLimitHit bool 413 414 // trailers are the headers to be sent after the handler 415 // finishes writing the body. This field is initialized from 416 // the Trailer response header when the response header is 417 // written. 418 trailers []string 419 420 handlerDone atomicBool // set true when the handler exits 421 422 // Buffers for Date and Content-Length 423 dateBuf [len(TimeFormat)]byte 424 clenBuf [10]byte 425 426 // closeNotifyCh is the channel returned by CloseNotify. 427 // TODO(bradfitz): this is currently (for Go 1.8) always 428 // non-nil. Make this lazily-created again as it used to be? 429 closeNotifyCh chan bool 430 didCloseNotify int32 // atomic (only 0->1 winner should send) 431 } 432 433 type atomicBool int32 434 435 func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } 436 func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } 437 438 // declareTrailer is called for each Trailer header when the 439 // response header is written. It notes that a header will need to be 440 // written in the trailers at the end of the response. 441 func (w *response) declareTrailer(k string) { 442 k = CanonicalHeaderKey(k) 443 switch k { 444 case "Transfer-Encoding", "Content-Length", "Trailer": 445 // Forbidden by RFC 2616 14.40. 446 return 447 } 448 w.trailers = append(w.trailers, k) 449 } 450 451 // requestTooLarge is called by maxBytesReader when too much input has 452 // been read from the client. 453 func (w *response) requestTooLarge() { 454 w.closeAfterReply = true 455 w.requestBodyLimitHit = true 456 if !w.wroteHeader { 457 w.Header().Set("Connection", "close") 458 } 459 } 460 461 // needsSniff reports whether a Content-Type still needs to be sniffed. 
462 func (w *response) needsSniff() bool { 463 _, haveType := w.handlerHeader["Content-Type"] 464 return !w.cw.wroteHeader && !haveType && w.written < sniffLen 465 } 466 467 // writerOnly hides an io.Writer value's optional ReadFrom method 468 // from io.Copy. 469 type writerOnly struct { 470 io.Writer 471 } 472 473 func srcIsRegularFile(src io.Reader) (isRegular bool, err error) { 474 switch v := src.(type) { 475 case *os.File: 476 fi, err := v.Stat() 477 if err != nil { 478 return false, err 479 } 480 return fi.Mode().IsRegular(), nil 481 case *io.LimitedReader: 482 return srcIsRegularFile(v.R) 483 default: 484 return 485 } 486 } 487 488 // ReadFrom is here to optimize copying from an *os.File regular file 489 // to a *net.TCPConn with sendfile. 490 func (w *response) ReadFrom(src io.Reader) (n int64, err error) { 491 // Our underlying w.conn.rwc is usually a *TCPConn (with its 492 // own ReadFrom method). If not, or if our src isn't a regular 493 // file, just fall back to the normal copy method. 494 rf, ok := w.conn.rwc.(io.ReaderFrom) 495 regFile, err := srcIsRegularFile(src) 496 if err != nil { 497 return 0, err 498 } 499 if !ok || !regFile { 500 bufp := copyBufPool.Get().(*[]byte) 501 defer copyBufPool.Put(bufp) 502 return io.CopyBuffer(writerOnly{w}, src, *bufp) 503 } 504 505 // sendfile path: 506 507 if !w.wroteHeader { 508 w.WriteHeader(StatusOK) 509 } 510 511 if w.needsSniff() { 512 n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen)) 513 n += n0 514 if err != nil { 515 return n, err 516 } 517 } 518 519 w.w.Flush() // get rid of any previous writes 520 w.cw.flush() // make sure Header is written; flush data to rwc 521 522 // Now that cw has been flushed, its chunking field is guaranteed initialized. 523 if !w.cw.chunking && w.bodyAllowed() { 524 n0, err := rf.ReadFrom(src) 525 n += n0 526 w.written += n0 527 return n, err 528 } 529 530 n0, err := io.Copy(writerOnly{w}, src) 531 n += n0 532 return n, err 533 } 534 535 // debugServerConnections controls whether all server connections are wrapped 536 // with a verbose logging wrapper. 537 const debugServerConnections = false 538 539 // Create new connection from rwc. 540 func (srv *Server) newConn(rwc net.Conn) *conn { 541 c := &conn{ 542 server: srv, 543 rwc: rwc, 544 } 545 if debugServerConnections { 546 c.rwc = newLoggingConn("server", c.rwc) 547 } 548 return c 549 } 550 551 type readResult struct { 552 n int 553 err error 554 b byte // byte read, if n == 1 555 } 556 557 // connReader is the io.Reader wrapper used by *conn. It combines a 558 // selectively-activated io.LimitedReader (to bound request header 559 // read sizes) with support for selectively keeping an io.Reader.Read 560 // call blocked in a background goroutine to wait for activity and 561 // trigger a CloseNotifier channel. 
562 type connReader struct { 563 conn *conn 564 565 mu sync.Mutex // guards following 566 hasByte bool 567 byteBuf [1]byte 568 bgErr error // non-nil means error happened on background read 569 cond *sync.Cond 570 inRead bool 571 aborted bool // set true before conn.rwc deadline is set to past 572 remain int64 // bytes remaining 573 } 574 575 func (cr *connReader) lock() { 576 cr.mu.Lock() 577 if cr.cond == nil { 578 cr.cond = sync.NewCond(&cr.mu) 579 } 580 } 581 582 func (cr *connReader) unlock() { cr.mu.Unlock() } 583 584 func (cr *connReader) startBackgroundRead() { 585 cr.lock() 586 defer cr.unlock() 587 if cr.inRead { 588 panic("invalid concurrent Body.Read call") 589 } 590 cr.inRead = true 591 go cr.backgroundRead() 592 } 593 594 func (cr *connReader) backgroundRead() { 595 n, err := cr.conn.rwc.Read(cr.byteBuf[:]) 596 cr.lock() 597 if n == 1 { 598 cr.hasByte = true 599 // We were at EOF already (since we wouldn't be in a 600 // background read otherwise), so this is a pipelined 601 // HTTP request. 602 cr.closeNotifyFromPipelinedRequest() 603 } 604 if ne, ok := err.(net.Error); ok && cr.aborted && ne.Timeout() { 605 // Ignore this error. It's the expected error from 606 // another goroutine calling abortPendingRead. 607 } else if err != nil { 608 cr.handleReadError(err) 609 } 610 cr.aborted = false 611 cr.inRead = false 612 cr.unlock() 613 cr.cond.Broadcast() 614 } 615 616 func (cr *connReader) abortPendingRead() { 617 cr.lock() 618 defer cr.unlock() 619 if !cr.inRead { 620 return 621 } 622 cr.aborted = true 623 cr.conn.rwc.SetReadDeadline(aLongTimeAgo) 624 for cr.inRead { 625 cr.cond.Wait() 626 } 627 cr.conn.rwc.SetReadDeadline(time.Time{}) 628 } 629 630 func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain } 631 func (cr *connReader) setInfiniteReadLimit() { cr.remain = maxInt64 } 632 func (cr *connReader) hitReadLimit() bool { return cr.remain <= 0 } 633 634 // may be called from multiple goroutines. 635 func (cr *connReader) handleReadError(err error) { 636 cr.conn.cancelCtx() 637 cr.closeNotify() 638 } 639 640 // closeNotifyFromPipelinedRequest simply calls closeNotify. 641 // 642 // This method wrapper is here for documentation. The callers are the 643 // cases where we send on the closenotify channel because of a 644 // pipelined HTTP request, per the previous Go behavior and 645 // documentation (that this "MAY" happen). 646 // 647 // TODO: consider changing this behavior and making context 648 // cancelation and closenotify work the same. 649 func (cr *connReader) closeNotifyFromPipelinedRequest() { 650 cr.closeNotify() 651 } 652 653 // may be called from multiple goroutines. 
654 func (cr *connReader) closeNotify() { 655 res, _ := cr.conn.curReq.Load().(*response) 656 if res != nil { 657 if atomic.CompareAndSwapInt32(&res.didCloseNotify, 0, 1) { 658 res.closeNotifyCh <- true 659 } 660 } 661 } 662 663 func (cr *connReader) Read(p []byte) (n int, err error) { 664 cr.lock() 665 if cr.inRead { 666 cr.unlock() 667 panic("invalid concurrent Body.Read call") 668 } 669 if cr.hitReadLimit() { 670 cr.unlock() 671 return 0, io.EOF 672 } 673 if cr.bgErr != nil { 674 err = cr.bgErr 675 cr.unlock() 676 return 0, err 677 } 678 if len(p) == 0 { 679 cr.unlock() 680 return 0, nil 681 } 682 if int64(len(p)) > cr.remain { 683 p = p[:cr.remain] 684 } 685 if cr.hasByte { 686 p[0] = cr.byteBuf[0] 687 cr.hasByte = false 688 cr.unlock() 689 return 1, nil 690 } 691 cr.inRead = true 692 cr.unlock() 693 n, err = cr.conn.rwc.Read(p) 694 695 cr.lock() 696 cr.inRead = false 697 if err != nil { 698 cr.handleReadError(err) 699 } 700 cr.remain -= int64(n) 701 cr.unlock() 702 703 cr.cond.Broadcast() 704 return n, err 705 } 706 707 var ( 708 bufioReaderPool sync.Pool 709 bufioWriter2kPool sync.Pool 710 bufioWriter4kPool sync.Pool 711 ) 712 713 var copyBufPool = sync.Pool{ 714 New: func() interface{} { 715 b := make([]byte, 32*1024) 716 return &b 717 }, 718 } 719 720 func bufioWriterPool(size int) *sync.Pool { 721 switch size { 722 case 2 << 10: 723 return &bufioWriter2kPool 724 case 4 << 10: 725 return &bufioWriter4kPool 726 } 727 return nil 728 } 729 730 func newBufioReader(r io.Reader) *bufio.Reader { 731 if v := bufioReaderPool.Get(); v != nil { 732 br := v.(*bufio.Reader) 733 br.Reset(r) 734 return br 735 } 736 // Note: if this reader size is ever changed, update 737 // TestHandlerBodyClose's assumptions. 738 return bufio.NewReader(r) 739 } 740 741 func putBufioReader(br *bufio.Reader) { 742 br.Reset(nil) 743 bufioReaderPool.Put(br) 744 } 745 746 func newBufioWriterSize(w io.Writer, size int) *bufio.Writer { 747 pool := bufioWriterPool(size) 748 if pool != nil { 749 if v := pool.Get(); v != nil { 750 bw := v.(*bufio.Writer) 751 bw.Reset(w) 752 return bw 753 } 754 } 755 return bufio.NewWriterSize(w, size) 756 } 757 758 func putBufioWriter(bw *bufio.Writer) { 759 bw.Reset(nil) 760 if pool := bufioWriterPool(bw.Available()); pool != nil { 761 pool.Put(bw) 762 } 763 } 764 765 // DefaultMaxHeaderBytes is the maximum permitted size of the headers 766 // in an HTTP request. 767 // This can be overridden by setting Server.MaxHeaderBytes. 
768 const DefaultMaxHeaderBytes = 1 << 20 // 1 MB 769 770 func (srv *Server) maxHeaderBytes() int { 771 if srv.MaxHeaderBytes > 0 { 772 return srv.MaxHeaderBytes 773 } 774 return DefaultMaxHeaderBytes 775 } 776 777 func (srv *Server) initialReadLimitSize() int64 { 778 return int64(srv.maxHeaderBytes()) + 4096 // bufio slop 779 } 780 781 // wrapper around io.ReaderCloser which on first read, sends an 782 // HTTP/1.1 100 Continue header 783 type expectContinueReader struct { 784 resp *response 785 readCloser io.ReadCloser 786 closed bool 787 sawEOF bool 788 } 789 790 func (ecr *expectContinueReader) Read(p []byte) (n int, err error) { 791 if ecr.closed { 792 return 0, ErrBodyReadAfterClose 793 } 794 if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() { 795 ecr.resp.wroteContinue = true 796 ecr.resp.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n") 797 ecr.resp.conn.bufw.Flush() 798 } 799 n, err = ecr.readCloser.Read(p) 800 if err == io.EOF { 801 ecr.sawEOF = true 802 } 803 return 804 } 805 806 func (ecr *expectContinueReader) Close() error { 807 ecr.closed = true 808 return ecr.readCloser.Close() 809 } 810 811 // TimeFormat is the time format to use when generating times in HTTP 812 // headers. It is like time.RFC1123 but hard-codes GMT as the time 813 // zone. The time being formatted must be in UTC for Format to 814 // generate the correct format. 815 // 816 // For parsing this time format, see ParseTime. 817 const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" 818 819 // appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat)) 820 func appendTime(b []byte, t time.Time) []byte { 821 const days = "SunMonTueWedThuFriSat" 822 const months = "JanFebMarAprMayJunJulAugSepOctNovDec" 823 824 t = t.UTC() 825 yy, mm, dd := t.Date() 826 hh, mn, ss := t.Clock() 827 day := days[3*t.Weekday():] 828 mon := months[3*(mm-1):] 829 830 return append(b, 831 day[0], day[1], day[2], ',', ' ', 832 byte('0'+dd/10), byte('0'+dd%10), ' ', 833 mon[0], mon[1], mon[2], ' ', 834 byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ', 835 byte('0'+hh/10), byte('0'+hh%10), ':', 836 byte('0'+mn/10), byte('0'+mn%10), ':', 837 byte('0'+ss/10), byte('0'+ss%10), ' ', 838 'G', 'M', 'T') 839 } 840 841 var errTooLarge = errors.New("http: request too large") 842 843 // Read next request from connection. 844 func (c *conn) readRequest(ctx context.Context) (w *response, err error) { 845 if c.hijacked() { 846 return nil, ErrHijacked 847 } 848 849 var ( 850 wholeReqDeadline time.Time // or zero if none 851 hdrDeadline time.Time // or zero if none 852 ) 853 t0 := time.Now() 854 if d := c.server.readHeaderTimeout(); d != 0 { 855 hdrDeadline = t0.Add(d) 856 } 857 if d := c.server.ReadTimeout; d != 0 { 858 wholeReqDeadline = t0.Add(d) 859 } 860 c.rwc.SetReadDeadline(hdrDeadline) 861 if d := c.server.WriteTimeout; d != 0 { 862 defer func() { 863 c.rwc.SetWriteDeadline(time.Now().Add(d)) 864 }() 865 } 866 867 c.r.setReadLimit(c.server.initialReadLimitSize()) 868 if c.lastMethod == "POST" { 869 // RFC 2616 section 4.1 tolerance for old buggy clients. 
870 peek, _ := c.bufr.Peek(4) // ReadRequest will get err below 871 c.bufr.Discard(numLeadingCRorLF(peek)) 872 } 873 req, err := readRequest(c.bufr, keepHostHeader) 874 if err != nil { 875 if c.r.hitReadLimit() { 876 return nil, errTooLarge 877 } 878 return nil, err 879 } 880 881 if !http1ServerSupportsRequest(req) { 882 return nil, badRequestError("unsupported protocol version") 883 } 884 885 c.lastMethod = req.Method 886 c.r.setInfiniteReadLimit() 887 888 hosts, haveHost := req.Header["Host"] 889 isH2Upgrade := req.isH2Upgrade() 890 if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade { 891 return nil, badRequestError("missing required Host header") 892 } 893 if len(hosts) > 1 { 894 return nil, badRequestError("too many Host headers") 895 } 896 if len(hosts) == 1 && !httplex.ValidHostHeader(hosts[0]) { 897 return nil, badRequestError("malformed Host header") 898 } 899 for k, vv := range req.Header { 900 if !httplex.ValidHeaderFieldName(k) { 901 return nil, badRequestError("invalid header name") 902 } 903 for _, v := range vv { 904 if !httplex.ValidHeaderFieldValue(v) { 905 return nil, badRequestError("invalid header value") 906 } 907 } 908 } 909 delete(req.Header, "Host") 910 911 ctx, cancelCtx := context.WithCancel(ctx) 912 req.ctx = ctx 913 req.RemoteAddr = c.remoteAddr 914 req.TLS = c.tlsState 915 if body, ok := req.Body.(*body); ok { 916 body.doEarlyClose = true 917 } 918 919 // Adjust the read deadline if necessary. 920 if !hdrDeadline.Equal(wholeReqDeadline) { 921 c.rwc.SetReadDeadline(wholeReqDeadline) 922 } 923 924 w = &response{ 925 conn: c, 926 cancelCtx: cancelCtx, 927 req: req, 928 reqBody: req.Body, 929 handlerHeader: make(Header), 930 contentLength: -1, 931 closeNotifyCh: make(chan bool, 1), 932 933 // We populate these ahead of time so we're not 934 // reading from req.Header after their Handler starts 935 // and maybe mutates it (Issue 14940) 936 wants10KeepAlive: req.wantsHttp10KeepAlive(), 937 wantsClose: req.wantsClose(), 938 } 939 if isH2Upgrade { 940 w.closeAfterReply = true 941 } 942 w.cw.res = w 943 w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize) 944 return w, nil 945 } 946 947 // http1ServerSupportsRequest reports whether Go's HTTP/1.x server 948 // supports the given request. 949 func http1ServerSupportsRequest(req *Request) bool { 950 if req.ProtoMajor == 1 { 951 return true 952 } 953 // Accept "PRI * HTTP/2.0" upgrade requests, so Handlers can 954 // wire up their own HTTP/2 upgrades. 955 if req.ProtoMajor == 2 && req.ProtoMinor == 0 && 956 req.Method == "PRI" && req.RequestURI == "*" { 957 return true 958 } 959 // Reject HTTP/0.x, and all other HTTP/2+ requests (which 960 // aren't encoded in ASCII anyway). 961 return false 962 } 963 964 func (w *response) Header() Header { 965 if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader { 966 // Accessing the header between logically writing it 967 // and physically writing it means we need to allocate 968 // a clone to snapshot the logically written state. 969 w.cw.header = w.handlerHeader.clone() 970 } 971 w.calledHeader = true 972 return w.handlerHeader 973 } 974 975 // maxPostHandlerReadBytes is the max number of Request.Body bytes not 976 // consumed by a handler that the server will read from the client 977 // in order to keep a connection alive. If there are more bytes than 978 // this then the server to be paranoid instead sends a "Connection: 979 // close" response. 980 // 981 // This number is approximately what a typical machine's TCP buffer 982 // size is anyway. 
(if we have the bytes on the machine, we might as 983 // well read them) 984 const maxPostHandlerReadBytes = 256 << 10 985 986 func (w *response) WriteHeader(code int) { 987 if w.conn.hijacked() { 988 w.conn.server.logf("http: response.WriteHeader on hijacked connection") 989 return 990 } 991 if w.wroteHeader { 992 w.conn.server.logf("http: multiple response.WriteHeader calls") 993 return 994 } 995 w.wroteHeader = true 996 w.status = code 997 998 if w.calledHeader && w.cw.header == nil { 999 w.cw.header = w.handlerHeader.clone() 1000 } 1001 1002 if cl := w.handlerHeader.get("Content-Length"); cl != "" { 1003 v, err := strconv.ParseInt(cl, 10, 64) 1004 if err == nil && v >= 0 { 1005 w.contentLength = v 1006 } else { 1007 w.conn.server.logf("http: invalid Content-Length of %q", cl) 1008 w.handlerHeader.Del("Content-Length") 1009 } 1010 } 1011 } 1012 1013 // extraHeader is the set of headers sometimes added by chunkWriter.writeHeader. 1014 // This type is used to avoid extra allocations from cloning and/or populating 1015 // the response Header map and all its 1-element slices. 1016 type extraHeader struct { 1017 contentType string 1018 connection string 1019 transferEncoding string 1020 date []byte // written if not nil 1021 contentLength []byte // written if not nil 1022 } 1023 1024 // Sorted the same as extraHeader.Write's loop. 1025 var extraHeaderKeys = [][]byte{ 1026 []byte("Content-Type"), 1027 []byte("Connection"), 1028 []byte("Transfer-Encoding"), 1029 } 1030 1031 var ( 1032 headerContentLength = []byte("Content-Length: ") 1033 headerDate = []byte("Date: ") 1034 ) 1035 1036 // Write writes the headers described in h to w. 1037 // 1038 // This method has a value receiver, despite the somewhat large size 1039 // of h, because it prevents an allocation. The escape analysis isn't 1040 // smart enough to realize this function doesn't mutate h. 1041 func (h extraHeader) Write(w *bufio.Writer) { 1042 if h.date != nil { 1043 w.Write(headerDate) 1044 w.Write(h.date) 1045 w.Write(crlf) 1046 } 1047 if h.contentLength != nil { 1048 w.Write(headerContentLength) 1049 w.Write(h.contentLength) 1050 w.Write(crlf) 1051 } 1052 for i, v := range []string{h.contentType, h.connection, h.transferEncoding} { 1053 if v != "" { 1054 w.Write(extraHeaderKeys[i]) 1055 w.Write(colonSpace) 1056 w.WriteString(v) 1057 w.Write(crlf) 1058 } 1059 } 1060 } 1061 1062 // writeHeader finalizes the header sent to the client and writes it 1063 // to cw.res.conn.bufw. 1064 // 1065 // p is not written by writeHeader, but is the first chunk of the body 1066 // that will be written. It is sniffed for a Content-Type if none is 1067 // set explicitly. It's also used to set the Content-Length, if the 1068 // total body size was small and the handler has already finished 1069 // running. 1070 func (cw *chunkWriter) writeHeader(p []byte) { 1071 if cw.wroteHeader { 1072 return 1073 } 1074 cw.wroteHeader = true 1075 1076 w := cw.res 1077 keepAlivesEnabled := w.conn.server.doKeepAlives() 1078 isHEAD := w.req.Method == "HEAD" 1079 1080 // header is written out to w.conn.buf below. Depending on the 1081 // state of the handler, we either own the map or not. If we 1082 // don't own it, the exclude map is created lazily for 1083 // WriteSubset to remove headers. The setHeader struct holds 1084 // headers we need to add. 
1085 header := cw.header 1086 owned := header != nil 1087 if !owned { 1088 header = w.handlerHeader 1089 } 1090 var excludeHeader map[string]bool 1091 delHeader := func(key string) { 1092 if owned { 1093 header.Del(key) 1094 return 1095 } 1096 if _, ok := header[key]; !ok { 1097 return 1098 } 1099 if excludeHeader == nil { 1100 excludeHeader = make(map[string]bool) 1101 } 1102 excludeHeader[key] = true 1103 } 1104 var setHeader extraHeader 1105 1106 trailers := false 1107 for _, v := range cw.header["Trailer"] { 1108 trailers = true 1109 foreachHeaderElement(v, cw.res.declareTrailer) 1110 } 1111 1112 te := header.get("Transfer-Encoding") 1113 hasTE := te != "" 1114 1115 // If the handler is done but never sent a Content-Length 1116 // response header and this is our first (and last) write, set 1117 // it, even to zero. This helps HTTP/1.0 clients keep their 1118 // "keep-alive" connections alive. 1119 // Exceptions: 304/204/1xx responses never get Content-Length, and if 1120 // it was a HEAD request, we don't know the difference between 1121 // 0 actual bytes and 0 bytes because the handler noticed it 1122 // was a HEAD request and chose not to write anything. So for 1123 // HEAD, the handler should either write the Content-Length or 1124 // write non-zero bytes. If it's actually 0 bytes and the 1125 // handler never looked at the Request.Method, we just don't 1126 // send a Content-Length header. 1127 // Further, we don't send an automatic Content-Length if they 1128 // set a Transfer-Encoding, because they're generally incompatible. 1129 if w.handlerDone.isSet() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) { 1130 w.contentLength = int64(len(p)) 1131 setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10) 1132 } 1133 1134 // If this was an HTTP/1.0 request with keep-alive and we sent a 1135 // Content-Length back, we can make this a keep-alive response ... 1136 if w.wants10KeepAlive && keepAlivesEnabled { 1137 sentLength := header.get("Content-Length") != "" 1138 if sentLength && header.get("Connection") == "keep-alive" { 1139 w.closeAfterReply = false 1140 } 1141 } 1142 1143 // Check for a explicit (and valid) Content-Length header. 1144 hasCL := w.contentLength != -1 1145 1146 if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) { 1147 _, connectionHeaderSet := header["Connection"] 1148 if !connectionHeaderSet { 1149 setHeader.connection = "keep-alive" 1150 } 1151 } else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose { 1152 w.closeAfterReply = true 1153 } 1154 1155 if header.get("Connection") == "close" || !keepAlivesEnabled { 1156 w.closeAfterReply = true 1157 } 1158 1159 // If the client wanted a 100-continue but we never sent it to 1160 // them (or, more strictly: we never finished reading their 1161 // request body), don't reuse this connection because it's now 1162 // in an unknown state: we might be sending this response at 1163 // the same time the client is now sending its request body 1164 // after a timeout. (Some HTTP clients send Expect: 1165 // 100-continue but knowing that some servers don't support 1166 // it, the clients set a timer and send the body later anyway) 1167 // If we haven't seen EOF, we can't skip over the unread body 1168 // because we don't know if the next bytes on the wire will be 1169 // the body-following-the-timer or the subsequent request. 1170 // See Issue 11549. 
1171 if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF { 1172 w.closeAfterReply = true 1173 } 1174 1175 // Per RFC 2616, we should consume the request body before 1176 // replying, if the handler hasn't already done so. But we 1177 // don't want to do an unbounded amount of reading here for 1178 // DoS reasons, so we only try up to a threshold. 1179 // TODO(bradfitz): where does RFC 2616 say that? See Issue 15527 1180 // about HTTP/1.x Handlers concurrently reading and writing, like 1181 // HTTP/2 handlers can do. Maybe this code should be relaxed? 1182 if w.req.ContentLength != 0 && !w.closeAfterReply { 1183 var discard, tooBig bool 1184 1185 switch bdy := w.req.Body.(type) { 1186 case *expectContinueReader: 1187 if bdy.resp.wroteContinue { 1188 discard = true 1189 } 1190 case *body: 1191 bdy.mu.Lock() 1192 switch { 1193 case bdy.closed: 1194 if !bdy.sawEOF { 1195 // Body was closed in handler with non-EOF error. 1196 w.closeAfterReply = true 1197 } 1198 case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes: 1199 tooBig = true 1200 default: 1201 discard = true 1202 } 1203 bdy.mu.Unlock() 1204 default: 1205 discard = true 1206 } 1207 1208 if discard { 1209 _, err := io.CopyN(ioutil.Discard, w.reqBody, maxPostHandlerReadBytes+1) 1210 switch err { 1211 case nil: 1212 // There must be even more data left over. 1213 tooBig = true 1214 case ErrBodyReadAfterClose: 1215 // Body was already consumed and closed. 1216 case io.EOF: 1217 // The remaining body was just consumed, close it. 1218 err = w.reqBody.Close() 1219 if err != nil { 1220 w.closeAfterReply = true 1221 } 1222 default: 1223 // Some other kind of error occurred, like a read timeout, or 1224 // corrupt chunked encoding. In any case, whatever remains 1225 // on the wire must not be parsed as another HTTP request. 1226 w.closeAfterReply = true 1227 } 1228 } 1229 1230 if tooBig { 1231 w.requestTooLarge() 1232 delHeader("Connection") 1233 setHeader.connection = "close" 1234 } 1235 } 1236 1237 code := w.status 1238 if bodyAllowedForStatus(code) { 1239 // If no content type, apply sniffing algorithm to body. 1240 _, haveType := header["Content-Type"] 1241 if !haveType && !hasTE { 1242 setHeader.contentType = DetectContentType(p) 1243 } 1244 } else { 1245 for _, k := range suppressedHeaders(code) { 1246 delHeader(k) 1247 } 1248 } 1249 1250 if _, ok := header["Date"]; !ok { 1251 setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now()) 1252 } 1253 1254 if hasCL && hasTE && te != "identity" { 1255 // TODO: return an error if WriteHeader gets a return parameter 1256 // For now just ignore the Content-Length. 1257 w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d", 1258 te, w.contentLength) 1259 delHeader("Content-Length") 1260 hasCL = false 1261 } 1262 1263 if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) { 1264 // do nothing 1265 } else if code == StatusNoContent { 1266 delHeader("Transfer-Encoding") 1267 } else if hasCL { 1268 delHeader("Transfer-Encoding") 1269 } else if w.req.ProtoAtLeast(1, 1) { 1270 // HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no 1271 // content-length has been provided. The connection must be closed after the 1272 // reply is written, and no chunking is to be done. This is the setup 1273 // recommended in the Server-Sent Events candidate recommendation 11, 1274 // section 8. 
1275 if hasTE && te == "identity" { 1276 cw.chunking = false 1277 w.closeAfterReply = true 1278 } else { 1279 // HTTP/1.1 or greater: use chunked transfer encoding 1280 // to avoid closing the connection at EOF. 1281 cw.chunking = true 1282 setHeader.transferEncoding = "chunked" 1283 if hasTE && te == "chunked" { 1284 // We will send the chunked Transfer-Encoding header later. 1285 delHeader("Transfer-Encoding") 1286 } 1287 } 1288 } else { 1289 // HTTP version < 1.1: cannot do chunked transfer 1290 // encoding and we don't know the Content-Length so 1291 // signal EOF by closing connection. 1292 w.closeAfterReply = true 1293 delHeader("Transfer-Encoding") // in case already set 1294 } 1295 1296 // Cannot use Content-Length with non-identity Transfer-Encoding. 1297 if cw.chunking { 1298 delHeader("Content-Length") 1299 } 1300 if !w.req.ProtoAtLeast(1, 0) { 1301 return 1302 } 1303 1304 if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) { 1305 delHeader("Connection") 1306 if w.req.ProtoAtLeast(1, 1) { 1307 setHeader.connection = "close" 1308 } 1309 } 1310 1311 w.conn.bufw.WriteString(statusLine(w.req, code)) 1312 cw.header.WriteSubset(w.conn.bufw, excludeHeader) 1313 setHeader.Write(w.conn.bufw) 1314 w.conn.bufw.Write(crlf) 1315 } 1316 1317 // foreachHeaderElement splits v according to the "#rule" construction 1318 // in RFC 2616 section 2.1 and calls fn for each non-empty element. 1319 func foreachHeaderElement(v string, fn func(string)) { 1320 v = textproto.TrimString(v) 1321 if v == "" { 1322 return 1323 } 1324 if !strings.Contains(v, ",") { 1325 fn(v) 1326 return 1327 } 1328 for _, f := range strings.Split(v, ",") { 1329 if f = textproto.TrimString(f); f != "" { 1330 fn(f) 1331 } 1332 } 1333 } 1334 1335 // statusLines is a cache of Status-Line strings, keyed by code (for 1336 // HTTP/1.1) or negative code (for HTTP/1.0). This is faster than a 1337 // map keyed by struct of two fields. This map's max size is bounded 1338 // by 2*len(statusText), two protocol types for each known official 1339 // status code in the statusText map. 1340 var ( 1341 statusMu sync.RWMutex 1342 statusLines = make(map[int]string) 1343 ) 1344 1345 // statusLine returns a response Status-Line (RFC 2616 Section 6.1) 1346 // for the given request and response status code. 1347 func statusLine(req *Request, code int) string { 1348 // Fast path: 1349 key := code 1350 proto11 := req.ProtoAtLeast(1, 1) 1351 if !proto11 { 1352 key = -key 1353 } 1354 statusMu.RLock() 1355 line, ok := statusLines[key] 1356 statusMu.RUnlock() 1357 if ok { 1358 return line 1359 } 1360 1361 // Slow path: 1362 proto := "HTTP/1.0" 1363 if proto11 { 1364 proto = "HTTP/1.1" 1365 } 1366 codestring := fmt.Sprintf("%03d", code) 1367 text, ok := statusText[code] 1368 if !ok { 1369 text = "status code " + codestring 1370 } 1371 line = proto + " " + codestring + " " + text + "\r\n" 1372 if ok { 1373 statusMu.Lock() 1374 defer statusMu.Unlock() 1375 statusLines[key] = line 1376 } 1377 return line 1378 } 1379 1380 // bodyAllowed reports whether a Write is allowed for this response type. 1381 // It's illegal to call this before the header has been flushed. 1382 func (w *response) bodyAllowed() bool { 1383 if !w.wroteHeader { 1384 panic("") 1385 } 1386 return bodyAllowedForStatus(w.status) 1387 } 1388 1389 // The Life Of A Write is like this: 1390 // 1391 // Handler starts. No header has been sent. The handler can either 1392 // write a header, or just start writing. 
Writing before sending a header 1393 // sends an implicitly empty 200 OK header. 1394 // 1395 // If the handler didn't declare a Content-Length up front, we either 1396 // go into chunking mode or, if the handler finishes running before 1397 // the chunking buffer size, we compute a Content-Length and send that 1398 // in the header instead. 1399 // 1400 // Likewise, if the handler didn't set a Content-Type, we sniff that 1401 // from the initial chunk of output. 1402 // 1403 // The Writers are wired together like: 1404 // 1405 // 1. *response (the ResponseWriter) -> 1406 // 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes 1407 // 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type) 1408 // and which writes the chunk headers, if needed. 1409 // 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to -> 1410 // 5. checkConnErrorWriter{c}, which notes any non-nil error on Write 1411 // and populates c.werr with it if so. but otherwise writes to: 1412 // 6. the rwc, the net.Conn. 1413 // 1414 // TODO(bradfitz): short-circuit some of the buffering when the 1415 // initial header contains both a Content-Type and Content-Length. 1416 // Also short-circuit in (1) when the header's been sent and not in 1417 // chunking mode, writing directly to (4) instead, if (2) has no 1418 // buffered data. More generally, we could short-circuit from (1) to 1419 // (3) even in chunking mode if the write size from (1) is over some 1420 // threshold and nothing is in (2). The answer might be mostly making 1421 // bufferBeforeChunkingSize smaller and having bufio's fast-paths deal 1422 // with this instead. 1423 func (w *response) Write(data []byte) (n int, err error) { 1424 return w.write(len(data), data, "") 1425 } 1426 1427 func (w *response) WriteString(data string) (n int, err error) { 1428 return w.write(len(data), nil, data) 1429 } 1430 1431 // either dataB or dataS is non-zero. 1432 func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) { 1433 if w.conn.hijacked() { 1434 if lenData > 0 { 1435 w.conn.server.logf("http: response.Write on hijacked connection") 1436 } 1437 return 0, ErrHijacked 1438 } 1439 if !w.wroteHeader { 1440 w.WriteHeader(StatusOK) 1441 } 1442 if lenData == 0 { 1443 return 0, nil 1444 } 1445 if !w.bodyAllowed() { 1446 return 0, ErrBodyNotAllowed 1447 } 1448 1449 w.written += int64(lenData) // ignoring errors, for errorKludge 1450 if w.contentLength != -1 && w.written > w.contentLength { 1451 return 0, ErrContentLength 1452 } 1453 if dataB != nil { 1454 return w.w.Write(dataB) 1455 } else { 1456 return w.w.WriteString(dataS) 1457 } 1458 } 1459 1460 func (w *response) finishRequest() { 1461 w.handlerDone.setTrue() 1462 1463 if !w.wroteHeader { 1464 w.WriteHeader(StatusOK) 1465 } 1466 1467 w.w.Flush() 1468 putBufioWriter(w.w) 1469 w.cw.close() 1470 w.conn.bufw.Flush() 1471 1472 w.conn.r.abortPendingRead() 1473 1474 // Close the body (regardless of w.closeAfterReply) so we can 1475 // re-use its bufio.Reader later safely. 1476 w.reqBody.Close() 1477 1478 if w.req.MultipartForm != nil { 1479 w.req.MultipartForm.RemoveAll() 1480 } 1481 } 1482 1483 // shouldReuseConnection reports whether the underlying TCP connection can be reused. 1484 // It must only be called after the handler is done executing. 1485 func (w *response) shouldReuseConnection() bool { 1486 if w.closeAfterReply { 1487 // The request or something set while executing the 1488 // handler indicated we shouldn't reuse this 1489 // connection. 
1490 return false 1491 } 1492 1493 if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written { 1494 // Did not write enough. Avoid getting out of sync. 1495 return false 1496 } 1497 1498 // There was some error writing to the underlying connection 1499 // during the request, so don't re-use this conn. 1500 if w.conn.werr != nil { 1501 return false 1502 } 1503 1504 if w.closedRequestBodyEarly() { 1505 return false 1506 } 1507 1508 return true 1509 } 1510 1511 func (w *response) closedRequestBodyEarly() bool { 1512 body, ok := w.req.Body.(*body) 1513 return ok && body.didEarlyClose() 1514 } 1515 1516 func (w *response) Flush() { 1517 if !w.wroteHeader { 1518 w.WriteHeader(StatusOK) 1519 } 1520 w.w.Flush() 1521 w.cw.flush() 1522 } 1523 1524 func (c *conn) finalFlush() { 1525 if c.bufr != nil { 1526 // Steal the bufio.Reader (~4KB worth of memory) and its associated 1527 // reader for a future connection. 1528 putBufioReader(c.bufr) 1529 c.bufr = nil 1530 } 1531 1532 if c.bufw != nil { 1533 c.bufw.Flush() 1534 // Steal the bufio.Writer (~4KB worth of memory) and its associated 1535 // writer for a future connection. 1536 putBufioWriter(c.bufw) 1537 c.bufw = nil 1538 } 1539 } 1540 1541 // Close the connection. 1542 func (c *conn) close() { 1543 c.finalFlush() 1544 c.rwc.Close() 1545 } 1546 1547 // rstAvoidanceDelay is the amount of time we sleep after closing the 1548 // write side of a TCP connection before closing the entire socket. 1549 // By sleeping, we increase the chances that the client sees our FIN 1550 // and processes its final data before they process the subsequent RST 1551 // from closing a connection with known unread data. 1552 // This RST seems to occur mostly on BSD systems. (And Windows?) 1553 // This timeout is somewhat arbitrary (~latency around the planet). 1554 const rstAvoidanceDelay = 500 * time.Millisecond 1555 1556 type closeWriter interface { 1557 CloseWrite() error 1558 } 1559 1560 var _ closeWriter = (*net.TCPConn)(nil) 1561 1562 // closeWrite flushes any outstanding data and sends a FIN packet (if 1563 // client is connected via TCP), signalling that we're done. We then 1564 // pause for a bit, hoping the client processes it before any 1565 // subsequent RST. 1566 // 1567 // See https://golang.org/issue/3595 1568 func (c *conn) closeWriteAndWait() { 1569 c.finalFlush() 1570 if tcp, ok := c.rwc.(closeWriter); ok { 1571 tcp.CloseWrite() 1572 } 1573 time.Sleep(rstAvoidanceDelay) 1574 } 1575 1576 // validNPN reports whether the proto is not a blacklisted Next 1577 // Protocol Negotiation protocol. Empty and built-in protocol types 1578 // are blacklisted and can't be overridden with alternate 1579 // implementations. 1580 func validNPN(proto string) bool { 1581 switch proto { 1582 case "", "http/1.1", "http/1.0": 1583 return false 1584 } 1585 return true 1586 } 1587 1588 func (c *conn) setState(nc net.Conn, state ConnState) { 1589 if hook := c.server.ConnState; hook != nil { 1590 hook(nc, state) 1591 } 1592 } 1593 1594 // badRequestError is a literal string (used by in the server in HTML, 1595 // unescaped) to tell the user why their request was bad. It should 1596 // be plain text without user info or other embedded errors. 1597 type badRequestError string 1598 1599 func (e badRequestError) Error() string { return "Bad Request: " + string(e) } 1600 1601 // Serve a new connection. 
1602 func (c *conn) serve(ctx context.Context) { 1603 c.remoteAddr = c.rwc.RemoteAddr().String() 1604 defer func() { 1605 if err := recover(); err != nil { 1606 const size = 64 << 10 1607 buf := make([]byte, size) 1608 buf = buf[:runtime.Stack(buf, false)] 1609 c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf) 1610 } 1611 if !c.hijacked() { 1612 c.close() 1613 c.setState(c.rwc, StateClosed) 1614 } 1615 }() 1616 1617 if tlsConn, ok := c.rwc.(*tls.Conn); ok { 1618 if d := c.server.ReadTimeout; d != 0 { 1619 c.rwc.SetReadDeadline(time.Now().Add(d)) 1620 } 1621 if d := c.server.WriteTimeout; d != 0 { 1622 c.rwc.SetWriteDeadline(time.Now().Add(d)) 1623 } 1624 if err := tlsConn.Handshake(); err != nil { 1625 c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err) 1626 return 1627 } 1628 c.tlsState = new(tls.ConnectionState) 1629 *c.tlsState = tlsConn.ConnectionState() 1630 if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) { 1631 if fn := c.server.TLSNextProto[proto]; fn != nil { 1632 h := initNPNRequest{tlsConn, serverHandler{c.server}} 1633 fn(c.server, tlsConn, h) 1634 } 1635 return 1636 } 1637 } 1638 1639 // HTTP/1.x from here on. 1640 1641 ctx, cancelCtx := context.WithCancel(ctx) 1642 c.cancelCtx = cancelCtx 1643 defer cancelCtx() 1644 1645 c.r = &connReader{conn: c} 1646 c.bufr = newBufioReader(c.r) 1647 c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10) 1648 1649 for { 1650 w, err := c.readRequest(ctx) 1651 if c.r.remain != c.server.initialReadLimitSize() { 1652 // If we read any bytes off the wire, we're active. 1653 c.setState(c.rwc, StateActive) 1654 } 1655 if err != nil { 1656 if err == errTooLarge { 1657 // Their HTTP client may or may not be 1658 // able to read this if we're 1659 // responding to them and hanging up 1660 // while they're still writing their 1661 // request. Undefined behavior. 1662 io.WriteString(c.rwc, "HTTP/1.1 431 Request Header Fields Too Large\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n431 Request Header Fields Too Large") 1663 c.closeWriteAndWait() 1664 return 1665 } 1666 if err == io.EOF { 1667 return // don't reply 1668 } 1669 if neterr, ok := err.(net.Error); ok && neterr.Timeout() { 1670 return // don't reply 1671 } 1672 var publicErr string 1673 if v, ok := err.(badRequestError); ok { 1674 publicErr = ": " + string(v) 1675 } 1676 io.WriteString(c.rwc, "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n400 Bad Request"+publicErr) 1677 return 1678 } 1679 1680 // Expect 100 Continue support 1681 req := w.req 1682 if req.expectsContinue() { 1683 if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 { 1684 // Wrap the Body reader with one that replies on the connection 1685 req.Body = &expectContinueReader{readCloser: req.Body, resp: w} 1686 } 1687 } else if req.Header.get("Expect") != "" { 1688 w.sendExpectationFailed() 1689 return 1690 } 1691 1692 c.curReq.Store(w) 1693 1694 if requestBodyRemains(req.Body) { 1695 registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead) 1696 } else { 1697 if w.conn.bufr.Buffered() > 0 { 1698 w.conn.r.closeNotifyFromPipelinedRequest() 1699 } 1700 w.conn.r.startBackgroundRead() 1701 } 1702 1703 // HTTP cannot have multiple simultaneous active requests.[*] 1704 // Until the server replies to this request, it can't read another, 1705 // so we might as well run the handler in this goroutine. 1706 // [*] Not strictly true: HTTP pipelining. 
We could let them all process 1707 // in parallel even if their responses need to be serialized. 1708 // But we're not going to implement HTTP pipelining because it 1709 // was never deployed in the wild and the answer is HTTP/2. 1710 serverHandler{c.server}.ServeHTTP(w, w.req) 1711 w.cancelCtx() 1712 if c.hijacked() { 1713 return 1714 } 1715 w.finishRequest() 1716 if !w.shouldReuseConnection() { 1717 if w.requestBodyLimitHit || w.closedRequestBodyEarly() { 1718 c.closeWriteAndWait() 1719 } 1720 return 1721 } 1722 c.setState(c.rwc, StateIdle) 1723 c.curReq.Store((*response)(nil)) 1724 1725 if d := c.server.idleTimeout(); d != 0 { 1726 c.rwc.SetReadDeadline(time.Now().Add(d)) 1727 if _, err := c.bufr.Peek(4); err != nil { 1728 return 1729 } 1730 } 1731 c.rwc.SetReadDeadline(time.Time{}) 1732 } 1733 } 1734 1735 func (w *response) sendExpectationFailed() { 1736 // TODO(bradfitz): let ServeHTTP handlers handle 1737 // requests with non-standard expectation[s]? Seems 1738 // theoretical at best, and doesn't fit into the 1739 // current ServeHTTP model anyway. We'd need to 1740 // make the ResponseWriter an optional 1741 // "ExpectReplier" interface or something. 1742 // 1743 // For now we'll just obey RFC 2616 14.20 which says 1744 // "If a server receives a request containing an 1745 // Expect field that includes an expectation- 1746 // extension that it does not support, it MUST 1747 // respond with a 417 (Expectation Failed) status." 1748 w.Header().Set("Connection", "close") 1749 w.WriteHeader(StatusExpectationFailed) 1750 w.finishRequest() 1751 } 1752 1753 // Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter 1754 // and a Hijacker. 1755 func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { 1756 if w.handlerDone.isSet() { 1757 panic("net/http: Hijack called after ServeHTTP finished") 1758 } 1759 if w.wroteHeader { 1760 w.cw.flush() 1761 } 1762 1763 c := w.conn 1764 c.mu.Lock() 1765 defer c.mu.Unlock() 1766 1767 // Release the bufioWriter that writes to the chunk writer, it is not 1768 // used after a connection has been hijacked. 1769 rwc, buf, err = c.hijackLocked() 1770 if err == nil { 1771 putBufioWriter(w.w) 1772 w.w = nil 1773 } 1774 return rwc, buf, err 1775 } 1776 1777 func (w *response) CloseNotify() <-chan bool { 1778 if w.handlerDone.isSet() { 1779 panic("net/http: CloseNotify called after ServeHTTP finished") 1780 } 1781 return w.closeNotifyCh 1782 } 1783 1784 func registerOnHitEOF(rc io.ReadCloser, fn func()) { 1785 switch v := rc.(type) { 1786 case *expectContinueReader: 1787 registerOnHitEOF(v.readCloser, fn) 1788 case *body: 1789 v.registerOnHitEOF(fn) 1790 default: 1791 panic("unexpected type " + fmt.Sprintf("%T", rc)) 1792 } 1793 } 1794 1795 // requestBodyRemains reports whether future calls to Read 1796 // on rc might yield more data. 1797 func requestBodyRemains(rc io.ReadCloser) bool { 1798 if rc == NoBody { 1799 return false 1800 } 1801 switch v := rc.(type) { 1802 case *expectContinueReader: 1803 return requestBodyRemains(v.readCloser) 1804 case *body: 1805 return v.bodyRemains() 1806 default: 1807 panic("unexpected type " + fmt.Sprintf("%T", rc)) 1808 } 1809 } 1810 1811 // The HandlerFunc type is an adapter to allow the use of 1812 // ordinary functions as HTTP handlers. If f is a function 1813 // with the appropriate signature, HandlerFunc(f) is a 1814 // Handler that calls f. 1815 type HandlerFunc func(ResponseWriter, *Request) 1816 1817 // ServeHTTP calls f(w, r). 
1818 func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) { 1819 f(w, r) 1820 } 1821 1822 // Helper handlers 1823 1824 // Error replies to the request with the specified error message and HTTP code. 1825 // It does not otherwise end the request; the caller should ensure no further 1826 // writes are done to w. 1827 // The error message should be plain text. 1828 func Error(w ResponseWriter, error string, code int) { 1829 w.Header().Set("Content-Type", "text/plain; charset=utf-8") 1830 w.Header().Set("X-Content-Type-Options", "nosniff") 1831 w.WriteHeader(code) 1832 fmt.Fprintln(w, error) 1833 } 1834 1835 // NotFound replies to the request with an HTTP 404 not found error. 1836 func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) } 1837 1838 // NotFoundHandler returns a simple request handler 1839 // that replies to each request with a ``404 page not found'' reply. 1840 func NotFoundHandler() Handler { return HandlerFunc(NotFound) } 1841 1842 // StripPrefix returns a handler that serves HTTP requests 1843 // by removing the given prefix from the request URL's Path 1844 // and invoking the handler h. StripPrefix handles a 1845 // request for a path that doesn't begin with prefix by 1846 // replying with an HTTP 404 not found error. 1847 func StripPrefix(prefix string, h Handler) Handler { 1848 if prefix == "" { 1849 return h 1850 } 1851 return HandlerFunc(func(w ResponseWriter, r *Request) { 1852 if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) { 1853 r.URL.Path = p 1854 h.ServeHTTP(w, r) 1855 } else { 1856 NotFound(w, r) 1857 } 1858 }) 1859 } 1860 1861 // Redirect replies to the request with a redirect to url, 1862 // which may be a path relative to the request path. 1863 // 1864 // The provided code should be in the 3xx range and is usually 1865 // StatusMovedPermanently, StatusFound or StatusSeeOther. 1866 func Redirect(w ResponseWriter, r *Request, urlStr string, code int) { 1867 if u, err := url.Parse(urlStr); err == nil { 1868 // If url was relative, make absolute by 1869 // combining with request path. 1870 // The browser would probably do this for us, 1871 // but doing it ourselves is more reliable. 1872 1873 // NOTE(rsc): RFC 2616 says that the Location 1874 // line must be an absolute URI, like 1875 // "http://www.google.com/redirect/", 1876 // not a path like "/redirect/". 1877 // Unfortunately, we don't know what to 1878 // put in the host name section to get the 1879 // client to connect to us again, so we can't 1880 // know the right absolute URI to send back. 1881 // Because of this problem, no one pays attention 1882 // to the RFC; they all send back just a new path. 1883 // So do we. 
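// As a worked example of the resolution performed below (values are
// illustrative): for a request whose URL.Path is "/articles/2016/index.html"
// and urlStr "edit?draft=1", the branch below computes
//
//	olddir = "/articles/2016/"                  // from path.Split
//	urlStr = "/articles/2016/edit" + "?draft=1" // join, split query, Clean, re-append
//
// so the Location header carries a rooted path rather than an absolute URI.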
1884 if u.Scheme == "" && u.Host == "" { 1885 oldpath := r.URL.Path 1886 if oldpath == "" { // should not happen, but avoid a crash if it does 1887 oldpath = "/" 1888 } 1889 1890 // no leading http://server 1891 if urlStr == "" || urlStr[0] != '/' { 1892 // make relative path absolute 1893 olddir, _ := path.Split(oldpath) 1894 urlStr = olddir + urlStr 1895 } 1896 1897 var query string 1898 if i := strings.Index(urlStr, "?"); i != -1 { 1899 urlStr, query = urlStr[:i], urlStr[i:] 1900 } 1901 1902 // clean up but preserve trailing slash 1903 trailing := strings.HasSuffix(urlStr, "/") 1904 urlStr = path.Clean(urlStr) 1905 if trailing && !strings.HasSuffix(urlStr, "/") { 1906 urlStr += "/" 1907 } 1908 urlStr += query 1909 } 1910 } 1911 1912 w.Header().Set("Location", hexEscapeNonASCII(urlStr)) 1913 w.WriteHeader(code) 1914 1915 // RFC 2616 recommends that a short note "SHOULD" be included in the 1916 // response because older user agents may not understand 301/307. 1917 // Shouldn't send the response for POST or HEAD; that leaves GET. 1918 if r.Method == "GET" { 1919 note := "<a href=\"" + htmlEscape(urlStr) + "\">" + statusText[code] + "</a>.\n" 1920 fmt.Fprintln(w, note) 1921 } 1922 } 1923 1924 var htmlReplacer = strings.NewReplacer( 1925 "&", "&amp;", 1926 "<", "&lt;", 1927 ">", "&gt;", 1928 // "&#34;" is shorter than "&quot;". 1929 `"`, "&#34;", 1930 // "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5. 1931 "'", "&#39;", 1932 ) 1933 1934 func htmlEscape(s string) string { 1935 return htmlReplacer.Replace(s) 1936 } 1937 1938 // Redirect to a fixed URL 1939 type redirectHandler struct { 1940 url string 1941 code int 1942 } 1943 1944 func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) { 1945 Redirect(w, r, rh.url, rh.code) 1946 } 1947 1948 // RedirectHandler returns a request handler that redirects 1949 // each request it receives to the given url using the given 1950 // status code. 1951 // 1952 // The provided code should be in the 3xx range and is usually 1953 // StatusMovedPermanently, StatusFound or StatusSeeOther. 1954 func RedirectHandler(url string, code int) Handler { 1955 return &redirectHandler{url, code} 1956 } 1957 1958 // ServeMux is an HTTP request multiplexer. 1959 // It matches the URL of each incoming request against a list of registered 1960 // patterns and calls the handler for the pattern that 1961 // most closely matches the URL. 1962 // 1963 // Patterns name fixed, rooted paths, like "/favicon.ico", 1964 // or rooted subtrees, like "/images/" (note the trailing slash). 1965 // Longer patterns take precedence over shorter ones, so that 1966 // if there are handlers registered for both "/images/" 1967 // and "/images/thumbnails/", the latter handler will be 1968 // called for paths beginning "/images/thumbnails/" and the 1969 // former will receive requests for any other paths in the 1970 // "/images/" subtree. 1971 // 1972 // Note that since a pattern ending in a slash names a rooted subtree, 1973 // the pattern "/" matches all paths not matched by other registered 1974 // patterns, not just the URL with Path == "/". 1975 // 1976 // If a subtree has been registered and a request is received naming the 1977 // subtree root without its trailing slash, ServeMux redirects that 1978 // request to the subtree root (adding the trailing slash). This behavior can 1979 // be overridden with a separate registration for the path without 1980 // the trailing slash.
For example, registering "/images/" causes ServeMux 1981 // to redirect a request for "/images" to "/images/", unless "/images" has 1982 // been registered separately. 1983 // 1984 // Patterns may optionally begin with a host name, restricting matches to 1985 // URLs on that host only. Host-specific patterns take precedence over 1986 // general patterns, so that a handler might register for the two patterns 1987 // "/codesearch" and "codesearch.google.com/" without also taking over 1988 // requests for "http://www.google.com/". 1989 // 1990 // ServeMux also takes care of sanitizing the URL request path, 1991 // redirecting any request containing . or .. elements or repeated slashes 1992 // to an equivalent, cleaner URL. 1993 type ServeMux struct { 1994 mu sync.RWMutex 1995 m map[string]muxEntry 1996 hosts bool // whether any patterns contain hostnames 1997 } 1998 1999 type muxEntry struct { 2000 explicit bool 2001 h Handler 2002 pattern string 2003 } 2004 2005 // NewServeMux allocates and returns a new ServeMux. 2006 func NewServeMux() *ServeMux { return new(ServeMux) } 2007 2008 // DefaultServeMux is the default ServeMux used by Serve. 2009 var DefaultServeMux = &defaultServeMux 2010 2011 var defaultServeMux ServeMux 2012 2013 // Does path match pattern? 2014 func pathMatch(pattern, path string) bool { 2015 if len(pattern) == 0 { 2016 // should not happen 2017 return false 2018 } 2019 n := len(pattern) 2020 if pattern[n-1] != '/' { 2021 return pattern == path 2022 } 2023 return len(path) >= n && path[0:n] == pattern 2024 } 2025 2026 // Return the canonical path for p, eliminating . and .. elements. 2027 func cleanPath(p string) string { 2028 if p == "" { 2029 return "/" 2030 } 2031 if p[0] != '/' { 2032 p = "/" + p 2033 } 2034 np := path.Clean(p) 2035 // path.Clean removes trailing slash except for root; 2036 // put the trailing slash back if necessary. 2037 if p[len(p)-1] == '/' && np != "/" { 2038 np += "/" 2039 } 2040 return np 2041 } 2042 2043 // Find a handler on a handler map given a path string 2044 // Most-specific (longest) pattern wins 2045 func (mux *ServeMux) match(path string) (h Handler, pattern string) { 2046 var n = 0 2047 for k, v := range mux.m { 2048 if !pathMatch(k, path) { 2049 continue 2050 } 2051 if h == nil || len(k) > n { 2052 n = len(k) 2053 h = v.h 2054 pattern = v.pattern 2055 } 2056 } 2057 return 2058 } 2059 2060 // Handler returns the handler to use for the given request, 2061 // consulting r.Method, r.Host, and r.URL.Path. It always returns 2062 // a non-nil handler. If the path is not in its canonical form, the 2063 // handler will be an internally-generated handler that redirects 2064 // to the canonical path. 2065 // 2066 // Handler also returns the registered pattern that matches the 2067 // request or, in the case of internally-generated redirects, 2068 // the pattern that will match after following the redirect. 2069 // 2070 // If there is no registered handler that applies to the request, 2071 // Handler returns a ``page not found'' handler and an empty pattern. 2072 func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) { 2073 if r.Method != "CONNECT" { 2074 if p := cleanPath(r.URL.Path); p != r.URL.Path { 2075 _, pattern = mux.handler(r.Host, p) 2076 url := *r.URL 2077 url.Path = p 2078 return RedirectHandler(url.String(), StatusMovedPermanently), pattern 2079 } 2080 } 2081 2082 return mux.handler(r.Host, r.URL.Path) 2083 } 2084 2085 // handler is the main implementation of Handler. 
2086 // The path is known to be in canonical form, except for CONNECT methods. 2087 func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) { 2088 mux.mu.RLock() 2089 defer mux.mu.RUnlock() 2090 2091 // Host-specific pattern takes precedence over generic ones 2092 if mux.hosts { 2093 h, pattern = mux.match(host + path) 2094 } 2095 if h == nil { 2096 h, pattern = mux.match(path) 2097 } 2098 if h == nil { 2099 h, pattern = NotFoundHandler(), "" 2100 } 2101 return 2102 } 2103 2104 // ServeHTTP dispatches the request to the handler whose 2105 // pattern most closely matches the request URL. 2106 func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) { 2107 if r.RequestURI == "*" { 2108 if r.ProtoAtLeast(1, 1) { 2109 w.Header().Set("Connection", "close") 2110 } 2111 w.WriteHeader(StatusBadRequest) 2112 return 2113 } 2114 h, _ := mux.Handler(r) 2115 h.ServeHTTP(w, r) 2116 } 2117 2118 // Handle registers the handler for the given pattern. 2119 // If a handler already exists for pattern, Handle panics. 2120 func (mux *ServeMux) Handle(pattern string, handler Handler) { 2121 mux.mu.Lock() 2122 defer mux.mu.Unlock() 2123 2124 if pattern == "" { 2125 panic("http: invalid pattern " + pattern) 2126 } 2127 if handler == nil { 2128 panic("http: nil handler") 2129 } 2130 if mux.m[pattern].explicit { 2131 panic("http: multiple registrations for " + pattern) 2132 } 2133 2134 if mux.m == nil { 2135 mux.m = make(map[string]muxEntry) 2136 } 2137 mux.m[pattern] = muxEntry{explicit: true, h: handler, pattern: pattern} 2138 2139 if pattern[0] != '/' { 2140 mux.hosts = true 2141 } 2142 2143 // Helpful behavior: 2144 // If pattern is /tree/, insert an implicit permanent redirect for /tree. 2145 // It can be overridden by an explicit registration. 2146 n := len(pattern) 2147 if n > 0 && pattern[n-1] == '/' && !mux.m[pattern[0:n-1]].explicit { 2148 // If pattern contains a host name, strip it and use remaining 2149 // path for redirect. 2150 path := pattern 2151 if pattern[0] != '/' { 2152 // In pattern, at least the last character is a '/', so 2153 // strings.Index can't be -1. 2154 path = pattern[strings.Index(pattern, "/"):] 2155 } 2156 url := &url.URL{Path: path} 2157 mux.m[pattern[0:n-1]] = muxEntry{h: RedirectHandler(url.String(), StatusMovedPermanently), pattern: pattern} 2158 } 2159 } 2160 2161 // HandleFunc registers the handler function for the given pattern. 2162 func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { 2163 mux.Handle(pattern, HandlerFunc(handler)) 2164 } 2165 2166 // Handle registers the handler for the given pattern 2167 // in the DefaultServeMux. 2168 // The documentation for ServeMux explains how patterns are matched. 2169 func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } 2170 2171 // HandleFunc registers the handler function for the given pattern 2172 // in the DefaultServeMux. 2173 // The documentation for ServeMux explains how patterns are matched. 2174 func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { 2175 DefaultServeMux.HandleFunc(pattern, handler) 2176 } 2177 2178 // Serve accepts incoming HTTP connections on the listener l, 2179 // creating a new service goroutine for each. The service goroutines 2180 // read requests and then call handler to reply to them. 2181 // Handler is typically nil, in which case the DefaultServeMux is used. 
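// A sketch of the longest-pattern-wins matching and the implicit redirect
// described in the ServeMux documentation above; handler names here are
// placeholders:
//
//	mux := http.NewServeMux()
//	mux.Handle("/images/", imagesHandler)            // rooted subtree
//	mux.Handle("/images/thumbnails/", thumbsHandler) // longer, more specific
//	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
//		fmt.Fprintln(w, "everything else")
//	})
//
//	// "/images/thumbnails/x.png" -> thumbsHandler (longest pattern wins)
//	// "/images/logo.png"         -> imagesHandler
//	// "/images"                  -> implicit 301 redirect to "/images/"
//	// "/about"                   -> the "/" handler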
2182 func Serve(l net.Listener, handler Handler) error { 2183 srv := &Server{Handler: handler} 2184 return srv.Serve(l) 2185 } 2186 2187 // A Server defines parameters for running an HTTP server. 2188 // The zero value for Server is a valid configuration. 2189 type Server struct { 2190 Addr string // TCP address to listen on, ":http" if empty 2191 Handler Handler // handler to invoke, http.DefaultServeMux if nil 2192 TLSConfig *tls.Config // optional TLS config, used by ListenAndServeTLS 2193 2194 // ReadTimeout is the maximum duration for reading the entire 2195 // request, including the body. 2196 // 2197 // Because ReadTimeout does not let Handlers make per-request 2198 // decisions on each request body's acceptable deadline or 2199 // upload rate, most users will prefer to use 2200 // ReadHeaderTimeout. It is valid to use them both. 2201 ReadTimeout time.Duration 2202 2203 // ReadHeaderTimeout is the amount of time allowed to read 2204 // request headers. The connection's read deadline is reset 2205 // after reading the headers and the Handler can decide what 2206 // is considered too slow for the body. 2207 ReadHeaderTimeout time.Duration 2208 2209 // WriteTimeout is the maximum duration before timing out 2210 // writes of the response. It is reset whenever a new 2211 // request's header is read. Like ReadTimeout, it does not 2212 // let Handlers make decisions on a per-request basis. 2213 WriteTimeout time.Duration 2214 2215 // IdleTimeout is the maximum amount of time to wait for the 2216 // next request when keep-alives are enabled. If IdleTimeout 2217 // is zero, the value of ReadTimeout is used. If both are 2218 // zero, there is no timeout. 2219 IdleTimeout time.Duration 2220 2221 // MaxHeaderBytes controls the maximum number of bytes the 2222 // server will read parsing the request header's keys and 2223 // values, including the request line. It does not limit the 2224 // size of the request body. 2225 // If zero, DefaultMaxHeaderBytes is used. 2226 MaxHeaderBytes int 2227 2228 // TLSNextProto optionally specifies a function to take over 2229 // ownership of the provided TLS connection when an NPN/ALPN 2230 // protocol upgrade has occurred. The map key is the protocol 2231 // name negotiated. The Handler argument should be used to 2232 // handle HTTP requests and will initialize the Request's TLS 2233 // and RemoteAddr if not already set. The connection is 2234 // automatically closed when the function returns. 2235 // If TLSNextProto is nil, HTTP/2 support is enabled automatically. 2236 TLSNextProto map[string]func(*Server, *tls.Conn, Handler) 2237 2238 // ConnState specifies an optional callback function that is 2239 // called when a client connection changes state. See the 2240 // ConnState type and associated constants for details. 2241 ConnState func(net.Conn, ConnState) 2242 2243 // ErrorLog specifies an optional logger for errors accepting 2244 // connections and unexpected behavior from handlers. 2245 // If nil, logging goes to os.Stderr via the log package's 2246 // standard logger. 2247 ErrorLog *log.Logger 2248 2249 disableKeepAlives int32 // accessed atomically. 2250 nextProtoOnce sync.Once // guards setupHTTP2_* init 2251 nextProtoErr error // result of http2.ConfigureServer if used 2252 } 2253 2254 // A ConnState represents the state of a client connection to a server. 2255 // It's used by the optional Server.ConnState hook. 
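// A sketch of configuring the Server fields above (address, durations, and
// log output are illustrative only):
//
//	srv := &http.Server{
//		Addr:              ":8080",
//		Handler:           mux, // nil means DefaultServeMux
//		ReadHeaderTimeout: 5 * time.Second,
//		WriteTimeout:      10 * time.Second,
//		IdleTimeout:       120 * time.Second,
//		ConnState: func(c net.Conn, state http.ConnState) {
//			log.Printf("conn %v: %v", c.RemoteAddr(), state)
//		},
//	}
//	log.Fatal(srv.ListenAndServe())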
2256 type ConnState int 2257 2258 const ( 2259 // StateNew represents a new connection that is expected to 2260 // send a request immediately. Connections begin at this 2261 // state and then transition to either StateActive or 2262 // StateClosed. 2263 StateNew ConnState = iota 2264 2265 // StateActive represents a connection that has read 1 or more 2266 // bytes of a request. The Server.ConnState hook for 2267 // StateActive fires before the request has entered a handler 2268 // and doesn't fire again until the request has been 2269 // handled. After the request is handled, the state 2270 // transitions to StateClosed, StateHijacked, or StateIdle. 2271 // For HTTP/2, StateActive fires on the transition from zero 2272 // to one active request, and only transitions away once all 2273 // active requests are complete. That means that ConnState 2274 // cannot be used to do per-request work; ConnState only notes 2275 // the overall state of the connection. 2276 StateActive 2277 2278 // StateIdle represents a connection that has finished 2279 // handling a request and is in the keep-alive state, waiting 2280 // for a new request. Connections transition from StateIdle 2281 // to either StateActive or StateClosed. 2282 StateIdle 2283 2284 // StateHijacked represents a hijacked connection. 2285 // This is a terminal state. It does not transition to StateClosed. 2286 StateHijacked 2287 2288 // StateClosed represents a closed connection. 2289 // This is a terminal state. Hijacked connections do not 2290 // transition to StateClosed. 2291 StateClosed 2292 ) 2293 2294 var stateName = map[ConnState]string{ 2295 StateNew: "new", 2296 StateActive: "active", 2297 StateIdle: "idle", 2298 StateHijacked: "hijacked", 2299 StateClosed: "closed", 2300 } 2301 2302 func (c ConnState) String() string { 2303 return stateName[c] 2304 } 2305 2306 // serverHandler delegates to either the server's Handler or 2307 // DefaultServeMux and also handles "OPTIONS *" requests. 2308 type serverHandler struct { 2309 srv *Server 2310 } 2311 2312 func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) { 2313 handler := sh.srv.Handler 2314 if handler == nil { 2315 handler = DefaultServeMux 2316 } 2317 if req.RequestURI == "*" && req.Method == "OPTIONS" { 2318 handler = globalOptionsHandler{} 2319 } 2320 handler.ServeHTTP(rw, req) 2321 } 2322 2323 // ListenAndServe listens on the TCP network address srv.Addr and then 2324 // calls Serve to handle requests on incoming connections. 2325 // Accepted connections are configured to enable TCP keep-alives. 2326 // If srv.Addr is blank, ":http" is used. 2327 // ListenAndServe always returns a non-nil error. 2328 func (srv *Server) ListenAndServe() error { 2329 addr := srv.Addr 2330 if addr == "" { 2331 addr = ":http" 2332 } 2333 ln, err := net.Listen("tcp", addr) 2334 if err != nil { 2335 return err 2336 } 2337 return srv.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)}) 2338 } 2339 2340 var testHookServerServe func(*Server, net.Listener) // used if non-nil 2341 2342 // shouldDoServeHTTP2 reports whether Server.Serve should configure 2343 // automatic HTTP/2. (which sets up the srv.TLSNextProto map) 2344 func (srv *Server) shouldConfigureHTTP2ForServe() bool { 2345 if srv.TLSConfig == nil { 2346 // Compatibility with Go 1.6: 2347 // If there's no TLSConfig, it's possible that the user just 2348 // didn't set it on the http.Server, but did pass it to 2349 // tls.NewListener and passed that listener to Serve. 
2350 // So we should configure HTTP/2 (to set up srv.TLSNextProto) 2351 // in case the listener returns an "h2" *tls.Conn. 2352 return true 2353 } 2354 // The user specified a TLSConfig on their http.Server. 2355 // In this, case, only configure HTTP/2 if their tls.Config 2356 // explicitly mentions "h2". Otherwise http2.ConfigureServer 2357 // would modify the tls.Config to add it, but they probably already 2358 // passed this tls.Config to tls.NewListener. And if they did, 2359 // it's too late anyway to fix it. It would only be potentially racy. 2360 // See Issue 15908. 2361 return strSliceContains(srv.TLSConfig.NextProtos, http2NextProtoTLS) 2362 } 2363 2364 // Serve accepts incoming connections on the Listener l, creating a 2365 // new service goroutine for each. The service goroutines read requests and 2366 // then call srv.Handler to reply to them. 2367 // 2368 // For HTTP/2 support, srv.TLSConfig should be initialized to the 2369 // provided listener's TLS Config before calling Serve. If 2370 // srv.TLSConfig is non-nil and doesn't include the string "h2" in 2371 // Config.NextProtos, HTTP/2 support is not enabled. 2372 // 2373 // Serve always returns a non-nil error. 2374 func (srv *Server) Serve(l net.Listener) error { 2375 defer l.Close() 2376 if fn := testHookServerServe; fn != nil { 2377 fn(srv, l) 2378 } 2379 var tempDelay time.Duration // how long to sleep on accept failure 2380 2381 if err := srv.setupHTTP2_Serve(); err != nil { 2382 return err 2383 } 2384 2385 baseCtx := context.Background() // base is always background, per Issue 16220 2386 ctx := context.WithValue(baseCtx, ServerContextKey, srv) 2387 ctx = context.WithValue(ctx, LocalAddrContextKey, l.Addr()) 2388 for { 2389 rw, e := l.Accept() 2390 if e != nil { 2391 if ne, ok := e.(net.Error); ok && ne.Temporary() { 2392 if tempDelay == 0 { 2393 tempDelay = 5 * time.Millisecond 2394 } else { 2395 tempDelay *= 2 2396 } 2397 if max := 1 * time.Second; tempDelay > max { 2398 tempDelay = max 2399 } 2400 srv.logf("http: Accept error: %v; retrying in %v", e, tempDelay) 2401 time.Sleep(tempDelay) 2402 continue 2403 } 2404 return e 2405 } 2406 tempDelay = 0 2407 c := srv.newConn(rw) 2408 c.setState(c.rwc, StateNew) // before Serve can return 2409 go c.serve(ctx) 2410 } 2411 } 2412 2413 func (s *Server) idleTimeout() time.Duration { 2414 if s.IdleTimeout != 0 { 2415 return s.IdleTimeout 2416 } 2417 return s.ReadTimeout 2418 } 2419 2420 func (s *Server) readHeaderTimeout() time.Duration { 2421 if s.ReadHeaderTimeout != 0 { 2422 return s.ReadHeaderTimeout 2423 } 2424 return s.ReadTimeout 2425 } 2426 2427 func (s *Server) doKeepAlives() bool { 2428 return atomic.LoadInt32(&s.disableKeepAlives) == 0 2429 } 2430 2431 // SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled. 2432 // By default, keep-alives are always enabled. Only very 2433 // resource-constrained environments or servers in the process of 2434 // shutting down should disable them. 2435 func (srv *Server) SetKeepAlivesEnabled(v bool) { 2436 if v { 2437 atomic.StoreInt32(&srv.disableKeepAlives, 0) 2438 } else { 2439 atomic.StoreInt32(&srv.disableKeepAlives, 1) 2440 } 2441 } 2442 2443 func (s *Server) logf(format string, args ...interface{}) { 2444 if s.ErrorLog != nil { 2445 s.ErrorLog.Printf(format, args...) 2446 } else { 2447 log.Printf(format, args...) 2448 } 2449 } 2450 2451 // ListenAndServe listens on the TCP network address addr 2452 // and then calls Serve with handler to handle requests 2453 // on incoming connections. 
2454 // Accepted connections are configured to enable TCP keep-alives. 2455 // Handler is typically nil, in which case the DefaultServeMux is 2456 // used. 2457 // 2458 // A trivial example server is: 2459 // 2460 // package main 2461 // 2462 // import ( 2463 // "io" 2464 // "net/http" 2465 // "log" 2466 // ) 2467 // 2468 // // hello world, the web server 2469 // func HelloServer(w http.ResponseWriter, req *http.Request) { 2470 // io.WriteString(w, "hello, world!\n") 2471 // } 2472 // 2473 // func main() { 2474 // http.HandleFunc("/hello", HelloServer) 2475 // log.Fatal(http.ListenAndServe(":12345", nil)) 2476 // } 2477 // 2478 // ListenAndServe always returns a non-nil error. 2479 func ListenAndServe(addr string, handler Handler) error { 2480 server := &Server{Addr: addr, Handler: handler} 2481 return server.ListenAndServe() 2482 } 2483 2484 // ListenAndServeTLS acts identically to ListenAndServe, except that it 2485 // expects HTTPS connections. Additionally, files containing a certificate and 2486 // matching private key for the server must be provided. If the certificate 2487 // is signed by a certificate authority, the certFile should be the concatenation 2488 // of the server's certificate, any intermediates, and the CA's certificate. 2489 // 2490 // A trivial example server is: 2491 // 2492 // import ( 2493 // "log" 2494 // "net/http" 2495 // ) 2496 // 2497 // func handler(w http.ResponseWriter, req *http.Request) { 2498 // w.Header().Set("Content-Type", "text/plain") 2499 // w.Write([]byte("This is an example server.\n")) 2500 // } 2501 // 2502 // func main() { 2503 // http.HandleFunc("/", handler) 2504 // log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/") 2505 // err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil) 2506 // log.Fatal(err) 2507 // } 2508 // 2509 // One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem. 2510 // 2511 // ListenAndServeTLS always returns a non-nil error. 2512 func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { 2513 server := &Server{Addr: addr, Handler: handler} 2514 return server.ListenAndServeTLS(certFile, keyFile) 2515 } 2516 2517 // ListenAndServeTLS listens on the TCP network address srv.Addr and 2518 // then calls Serve to handle requests on incoming TLS connections. 2519 // Accepted connections are configured to enable TCP keep-alives. 2520 // 2521 // Filenames containing a certificate and matching private key for the 2522 // server must be provided if neither the Server's TLSConfig.Certificates 2523 // nor TLSConfig.GetCertificate are populated. If the certificate is 2524 // signed by a certificate authority, the certFile should be the 2525 // concatenation of the server's certificate, any intermediates, and 2526 // the CA's certificate. 2527 // 2528 // If srv.Addr is blank, ":https" is used. 2529 // 2530 // ListenAndServeTLS always returns a non-nil error. 2531 func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error { 2532 addr := srv.Addr 2533 if addr == "" { 2534 addr = ":https" 2535 } 2536 2537 // Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig 2538 // before we clone it and create the TLS Listener. 
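// A sketch of the TLSConfig-provided certificate case handled below: when
// srv.TLSConfig already carries a certificate, certFile and keyFile may be
// left empty (file names and address here are illustrative only):
//
//	cert, err := tls.LoadX509KeyPair("cert.pem", "key.pem")
//	if err != nil {
//		log.Fatal(err)
//	}
//	srv := &http.Server{
//		Addr:      ":https",
//		TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
//	}
//	log.Fatal(srv.ListenAndServeTLS("", ""))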
2539 if err := srv.setupHTTP2_ListenAndServeTLS(); err != nil { 2540 return err 2541 } 2542 2543 config := cloneTLSConfig(srv.TLSConfig) 2544 if !strSliceContains(config.NextProtos, "http/1.1") { 2545 config.NextProtos = append(config.NextProtos, "http/1.1") 2546 } 2547 2548 configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil 2549 if !configHasCert || certFile != "" || keyFile != "" { 2550 var err error 2551 config.Certificates = make([]tls.Certificate, 1) 2552 config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) 2553 if err != nil { 2554 return err 2555 } 2556 } 2557 2558 ln, err := net.Listen("tcp", addr) 2559 if err != nil { 2560 return err 2561 } 2562 2563 tlsListener := tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, config) 2564 return srv.Serve(tlsListener) 2565 } 2566 2567 // setupHTTP2_ListenAndServeTLS conditionally configures HTTP/2 on 2568 // srv and returns whether there was an error setting it up. If it is 2569 // not configured for policy reasons, nil is returned. 2570 func (srv *Server) setupHTTP2_ListenAndServeTLS() error { 2571 srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults) 2572 return srv.nextProtoErr 2573 } 2574 2575 // setupHTTP2_Serve is called from (*Server).Serve and conditionally 2576 // configures HTTP/2 on srv using a more conservative policy than 2577 // setupHTTP2_ListenAndServeTLS because Serve may be called 2578 // concurrently. 2579 // 2580 // The tests named TestTransportAutomaticHTTP2* and 2581 // TestConcurrentServerServe in server_test.go demonstrate some 2582 // of the supported use cases and motivations. 2583 func (srv *Server) setupHTTP2_Serve() error { 2584 srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults_Serve) 2585 return srv.nextProtoErr 2586 } 2587 2588 func (srv *Server) onceSetNextProtoDefaults_Serve() { 2589 if srv.shouldConfigureHTTP2ForServe() { 2590 srv.onceSetNextProtoDefaults() 2591 } 2592 } 2593 2594 // onceSetNextProtoDefaults configures HTTP/2, if the user hasn't 2595 // configured otherwise. (by setting srv.TLSNextProto non-nil) 2596 // It must only be called via srv.nextProtoOnce (use srv.setupHTTP2_*). 2597 func (srv *Server) onceSetNextProtoDefaults() { 2598 if strings.Contains(os.Getenv("GODEBUG"), "http2server=0") { 2599 return 2600 } 2601 // Enable HTTP/2 by default if the user hasn't otherwise 2602 // configured their TLSNextProto map. 2603 if srv.TLSNextProto == nil { 2604 srv.nextProtoErr = http2ConfigureServer(srv, nil) 2605 } 2606 } 2607 2608 // TimeoutHandler returns a Handler that runs h with the given time limit. 2609 // 2610 // The new Handler calls h.ServeHTTP to handle each request, but if a 2611 // call runs for longer than its time limit, the handler responds with 2612 // a 503 Service Unavailable error and the given message in its body. 2613 // (If msg is empty, a suitable default message will be sent.) 2614 // After such a timeout, writes by h to its ResponseWriter will return 2615 // ErrHandlerTimeout. 2616 // 2617 // TimeoutHandler buffers all Handler writes to memory and does not 2618 // support the Hijacker or Flusher interfaces. 2619 func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler { 2620 return &timeoutHandler{ 2621 handler: h, 2622 body: msg, 2623 dt: dt, 2624 } 2625 } 2626 2627 // ErrHandlerTimeout is returned on ResponseWriter Write calls 2628 // in handlers which have timed out. 
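// A sketch of wrapping a slow handler with TimeoutHandler as described above
// (duration, pattern, and message are illustrative only):
//
//	slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//		time.Sleep(2 * time.Second) // pretend work
//		fmt.Fprintln(w, "done")
//	})
//	// Requests taking longer than one second receive a 503 with the given
//	// body; later writes by the wrapped handler return ErrHandlerTimeout.
//	http.Handle("/slow", http.TimeoutHandler(slow, time.Second, "request timed out"))
//	log.Fatal(http.ListenAndServe(":8080", nil))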
2629 var ErrHandlerTimeout = errors.New("http: Handler timeout") 2630 2631 type timeoutHandler struct { 2632 handler Handler 2633 body string 2634 dt time.Duration 2635 2636 // When set, no timer will be created and this channel will 2637 // be used instead. 2638 testTimeout <-chan time.Time 2639 } 2640 2641 func (h *timeoutHandler) errorBody() string { 2642 if h.body != "" { 2643 return h.body 2644 } 2645 return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>" 2646 } 2647 2648 func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) { 2649 var t *time.Timer 2650 timeout := h.testTimeout 2651 if timeout == nil { 2652 t = time.NewTimer(h.dt) 2653 timeout = t.C 2654 } 2655 done := make(chan struct{}) 2656 tw := &timeoutWriter{ 2657 w: w, 2658 h: make(Header), 2659 } 2660 go func() { 2661 h.handler.ServeHTTP(tw, r) 2662 close(done) 2663 }() 2664 select { 2665 case <-done: 2666 tw.mu.Lock() 2667 defer tw.mu.Unlock() 2668 dst := w.Header() 2669 for k, vv := range tw.h { 2670 dst[k] = vv 2671 } 2672 if !tw.wroteHeader { 2673 tw.code = StatusOK 2674 } 2675 w.WriteHeader(tw.code) 2676 w.Write(tw.wbuf.Bytes()) 2677 if t != nil { 2678 t.Stop() 2679 } 2680 case <-timeout: 2681 tw.mu.Lock() 2682 defer tw.mu.Unlock() 2683 w.WriteHeader(StatusServiceUnavailable) 2684 io.WriteString(w, h.errorBody()) 2685 tw.timedOut = true 2686 return 2687 } 2688 } 2689 2690 type timeoutWriter struct { 2691 w ResponseWriter 2692 h Header 2693 wbuf bytes.Buffer 2694 2695 mu sync.Mutex 2696 timedOut bool 2697 wroteHeader bool 2698 code int 2699 } 2700 2701 func (tw *timeoutWriter) Header() Header { return tw.h } 2702 2703 func (tw *timeoutWriter) Write(p []byte) (int, error) { 2704 tw.mu.Lock() 2705 defer tw.mu.Unlock() 2706 if tw.timedOut { 2707 return 0, ErrHandlerTimeout 2708 } 2709 if !tw.wroteHeader { 2710 tw.writeHeader(StatusOK) 2711 } 2712 return tw.wbuf.Write(p) 2713 } 2714 2715 func (tw *timeoutWriter) WriteHeader(code int) { 2716 tw.mu.Lock() 2717 defer tw.mu.Unlock() 2718 if tw.timedOut || tw.wroteHeader { 2719 return 2720 } 2721 tw.writeHeader(code) 2722 } 2723 2724 func (tw *timeoutWriter) writeHeader(code int) { 2725 tw.wroteHeader = true 2726 tw.code = code 2727 } 2728 2729 // tcpKeepAliveListener sets TCP keep-alive timeouts on accepted 2730 // connections. It's used by ListenAndServe and ListenAndServeTLS so 2731 // dead TCP connections (e.g. closing laptop mid-download) eventually 2732 // go away. 2733 type tcpKeepAliveListener struct { 2734 *net.TCPListener 2735 } 2736 2737 func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { 2738 tc, err := ln.AcceptTCP() 2739 if err != nil { 2740 return 2741 } 2742 tc.SetKeepAlive(true) 2743 tc.SetKeepAlivePeriod(3 * time.Minute) 2744 return tc, nil 2745 } 2746 2747 // globalOptionsHandler responds to "OPTIONS *" requests. 2748 type globalOptionsHandler struct{} 2749 2750 func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) { 2751 w.Header().Set("Content-Length", "0") 2752 if r.ContentLength != 0 { 2753 // Read up to 4KB of OPTIONS body (as mentioned in the 2754 // spec as being reserved for future use), but anything 2755 // over that is considered a waste of server resources 2756 // (or an attack) and we abort and close the connection, 2757 // courtesy of MaxBytesReader's EOF behavior. 2758 mb := MaxBytesReader(w, r.Body, 4<<10) 2759 io.Copy(ioutil.Discard, mb) 2760 } 2761 } 2762 2763 // initNPNRequest is an HTTP handler that initializes certain 2764 // uninitialized fields in its *Request. 
Such partially-initialized 2765 // Requests come from NPN protocol handlers. 2766 type initNPNRequest struct { 2767 c *tls.Conn 2768 h serverHandler 2769 } 2770 2771 func (h initNPNRequest) ServeHTTP(rw ResponseWriter, req *Request) { 2772 if req.TLS == nil { 2773 req.TLS = &tls.ConnectionState{} 2774 *req.TLS = h.c.ConnectionState() 2775 } 2776 if req.Body == nil { 2777 req.Body = NoBody 2778 } 2779 if req.RemoteAddr == "" { 2780 req.RemoteAddr = h.c.RemoteAddr().String() 2781 } 2782 h.h.ServeHTTP(rw, req) 2783 } 2784 2785 // loggingConn is used for debugging. 2786 type loggingConn struct { 2787 name string 2788 net.Conn 2789 } 2790 2791 var ( 2792 uniqNameMu sync.Mutex 2793 uniqNameNext = make(map[string]int) 2794 ) 2795 2796 func newLoggingConn(baseName string, c net.Conn) net.Conn { 2797 uniqNameMu.Lock() 2798 defer uniqNameMu.Unlock() 2799 uniqNameNext[baseName]++ 2800 return &loggingConn{ 2801 name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]), 2802 Conn: c, 2803 } 2804 } 2805 2806 func (c *loggingConn) Write(p []byte) (n int, err error) { 2807 log.Printf("%s.Write(%d) = ....", c.name, len(p)) 2808 n, err = c.Conn.Write(p) 2809 log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err) 2810 return 2811 } 2812 2813 func (c *loggingConn) Read(p []byte) (n int, err error) { 2814 log.Printf("%s.Read(%d) = ....", c.name, len(p)) 2815 n, err = c.Conn.Read(p) 2816 log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err) 2817 return 2818 } 2819 2820 func (c *loggingConn) Close() (err error) { 2821 log.Printf("%s.Close() = ...", c.name) 2822 err = c.Conn.Close() 2823 log.Printf("%s.Close() = %v", c.name, err) 2824 return 2825 } 2826 2827 // checkConnErrorWriter writes to c.rwc and records any write errors to c.werr. 2828 // It only contains one field (and a pointer field at that), so it 2829 // fits in an interface value without an extra allocation. 2830 type checkConnErrorWriter struct { 2831 c *conn 2832 } 2833 2834 func (w checkConnErrorWriter) Write(p []byte) (n int, err error) { 2835 n, err = w.c.rwc.Write(p) 2836 if err != nil && w.c.werr == nil { 2837 w.c.werr = err 2838 w.c.cancelCtx() 2839 } 2840 return 2841 } 2842 2843 func numLeadingCRorLF(v []byte) (n int) { 2844 for _, b := range v { 2845 if b == '\r' || b == '\n' { 2846 n++ 2847 continue 2848 } 2849 break 2850 } 2851 return 2852 2853 } 2854 2855 func strSliceContains(ss []string, s string) bool { 2856 for _, v := range ss { 2857 if v == s { 2858 return true 2859 } 2860 } 2861 return false 2862 }
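// A sketch of the MaxBytesReader technique used by globalOptionsHandler
// above, applied in a user handler that wants to cap its request body
// (limit, pattern, and messages are illustrative only):
//
//	func upload(w http.ResponseWriter, r *http.Request) {
//		r.Body = http.MaxBytesReader(w, r.Body, 1<<20) // 1 MB cap
//		data, err := ioutil.ReadAll(r.Body)
//		if err != nil {
//			// A read past the limit returns an error, and the server will
//			// close the connection after the response.
//			http.Error(w, "request body too large", http.StatusRequestEntityTooLarge)
//			return
//		}
//		fmt.Fprintf(w, "received %d bytes\n", len(data))
//	}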