github.com/aloncn/graphics-go@v0.0.1/src/net/http/server.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // HTTP server. See RFC 2616. 6 7 package http 8 9 import ( 10 "bufio" 11 "bytes" 12 "crypto/tls" 13 "errors" 14 "fmt" 15 "io" 16 "io/ioutil" 17 "log" 18 "net" 19 "net/textproto" 20 "net/url" 21 "os" 22 "path" 23 "runtime" 24 "strconv" 25 "strings" 26 "sync" 27 "sync/atomic" 28 "time" 29 ) 30 31 // Errors introduced by the HTTP server. 32 var ( 33 ErrWriteAfterFlush = errors.New("Conn.Write called after Flush") 34 ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body") 35 ErrHijacked = errors.New("Conn has been hijacked") 36 ErrContentLength = errors.New("Conn.Write wrote more than the declared Content-Length") 37 ) 38 39 // A Handler responds to an HTTP request. 40 // 41 // ServeHTTP should write reply headers and data to the ResponseWriter 42 // and then return. Returning signals that the request is finished; it 43 // is not valid to use the ResponseWriter or read from the 44 // Request.Body after or concurrently with the completion of the 45 // ServeHTTP call. 46 // 47 // Depending on the HTTP client software, HTTP protocol version, and 48 // any intermediaries between the client and the Go server, it may not 49 // be possible to read from the Request.Body after writing to the 50 // ResponseWriter. Cautious handlers should read the Request.Body 51 // first, and then reply. 52 // 53 // If ServeHTTP panics, the server (the caller of ServeHTTP) assumes 54 // that the effect of the panic was isolated to the active request. 55 // It recovers the panic, logs a stack trace to the server error log, 56 // and hangs up the connection. 57 type Handler interface { 58 ServeHTTP(ResponseWriter, *Request) 59 } 60 61 // A ResponseWriter interface is used by an HTTP handler to 62 // construct an HTTP response. 63 // 64 // A ResponseWriter may not be used after the Handler.ServeHTTP method 65 // has returned. 66 type ResponseWriter interface { 67 // Header returns the header map that will be sent by 68 // WriteHeader. Changing the header after a call to 69 // WriteHeader (or Write) has no effect unless the modified 70 // headers were declared as trailers by setting the 71 // "Trailer" header before the call to WriteHeader (see example). 72 // To suppress implicit response headers, set their value to nil. 73 Header() Header 74 75 // Write writes the data to the connection as part of an HTTP reply. 76 // If WriteHeader has not yet been called, Write calls WriteHeader(http.StatusOK) 77 // before writing the data. If the Header does not contain a 78 // Content-Type line, Write adds a Content-Type set to the result of passing 79 // the initial 512 bytes of written data to DetectContentType. 80 Write([]byte) (int, error) 81 82 // WriteHeader sends an HTTP response header with status code. 83 // If WriteHeader is not called explicitly, the first call to Write 84 // will trigger an implicit WriteHeader(http.StatusOK). 85 // Thus explicit calls to WriteHeader are mainly used to 86 // send error codes. 87 WriteHeader(int) 88 } 89 90 // The Flusher interface is implemented by ResponseWriters that allow 91 // an HTTP handler to flush buffered data to the client. 
92 // 93 // Note that even for ResponseWriters that support Flush, 94 // if the client is connected through an HTTP proxy, 95 // the buffered data may not reach the client until the response 96 // completes. 97 type Flusher interface { 98 // Flush sends any buffered data to the client. 99 Flush() 100 } 101 102 // The Hijacker interface is implemented by ResponseWriters that allow 103 // an HTTP handler to take over the connection. 104 type Hijacker interface { 105 // Hijack lets the caller take over the connection. 106 // After a call to Hijack(), the HTTP server library 107 // will not do anything else with the connection. 108 // 109 // It becomes the caller's responsibility to manage 110 // and close the connection. 111 // 112 // The returned net.Conn may have read or write deadlines 113 // already set, depending on the configuration of the 114 // Server. It is the caller's responsibility to set 115 // or clear those deadlines as needed. 116 Hijack() (net.Conn, *bufio.ReadWriter, error) 117 } 118 119 // The CloseNotifier interface is implemented by ResponseWriters which 120 // allow detecting when the underlying connection has gone away. 121 // 122 // This mechanism can be used to cancel long operations on the server 123 // if the client has disconnected before the response is ready. 124 type CloseNotifier interface { 125 // CloseNotify returns a channel that receives at most a 126 // single value (true) when the client connection has gone 127 // away. 128 // 129 // CloseNotify may wait to notify until Request.Body has been 130 // fully read. 131 // 132 // After the Handler has returned, there is no guarantee 133 // that the channel receives a value. 134 // 135 // If the protocol is HTTP/1.1 and CloseNotify is called while 136 // processing an idempotent request (such a GET) while 137 // HTTP/1.1 pipelining is in use, the arrival of a subsequent 138 // pipelined request may cause a value to be sent on the 139 // returned channel. In practice HTTP/1.1 pipelining is not 140 // enabled in browsers and not seen often in the wild. If this 141 // is a problem, use HTTP/2 or only use CloseNotify on methods 142 // such as POST. 143 CloseNotify() <-chan bool 144 } 145 146 // A conn represents the server side of an HTTP connection. 147 type conn struct { 148 // server is the server on which the connection arrived. 149 // Immutable; never nil. 150 server *Server 151 152 // rwc is the underlying network connection. 153 // This is never wrapped by other types and is the value given out 154 // to CloseNotifier callers. It is usually of type *net.TCPConn or 155 // *tls.Conn. 156 rwc net.Conn 157 158 // remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously 159 // inside the Listener's Accept goroutine, as some implementations block. 160 // It is populated immediately inside the (*conn).serve goroutine. 161 // This is the value of a Handler's (*Request).RemoteAddr. 162 remoteAddr string 163 164 // tlsState is the TLS connection state when using TLS. 165 // nil means not TLS. 166 tlsState *tls.ConnectionState 167 168 // werr is set to the first write error to rwc. 169 // It is set via checkConnErrorWriter{w}, where bufw writes. 170 werr error 171 172 // r is bufr's read source. It's a wrapper around rwc that provides 173 // io.LimitedReader-style limiting (while reading request headers) 174 // and functionality to support CloseNotifier. See *connReader docs. 175 r *connReader 176 177 // bufr reads from r. 178 // Users of bufr must hold mu. 
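// Illustrative sketch (hypothetical user-level code; expensiveQuery is a
// made-up helper): one way a handler can use the CloseNotifier interface
// documented above to abandon work when the client disconnects.
//
//	func slow(w http.ResponseWriter, r *http.Request) {
//		result := make(chan string, 1) // buffered so the worker never blocks
//		go func() {
//			result <- expensiveQuery() // hypothetical long-running work
//		}()
//		select {
//		case s := <-result:
//			io.WriteString(w, s)
//		case <-w.(http.CloseNotifier).CloseNotify():
//			// The client went away; give up instead of writing.
//		}
//	}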
179 bufr *bufio.Reader 180 181 // bufw writes to checkConnErrorWriter{c}, which populates werr on error. 182 bufw *bufio.Writer 183 184 // lastMethod is the method of the most recent request 185 // on this connection, if any. 186 lastMethod string 187 188 // mu guards hijackedv, use of bufr, (*response).closeNotifyCh. 189 mu sync.Mutex 190 191 // hijackedv is whether this connection has been hijacked 192 // by a Handler with the Hijacker interface. 193 // It is guarded by mu. 194 hijackedv bool 195 } 196 197 func (c *conn) hijacked() bool { 198 c.mu.Lock() 199 defer c.mu.Unlock() 200 return c.hijackedv 201 } 202 203 // c.mu must be held. 204 func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) { 205 if c.hijackedv { 206 return nil, nil, ErrHijacked 207 } 208 c.hijackedv = true 209 rwc = c.rwc 210 buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc)) 211 c.setState(rwc, StateHijacked) 212 return 213 } 214 215 // This should be >= 512 bytes for DetectContentType, 216 // but otherwise it's somewhat arbitrary. 217 const bufferBeforeChunkingSize = 2048 218 219 // chunkWriter writes to a response's conn buffer, and is the writer 220 // wrapped by the response.bufw buffered writer. 221 // 222 // chunkWriter also is responsible for finalizing the Header, including 223 // conditionally setting the Content-Type and setting a Content-Length 224 // in cases where the handler's final output is smaller than the buffer 225 // size. It also conditionally adds chunk headers, when in chunking mode. 226 // 227 // See the comment above (*response).Write for the entire write flow. 228 type chunkWriter struct { 229 res *response 230 231 // header is either nil or a deep clone of res.handlerHeader 232 // at the time of res.WriteHeader, if res.WriteHeader is 233 // called and extra buffering is being done to calculate 234 // Content-Type and/or Content-Length. 235 header Header 236 237 // wroteHeader tells whether the header's been written to "the 238 // wire" (or rather: w.conn.buf). this is unlike 239 // (*response).wroteHeader, which tells only whether it was 240 // logically written. 241 wroteHeader bool 242 243 // set by the writeHeader method: 244 chunking bool // using chunked transfer encoding for reply body 245 } 246 247 var ( 248 crlf = []byte("\r\n") 249 colonSpace = []byte(": ") 250 ) 251 252 func (cw *chunkWriter) Write(p []byte) (n int, err error) { 253 if !cw.wroteHeader { 254 cw.writeHeader(p) 255 } 256 if cw.res.req.Method == "HEAD" { 257 // Eat writes. 
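// Illustrative note on the chunking branch below: when cw.chunking is set,
// each Write is framed as one HTTP/1.1 chunk. For example,
// Write([]byte("hello")) puts the following on the wire:
//
//	5\r\n
//	hello\r\n
//
// and (*chunkWriter).close later terminates the body with "0\r\n", any
// declared trailers, and a final blank line.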
258 return len(p), nil 259 } 260 if cw.chunking { 261 _, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p)) 262 if err != nil { 263 cw.res.conn.rwc.Close() 264 return 265 } 266 } 267 n, err = cw.res.conn.bufw.Write(p) 268 if cw.chunking && err == nil { 269 _, err = cw.res.conn.bufw.Write(crlf) 270 } 271 if err != nil { 272 cw.res.conn.rwc.Close() 273 } 274 return 275 } 276 277 func (cw *chunkWriter) flush() { 278 if !cw.wroteHeader { 279 cw.writeHeader(nil) 280 } 281 cw.res.conn.bufw.Flush() 282 } 283 284 func (cw *chunkWriter) close() { 285 if !cw.wroteHeader { 286 cw.writeHeader(nil) 287 } 288 if cw.chunking { 289 bw := cw.res.conn.bufw // conn's bufio writer 290 // zero chunk to mark EOF 291 bw.WriteString("0\r\n") 292 if len(cw.res.trailers) > 0 { 293 trailers := make(Header) 294 for _, h := range cw.res.trailers { 295 if vv := cw.res.handlerHeader[h]; len(vv) > 0 { 296 trailers[h] = vv 297 } 298 } 299 trailers.Write(bw) // the writer handles noting errors 300 } 301 // final blank line after the trailers (whether 302 // present or not) 303 bw.WriteString("\r\n") 304 } 305 } 306 307 // A response represents the server side of an HTTP response. 308 type response struct { 309 conn *conn 310 req *Request // request for this response 311 reqBody io.ReadCloser 312 wroteHeader bool // reply header has been (logically) written 313 wroteContinue bool // 100 Continue response was written 314 315 w *bufio.Writer // buffers output in chunks to chunkWriter 316 cw chunkWriter 317 318 // handlerHeader is the Header that Handlers get access to, 319 // which may be retained and mutated even after WriteHeader. 320 // handlerHeader is copied into cw.header at WriteHeader 321 // time, and privately mutated thereafter. 322 handlerHeader Header 323 calledHeader bool // handler accessed handlerHeader via Header 324 325 written int64 // number of bytes written in body 326 contentLength int64 // explicitly-declared Content-Length; or -1 327 status int // status code passed to WriteHeader 328 329 // close connection after this reply. set on request and 330 // updated after response from handler if there's a 331 // "Connection: keep-alive" response header and a 332 // Content-Length. 333 closeAfterReply bool 334 335 // requestBodyLimitHit is set by requestTooLarge when 336 // maxBytesReader hits its max size. It is checked in 337 // WriteHeader, to make sure we don't consume the 338 // remaining request body to try to advance to the next HTTP 339 // request. Instead, when this is set, we stop reading 340 // subsequent requests on this connection and stop reading 341 // input from it. 342 requestBodyLimitHit bool 343 344 // trailers are the headers to be sent after the handler 345 // finishes writing the body. This field is initialized from 346 // the Trailer response header when the response header is 347 // written. 348 trailers []string 349 350 handlerDone atomicBool // set true when the handler exits 351 352 // Buffers for Date and Content-Length 353 dateBuf [len(TimeFormat)]byte 354 clenBuf [10]byte 355 356 // closeNotifyCh is non-nil once CloseNotify is called. 357 // Guarded by conn.mu 358 closeNotifyCh <-chan bool 359 } 360 361 type atomicBool int32 362 363 func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } 364 func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } 365 366 // declareTrailer is called for each Trailer header when the 367 // response header is written. 
It notes that a header will need to be 368 // written in the trailers at the end of the response. 369 func (w *response) declareTrailer(k string) { 370 k = CanonicalHeaderKey(k) 371 switch k { 372 case "Transfer-Encoding", "Content-Length", "Trailer": 373 // Forbidden by RFC 2616 14.40. 374 return 375 } 376 w.trailers = append(w.trailers, k) 377 } 378 379 // requestTooLarge is called by maxBytesReader when too much input has 380 // been read from the client. 381 func (w *response) requestTooLarge() { 382 w.closeAfterReply = true 383 w.requestBodyLimitHit = true 384 if !w.wroteHeader { 385 w.Header().Set("Connection", "close") 386 } 387 } 388 389 // needsSniff reports whether a Content-Type still needs to be sniffed. 390 func (w *response) needsSniff() bool { 391 _, haveType := w.handlerHeader["Content-Type"] 392 return !w.cw.wroteHeader && !haveType && w.written < sniffLen 393 } 394 395 // writerOnly hides an io.Writer value's optional ReadFrom method 396 // from io.Copy. 397 type writerOnly struct { 398 io.Writer 399 } 400 401 func srcIsRegularFile(src io.Reader) (isRegular bool, err error) { 402 switch v := src.(type) { 403 case *os.File: 404 fi, err := v.Stat() 405 if err != nil { 406 return false, err 407 } 408 return fi.Mode().IsRegular(), nil 409 case *io.LimitedReader: 410 return srcIsRegularFile(v.R) 411 default: 412 return 413 } 414 } 415 416 // ReadFrom is here to optimize copying from an *os.File regular file 417 // to a *net.TCPConn with sendfile. 418 func (w *response) ReadFrom(src io.Reader) (n int64, err error) { 419 // Our underlying w.conn.rwc is usually a *TCPConn (with its 420 // own ReadFrom method). If not, or if our src isn't a regular 421 // file, just fall back to the normal copy method. 422 rf, ok := w.conn.rwc.(io.ReaderFrom) 423 regFile, err := srcIsRegularFile(src) 424 if err != nil { 425 return 0, err 426 } 427 if !ok || !regFile { 428 bufp := copyBufPool.Get().(*[]byte) 429 defer copyBufPool.Put(bufp) 430 return io.CopyBuffer(writerOnly{w}, src, *bufp) 431 } 432 433 // sendfile path: 434 435 if !w.wroteHeader { 436 w.WriteHeader(StatusOK) 437 } 438 439 if w.needsSniff() { 440 n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen)) 441 n += n0 442 if err != nil { 443 return n, err 444 } 445 } 446 447 w.w.Flush() // get rid of any previous writes 448 w.cw.flush() // make sure Header is written; flush data to rwc 449 450 // Now that cw has been flushed, its chunking field is guaranteed initialized. 451 if !w.cw.chunking && w.bodyAllowed() { 452 n0, err := rf.ReadFrom(src) 453 n += n0 454 w.written += n0 455 return n, err 456 } 457 458 n0, err := io.Copy(writerOnly{w}, src) 459 n += n0 460 return n, err 461 } 462 463 // debugServerConnections controls whether all server connections are wrapped 464 // with a verbose logging wrapper. 465 const debugServerConnections = false 466 467 // Create new connection from rwc. 468 func (srv *Server) newConn(rwc net.Conn) *conn { 469 c := &conn{ 470 server: srv, 471 rwc: rwc, 472 } 473 if debugServerConnections { 474 c.rwc = newLoggingConn("server", c.rwc) 475 } 476 return c 477 } 478 479 type readResult struct { 480 n int 481 err error 482 b byte // byte read, if n == 1 483 } 484 485 // connReader is the io.Reader wrapper used by *conn. It combines a 486 // selectively-activated io.LimitedReader (to bound request header 487 // read sizes) with support for selectively keeping an io.Reader.Read 488 // call blocked in a background goroutine to wait for activity and 489 // trigger a CloseNotifier channel. 
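// Illustrative sketch (hypothetical user code; the file path is made up)
// related to the (*response).ReadFrom fast path above: a handler that copies
// a regular file to the ResponseWriter with io.Copy can be served via
// sendfile when the connection is a plain *net.TCPConn and the reply is not
// chunked.
//
//	func serveDump(w http.ResponseWriter, r *http.Request) {
//		f, err := os.Open("/var/tmp/dump.bin") // hypothetical path
//		if err != nil {
//			http.Error(w, "not available", http.StatusInternalServerError)
//			return
//		}
//		defer f.Close()
//		if fi, err := f.Stat(); err == nil {
//			// With a Content-Length the reply is not chunked, so the
//			// rf.ReadFrom (sendfile) branch above can be taken.
//			w.Header().Set("Content-Length", strconv.FormatInt(fi.Size(), 10))
//		}
//		io.Copy(w, f)
//	}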
490 type connReader struct { 491 r io.Reader 492 remain int64 // bytes remaining 493 494 // ch is non-nil if a background read is in progress. 495 // It is guarded by conn.mu. 496 ch chan readResult 497 } 498 499 func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain } 500 func (cr *connReader) setInfiniteReadLimit() { cr.remain = 1<<63 - 1 } 501 func (cr *connReader) hitReadLimit() bool { return cr.remain <= 0 } 502 503 func (cr *connReader) Read(p []byte) (n int, err error) { 504 if cr.hitReadLimit() { 505 return 0, io.EOF 506 } 507 if len(p) == 0 { 508 return 509 } 510 if int64(len(p)) > cr.remain { 511 p = p[:cr.remain] 512 } 513 514 // Is a background read (started by CloseNotifier) already in 515 // flight? If so, wait for it and use its result. 516 ch := cr.ch 517 if ch != nil { 518 cr.ch = nil 519 res := <-ch 520 if res.n == 1 { 521 p[0] = res.b 522 cr.remain -= 1 523 } 524 return res.n, res.err 525 } 526 n, err = cr.r.Read(p) 527 cr.remain -= int64(n) 528 return 529 } 530 531 func (cr *connReader) startBackgroundRead(onReadComplete func()) { 532 if cr.ch != nil { 533 // Background read already started. 534 return 535 } 536 cr.ch = make(chan readResult, 1) 537 go cr.closeNotifyAwaitActivityRead(cr.ch, onReadComplete) 538 } 539 540 func (cr *connReader) closeNotifyAwaitActivityRead(ch chan<- readResult, onReadComplete func()) { 541 var buf [1]byte 542 n, err := cr.r.Read(buf[:1]) 543 onReadComplete() 544 ch <- readResult{n, err, buf[0]} 545 } 546 547 var ( 548 bufioReaderPool sync.Pool 549 bufioWriter2kPool sync.Pool 550 bufioWriter4kPool sync.Pool 551 ) 552 553 var copyBufPool = sync.Pool{ 554 New: func() interface{} { 555 b := make([]byte, 32*1024) 556 return &b 557 }, 558 } 559 560 func bufioWriterPool(size int) *sync.Pool { 561 switch size { 562 case 2 << 10: 563 return &bufioWriter2kPool 564 case 4 << 10: 565 return &bufioWriter4kPool 566 } 567 return nil 568 } 569 570 func newBufioReader(r io.Reader) *bufio.Reader { 571 if v := bufioReaderPool.Get(); v != nil { 572 br := v.(*bufio.Reader) 573 br.Reset(r) 574 return br 575 } 576 // Note: if this reader size is every changed, update 577 // TestHandlerBodyClose's assumptions. 578 return bufio.NewReader(r) 579 } 580 581 func putBufioReader(br *bufio.Reader) { 582 br.Reset(nil) 583 bufioReaderPool.Put(br) 584 } 585 586 func newBufioWriterSize(w io.Writer, size int) *bufio.Writer { 587 pool := bufioWriterPool(size) 588 if pool != nil { 589 if v := pool.Get(); v != nil { 590 bw := v.(*bufio.Writer) 591 bw.Reset(w) 592 return bw 593 } 594 } 595 return bufio.NewWriterSize(w, size) 596 } 597 598 func putBufioWriter(bw *bufio.Writer) { 599 bw.Reset(nil) 600 if pool := bufioWriterPool(bw.Available()); pool != nil { 601 pool.Put(bw) 602 } 603 } 604 605 // DefaultMaxHeaderBytes is the maximum permitted size of the headers 606 // in an HTTP request. 607 // This can be overridden by setting Server.MaxHeaderBytes. 
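// Illustrative sketch (hypothetical user code): overriding the default
// header limit below on a Server value.
//
//	srv := &http.Server{
//		Addr:           ":8080",
//		MaxHeaderBytes: 1 << 16, // 64 KB instead of the 1 MB default
//	}
//	log.Fatal(srv.ListenAndServe())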
608 const DefaultMaxHeaderBytes = 1 << 20 // 1 MB 609 610 func (srv *Server) maxHeaderBytes() int { 611 if srv.MaxHeaderBytes > 0 { 612 return srv.MaxHeaderBytes 613 } 614 return DefaultMaxHeaderBytes 615 } 616 617 func (srv *Server) initialReadLimitSize() int64 { 618 return int64(srv.maxHeaderBytes()) + 4096 // bufio slop 619 } 620 621 // wrapper around io.ReaderCloser which on first read, sends an 622 // HTTP/1.1 100 Continue header 623 type expectContinueReader struct { 624 resp *response 625 readCloser io.ReadCloser 626 closed bool 627 sawEOF bool 628 } 629 630 func (ecr *expectContinueReader) Read(p []byte) (n int, err error) { 631 if ecr.closed { 632 return 0, ErrBodyReadAfterClose 633 } 634 if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() { 635 ecr.resp.wroteContinue = true 636 ecr.resp.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n") 637 ecr.resp.conn.bufw.Flush() 638 } 639 n, err = ecr.readCloser.Read(p) 640 if err == io.EOF { 641 ecr.sawEOF = true 642 } 643 return 644 } 645 646 func (ecr *expectContinueReader) Close() error { 647 ecr.closed = true 648 return ecr.readCloser.Close() 649 } 650 651 // TimeFormat is the time format to use when generating times in HTTP 652 // headers. It is like time.RFC1123 but hard-codes GMT as the time 653 // zone. The time being formatted must be in UTC for Format to 654 // generate the correct format. 655 // 656 // For parsing this time format, see ParseTime. 657 const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" 658 659 // appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat)) 660 func appendTime(b []byte, t time.Time) []byte { 661 const days = "SunMonTueWedThuFriSat" 662 const months = "JanFebMarAprMayJunJulAugSepOctNovDec" 663 664 t = t.UTC() 665 yy, mm, dd := t.Date() 666 hh, mn, ss := t.Clock() 667 day := days[3*t.Weekday():] 668 mon := months[3*(mm-1):] 669 670 return append(b, 671 day[0], day[1], day[2], ',', ' ', 672 byte('0'+dd/10), byte('0'+dd%10), ' ', 673 mon[0], mon[1], mon[2], ' ', 674 byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ', 675 byte('0'+hh/10), byte('0'+hh%10), ':', 676 byte('0'+mn/10), byte('0'+mn%10), ':', 677 byte('0'+ss/10), byte('0'+ss%10), ' ', 678 'G', 'M', 'T') 679 } 680 681 var errTooLarge = errors.New("http: request too large") 682 683 // Read next request from connection. 684 func (c *conn) readRequest() (w *response, err error) { 685 if c.hijacked() { 686 return nil, ErrHijacked 687 } 688 689 if d := c.server.ReadTimeout; d != 0 { 690 c.rwc.SetReadDeadline(time.Now().Add(d)) 691 } 692 if d := c.server.WriteTimeout; d != 0 { 693 defer func() { 694 c.rwc.SetWriteDeadline(time.Now().Add(d)) 695 }() 696 } 697 698 c.r.setReadLimit(c.server.initialReadLimitSize()) 699 c.mu.Lock() // while using bufr 700 if c.lastMethod == "POST" { 701 // RFC 2616 section 4.1 tolerance for old buggy clients. 
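// Clients that are "buggy" in this sense typically send an extra CRLF after
// a POST body. The Peek/Discard pair below drops any leading CR or LF bytes
// (at most the 4 peeked here) so the stray newline is not misparsed as the
// start of the next request line on a keep-alive connection.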
702 peek, _ := c.bufr.Peek(4) // ReadRequest will get err below 703 c.bufr.Discard(numLeadingCRorLF(peek)) 704 } 705 req, err := readRequest(c.bufr, keepHostHeader) 706 c.mu.Unlock() 707 if err != nil { 708 if c.r.hitReadLimit() { 709 return nil, errTooLarge 710 } 711 return nil, err 712 } 713 c.lastMethod = req.Method 714 c.r.setInfiniteReadLimit() 715 716 hosts, haveHost := req.Header["Host"] 717 if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) { 718 return nil, badRequestError("missing required Host header") 719 } 720 if len(hosts) > 1 { 721 return nil, badRequestError("too many Host headers") 722 } 723 if len(hosts) == 1 && !validHostHeader(hosts[0]) { 724 return nil, badRequestError("malformed Host header") 725 } 726 for k, vv := range req.Header { 727 if !validHeaderName(k) { 728 return nil, badRequestError("invalid header name") 729 } 730 for _, v := range vv { 731 if !validHeaderValue(v) { 732 return nil, badRequestError("invalid header value") 733 } 734 } 735 } 736 delete(req.Header, "Host") 737 738 req.RemoteAddr = c.remoteAddr 739 req.TLS = c.tlsState 740 if body, ok := req.Body.(*body); ok { 741 body.doEarlyClose = true 742 } 743 744 w = &response{ 745 conn: c, 746 req: req, 747 reqBody: req.Body, 748 handlerHeader: make(Header), 749 contentLength: -1, 750 } 751 w.cw.res = w 752 w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize) 753 return w, nil 754 } 755 756 func (w *response) Header() Header { 757 if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader { 758 // Accessing the header between logically writing it 759 // and physically writing it means we need to allocate 760 // a clone to snapshot the logically written state. 761 w.cw.header = w.handlerHeader.clone() 762 } 763 w.calledHeader = true 764 return w.handlerHeader 765 } 766 767 // maxPostHandlerReadBytes is the max number of Request.Body bytes not 768 // consumed by a handler that the server will read from the client 769 // in order to keep a connection alive. If there are more bytes than 770 // this then the server to be paranoid instead sends a "Connection: 771 // close" response. 772 // 773 // This number is approximately what a typical machine's TCP buffer 774 // size is anyway. (if we have the bytes on the machine, we might as 775 // well read them) 776 const maxPostHandlerReadBytes = 256 << 10 777 778 func (w *response) WriteHeader(code int) { 779 if w.conn.hijacked() { 780 w.conn.server.logf("http: response.WriteHeader on hijacked connection") 781 return 782 } 783 if w.wroteHeader { 784 w.conn.server.logf("http: multiple response.WriteHeader calls") 785 return 786 } 787 w.wroteHeader = true 788 w.status = code 789 790 if w.calledHeader && w.cw.header == nil { 791 w.cw.header = w.handlerHeader.clone() 792 } 793 794 if cl := w.handlerHeader.get("Content-Length"); cl != "" { 795 v, err := strconv.ParseInt(cl, 10, 64) 796 if err == nil && v >= 0 { 797 w.contentLength = v 798 } else { 799 w.conn.server.logf("http: invalid Content-Length of %q", cl) 800 w.handlerHeader.Del("Content-Length") 801 } 802 } 803 } 804 805 // extraHeader is the set of headers sometimes added by chunkWriter.writeHeader. 806 // This type is used to avoid extra allocations from cloning and/or populating 807 // the response Header map and all its 1-element slices. 808 type extraHeader struct { 809 contentType string 810 connection string 811 transferEncoding string 812 date []byte // written if not nil 813 contentLength []byte // written if not nil 814 } 815 816 // Sorted the same as extraHeader.Write's loop. 
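// Illustrative sketch (hypothetical user code) for the Header/WriteHeader
// path above: the header map is snapshotted when the status is written, so
// headers must be set first.
//
//	func created(w http.ResponseWriter, r *http.Request) {
//		w.Header().Set("Location", "/things/42") // hypothetical resource
//		w.WriteHeader(http.StatusCreated)
//		// Changing w.Header() from here on has no effect on the reply
//		// (unless the key was declared in the "Trailer" header).
//		io.WriteString(w, "created\n")
//	}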
817 var extraHeaderKeys = [][]byte{ 818 []byte("Content-Type"), 819 []byte("Connection"), 820 []byte("Transfer-Encoding"), 821 } 822 823 var ( 824 headerContentLength = []byte("Content-Length: ") 825 headerDate = []byte("Date: ") 826 ) 827 828 // Write writes the headers described in h to w. 829 // 830 // This method has a value receiver, despite the somewhat large size 831 // of h, because it prevents an allocation. The escape analysis isn't 832 // smart enough to realize this function doesn't mutate h. 833 func (h extraHeader) Write(w *bufio.Writer) { 834 if h.date != nil { 835 w.Write(headerDate) 836 w.Write(h.date) 837 w.Write(crlf) 838 } 839 if h.contentLength != nil { 840 w.Write(headerContentLength) 841 w.Write(h.contentLength) 842 w.Write(crlf) 843 } 844 for i, v := range []string{h.contentType, h.connection, h.transferEncoding} { 845 if v != "" { 846 w.Write(extraHeaderKeys[i]) 847 w.Write(colonSpace) 848 w.WriteString(v) 849 w.Write(crlf) 850 } 851 } 852 } 853 854 // writeHeader finalizes the header sent to the client and writes it 855 // to cw.res.conn.bufw. 856 // 857 // p is not written by writeHeader, but is the first chunk of the body 858 // that will be written. It is sniffed for a Content-Type if none is 859 // set explicitly. It's also used to set the Content-Length, if the 860 // total body size was small and the handler has already finished 861 // running. 862 func (cw *chunkWriter) writeHeader(p []byte) { 863 if cw.wroteHeader { 864 return 865 } 866 cw.wroteHeader = true 867 868 w := cw.res 869 keepAlivesEnabled := w.conn.server.doKeepAlives() 870 isHEAD := w.req.Method == "HEAD" 871 872 // header is written out to w.conn.buf below. Depending on the 873 // state of the handler, we either own the map or not. If we 874 // don't own it, the exclude map is created lazily for 875 // WriteSubset to remove headers. The setHeader struct holds 876 // headers we need to add. 877 header := cw.header 878 owned := header != nil 879 if !owned { 880 header = w.handlerHeader 881 } 882 var excludeHeader map[string]bool 883 delHeader := func(key string) { 884 if owned { 885 header.Del(key) 886 return 887 } 888 if _, ok := header[key]; !ok { 889 return 890 } 891 if excludeHeader == nil { 892 excludeHeader = make(map[string]bool) 893 } 894 excludeHeader[key] = true 895 } 896 var setHeader extraHeader 897 898 trailers := false 899 for _, v := range cw.header["Trailer"] { 900 trailers = true 901 foreachHeaderElement(v, cw.res.declareTrailer) 902 } 903 904 te := header.get("Transfer-Encoding") 905 hasTE := te != "" 906 907 // If the handler is done but never sent a Content-Length 908 // response header and this is our first (and last) write, set 909 // it, even to zero. This helps HTTP/1.0 clients keep their 910 // "keep-alive" connections alive. 911 // Exceptions: 304/204/1xx responses never get Content-Length, and if 912 // it was a HEAD request, we don't know the difference between 913 // 0 actual bytes and 0 bytes because the handler noticed it 914 // was a HEAD request and chose not to write anything. So for 915 // HEAD, the handler should either write the Content-Length or 916 // write non-zero bytes. If it's actually 0 bytes and the 917 // handler never looked at the Request.Method, we just don't 918 // send a Content-Length header. 919 // Further, we don't send an automatic Content-Length if they 920 // set a Transfer-Encoding, because they're generally incompatible. 
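// Illustrative example of the rule above: a handler that only does
// io.WriteString(w, "ok") and returns never sets Content-Length itself, but
// the 2-byte body is still sitting in the bufferBeforeChunkingSize buffer
// when the handler finishes, so the condition below fires and the response
// goes out with "Content-Length: 2" instead of chunked encoding.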
921 if w.handlerDone.isSet() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) { 922 w.contentLength = int64(len(p)) 923 setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10) 924 } 925 926 // If this was an HTTP/1.0 request with keep-alive and we sent a 927 // Content-Length back, we can make this a keep-alive response ... 928 if w.req.wantsHttp10KeepAlive() && keepAlivesEnabled { 929 sentLength := header.get("Content-Length") != "" 930 if sentLength && header.get("Connection") == "keep-alive" { 931 w.closeAfterReply = false 932 } 933 } 934 935 // Check for a explicit (and valid) Content-Length header. 936 hasCL := w.contentLength != -1 937 938 if w.req.wantsHttp10KeepAlive() && (isHEAD || hasCL) { 939 _, connectionHeaderSet := header["Connection"] 940 if !connectionHeaderSet { 941 setHeader.connection = "keep-alive" 942 } 943 } else if !w.req.ProtoAtLeast(1, 1) || w.req.wantsClose() { 944 w.closeAfterReply = true 945 } 946 947 if header.get("Connection") == "close" || !keepAlivesEnabled { 948 w.closeAfterReply = true 949 } 950 951 // If the client wanted a 100-continue but we never sent it to 952 // them (or, more strictly: we never finished reading their 953 // request body), don't reuse this connection because it's now 954 // in an unknown state: we might be sending this response at 955 // the same time the client is now sending its request body 956 // after a timeout. (Some HTTP clients send Expect: 957 // 100-continue but knowing that some servers don't support 958 // it, the clients set a timer and send the body later anyway) 959 // If we haven't seen EOF, we can't skip over the unread body 960 // because we don't know if the next bytes on the wire will be 961 // the body-following-the-timer or the subsequent request. 962 // See Issue 11549. 963 if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF { 964 w.closeAfterReply = true 965 } 966 967 // Per RFC 2616, we should consume the request body before 968 // replying, if the handler hasn't already done so. But we 969 // don't want to do an unbounded amount of reading here for 970 // DoS reasons, so we only try up to a threshold. 971 if w.req.ContentLength != 0 && !w.closeAfterReply { 972 var discard, tooBig bool 973 974 switch bdy := w.req.Body.(type) { 975 case *expectContinueReader: 976 if bdy.resp.wroteContinue { 977 discard = true 978 } 979 case *body: 980 bdy.mu.Lock() 981 switch { 982 case bdy.closed: 983 if !bdy.sawEOF { 984 // Body was closed in handler with non-EOF error. 985 w.closeAfterReply = true 986 } 987 case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes: 988 tooBig = true 989 default: 990 discard = true 991 } 992 bdy.mu.Unlock() 993 default: 994 discard = true 995 } 996 997 if discard { 998 _, err := io.CopyN(ioutil.Discard, w.reqBody, maxPostHandlerReadBytes+1) 999 switch err { 1000 case nil: 1001 // There must be even more data left over. 1002 tooBig = true 1003 case ErrBodyReadAfterClose: 1004 // Body was already consumed and closed. 1005 case io.EOF: 1006 // The remaining body was just consumed, close it. 1007 err = w.reqBody.Close() 1008 if err != nil { 1009 w.closeAfterReply = true 1010 } 1011 default: 1012 // Some other kind of error occured, like a read timeout, or 1013 // corrupt chunked encoding. In any case, whatever remains 1014 // on the wire must not be parsed as another HTTP request. 
1015 w.closeAfterReply = true 1016 } 1017 } 1018 1019 if tooBig { 1020 w.requestTooLarge() 1021 delHeader("Connection") 1022 setHeader.connection = "close" 1023 } 1024 } 1025 1026 code := w.status 1027 if bodyAllowedForStatus(code) { 1028 // If no content type, apply sniffing algorithm to body. 1029 _, haveType := header["Content-Type"] 1030 if !haveType && !hasTE { 1031 setHeader.contentType = DetectContentType(p) 1032 } 1033 } else { 1034 for _, k := range suppressedHeaders(code) { 1035 delHeader(k) 1036 } 1037 } 1038 1039 if _, ok := header["Date"]; !ok { 1040 setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now()) 1041 } 1042 1043 if hasCL && hasTE && te != "identity" { 1044 // TODO: return an error if WriteHeader gets a return parameter 1045 // For now just ignore the Content-Length. 1046 w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d", 1047 te, w.contentLength) 1048 delHeader("Content-Length") 1049 hasCL = false 1050 } 1051 1052 if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) { 1053 // do nothing 1054 } else if code == StatusNoContent { 1055 delHeader("Transfer-Encoding") 1056 } else if hasCL { 1057 delHeader("Transfer-Encoding") 1058 } else if w.req.ProtoAtLeast(1, 1) { 1059 // HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no 1060 // content-length has been provided. The connection must be closed after the 1061 // reply is written, and no chunking is to be done. This is the setup 1062 // recommended in the Server-Sent Events candidate recommendation 11, 1063 // section 8. 1064 if hasTE && te == "identity" { 1065 cw.chunking = false 1066 w.closeAfterReply = true 1067 } else { 1068 // HTTP/1.1 or greater: use chunked transfer encoding 1069 // to avoid closing the connection at EOF. 1070 cw.chunking = true 1071 setHeader.transferEncoding = "chunked" 1072 } 1073 } else { 1074 // HTTP version < 1.1: cannot do chunked transfer 1075 // encoding and we don't know the Content-Length so 1076 // signal EOF by closing connection. 1077 w.closeAfterReply = true 1078 delHeader("Transfer-Encoding") // in case already set 1079 } 1080 1081 // Cannot use Content-Length with non-identity Transfer-Encoding. 1082 if cw.chunking { 1083 delHeader("Content-Length") 1084 } 1085 if !w.req.ProtoAtLeast(1, 0) { 1086 return 1087 } 1088 1089 if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) { 1090 delHeader("Connection") 1091 if w.req.ProtoAtLeast(1, 1) { 1092 setHeader.connection = "close" 1093 } 1094 } 1095 1096 w.conn.bufw.WriteString(statusLine(w.req, code)) 1097 cw.header.WriteSubset(w.conn.bufw, excludeHeader) 1098 setHeader.Write(w.conn.bufw) 1099 w.conn.bufw.Write(crlf) 1100 } 1101 1102 // foreachHeaderElement splits v according to the "#rule" construction 1103 // in RFC 2616 section 2.1 and calls fn for each non-empty element. 1104 func foreachHeaderElement(v string, fn func(string)) { 1105 v = textproto.TrimString(v) 1106 if v == "" { 1107 return 1108 } 1109 if !strings.Contains(v, ",") { 1110 fn(v) 1111 return 1112 } 1113 for _, f := range strings.Split(v, ",") { 1114 if f = textproto.TrimString(f); f != "" { 1115 fn(f) 1116 } 1117 } 1118 } 1119 1120 // statusLines is a cache of Status-Line strings, keyed by code (for 1121 // HTTP/1.1) or negative code (for HTTP/1.0). This is faster than a 1122 // map keyed by struct of two fields. 
This map's max size is bounded 1123 // by 2*len(statusText), two protocol types for each known official 1124 // status code in the statusText map. 1125 var ( 1126 statusMu sync.RWMutex 1127 statusLines = make(map[int]string) 1128 ) 1129 1130 // statusLine returns a response Status-Line (RFC 2616 Section 6.1) 1131 // for the given request and response status code. 1132 func statusLine(req *Request, code int) string { 1133 // Fast path: 1134 key := code 1135 proto11 := req.ProtoAtLeast(1, 1) 1136 if !proto11 { 1137 key = -key 1138 } 1139 statusMu.RLock() 1140 line, ok := statusLines[key] 1141 statusMu.RUnlock() 1142 if ok { 1143 return line 1144 } 1145 1146 // Slow path: 1147 proto := "HTTP/1.0" 1148 if proto11 { 1149 proto = "HTTP/1.1" 1150 } 1151 codestring := strconv.Itoa(code) 1152 text, ok := statusText[code] 1153 if !ok { 1154 text = "status code " + codestring 1155 } 1156 line = proto + " " + codestring + " " + text + "\r\n" 1157 if ok { 1158 statusMu.Lock() 1159 defer statusMu.Unlock() 1160 statusLines[key] = line 1161 } 1162 return line 1163 } 1164 1165 // bodyAllowed reports whether a Write is allowed for this response type. 1166 // It's illegal to call this before the header has been flushed. 1167 func (w *response) bodyAllowed() bool { 1168 if !w.wroteHeader { 1169 panic("") 1170 } 1171 return bodyAllowedForStatus(w.status) 1172 } 1173 1174 // The Life Of A Write is like this: 1175 // 1176 // Handler starts. No header has been sent. The handler can either 1177 // write a header, or just start writing. Writing before sending a header 1178 // sends an implicitly empty 200 OK header. 1179 // 1180 // If the handler didn't declare a Content-Length up front, we either 1181 // go into chunking mode or, if the handler finishes running before 1182 // the chunking buffer size, we compute a Content-Length and send that 1183 // in the header instead. 1184 // 1185 // Likewise, if the handler didn't set a Content-Type, we sniff that 1186 // from the initial chunk of output. 1187 // 1188 // The Writers are wired together like: 1189 // 1190 // 1. *response (the ResponseWriter) -> 1191 // 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes 1192 // 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type) 1193 // and which writes the chunk headers, if needed. 1194 // 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to -> 1195 // 5. checkConnErrorWriter{c}, which notes any non-nil error on Write 1196 // and populates c.werr with it if so. but otherwise writes to: 1197 // 6. the rwc, the net.Conn. 1198 // 1199 // TODO(bradfitz): short-circuit some of the buffering when the 1200 // initial header contains both a Content-Type and Content-Length. 1201 // Also short-circuit in (1) when the header's been sent and not in 1202 // chunking mode, writing directly to (4) instead, if (2) has no 1203 // buffered data. More generally, we could short-circuit from (1) to 1204 // (3) even in chunking mode if the write size from (1) is over some 1205 // threshold and nothing is in (2). The answer might be mostly making 1206 // bufferBeforeChunkingSize smaller and having bufio's fast-paths deal 1207 // with this instead. 1208 func (w *response) Write(data []byte) (n int, err error) { 1209 return w.write(len(data), data, "") 1210 } 1211 1212 func (w *response) WriteString(data string) (n int, err error) { 1213 return w.write(len(data), nil, data) 1214 } 1215 1216 // either dataB or dataS is non-zero. 
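// Illustrative contrast (hypothetical user code) for the write flow
// described above:
//
//	// Small reply: finishes inside the 2 KB buffer, so the server
//	// computes a Content-Length and sniffs the Content-Type from the
//	// buffered bytes.
//	func small(w http.ResponseWriter, r *http.Request) {
//		io.WriteString(w, "<html><body>hi</body></html>")
//	}
//
//	// Large reply: overflows the buffer before the handler returns, so
//	// (for HTTP/1.1, with no declared Content-Length) the response
//	// switches to chunked transfer encoding.
//	func large(w http.ResponseWriter, r *http.Request) {
//		w.Write(bytes.Repeat([]byte("x"), 64<<10))
//	}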
1217 func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) { 1218 if w.conn.hijacked() { 1219 w.conn.server.logf("http: response.Write on hijacked connection") 1220 return 0, ErrHijacked 1221 } 1222 if !w.wroteHeader { 1223 w.WriteHeader(StatusOK) 1224 } 1225 if lenData == 0 { 1226 return 0, nil 1227 } 1228 if !w.bodyAllowed() { 1229 return 0, ErrBodyNotAllowed 1230 } 1231 1232 w.written += int64(lenData) // ignoring errors, for errorKludge 1233 if w.contentLength != -1 && w.written > w.contentLength { 1234 return 0, ErrContentLength 1235 } 1236 if dataB != nil { 1237 return w.w.Write(dataB) 1238 } else { 1239 return w.w.WriteString(dataS) 1240 } 1241 } 1242 1243 func (w *response) finishRequest() { 1244 w.handlerDone.setTrue() 1245 1246 if !w.wroteHeader { 1247 w.WriteHeader(StatusOK) 1248 } 1249 1250 w.w.Flush() 1251 putBufioWriter(w.w) 1252 w.cw.close() 1253 w.conn.bufw.Flush() 1254 1255 // Close the body (regardless of w.closeAfterReply) so we can 1256 // re-use its bufio.Reader later safely. 1257 w.reqBody.Close() 1258 1259 if w.req.MultipartForm != nil { 1260 w.req.MultipartForm.RemoveAll() 1261 } 1262 } 1263 1264 // shouldReuseConnection reports whether the underlying TCP connection can be reused. 1265 // It must only be called after the handler is done executing. 1266 func (w *response) shouldReuseConnection() bool { 1267 if w.closeAfterReply { 1268 // The request or something set while executing the 1269 // handler indicated we shouldn't reuse this 1270 // connection. 1271 return false 1272 } 1273 1274 if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written { 1275 // Did not write enough. Avoid getting out of sync. 1276 return false 1277 } 1278 1279 // There was some error writing to the underlying connection 1280 // during the request, so don't re-use this conn. 1281 if w.conn.werr != nil { 1282 return false 1283 } 1284 1285 if w.closedRequestBodyEarly() { 1286 return false 1287 } 1288 1289 return true 1290 } 1291 1292 func (w *response) closedRequestBodyEarly() bool { 1293 body, ok := w.req.Body.(*body) 1294 return ok && body.didEarlyClose() 1295 } 1296 1297 func (w *response) Flush() { 1298 if !w.wroteHeader { 1299 w.WriteHeader(StatusOK) 1300 } 1301 w.w.Flush() 1302 w.cw.flush() 1303 } 1304 1305 func (c *conn) finalFlush() { 1306 if c.bufr != nil { 1307 // Steal the bufio.Reader (~4KB worth of memory) and its associated 1308 // reader for a future connection. 1309 putBufioReader(c.bufr) 1310 c.bufr = nil 1311 } 1312 1313 if c.bufw != nil { 1314 c.bufw.Flush() 1315 // Steal the bufio.Writer (~4KB worth of memory) and its associated 1316 // writer for a future connection. 1317 putBufioWriter(c.bufw) 1318 c.bufw = nil 1319 } 1320 } 1321 1322 // Close the connection. 1323 func (c *conn) close() { 1324 c.finalFlush() 1325 c.rwc.Close() 1326 } 1327 1328 // rstAvoidanceDelay is the amount of time we sleep after closing the 1329 // write side of a TCP connection before closing the entire socket. 1330 // By sleeping, we increase the chances that the client sees our FIN 1331 // and processes its final data before they process the subsequent RST 1332 // from closing a connection with known unread data. 1333 // This RST seems to occur mostly on BSD systems. (And Windows?) 1334 // This timeout is somewhat arbitrary (~latency around the planet). 
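// Illustrative sketch (hypothetical user code): streaming output through the
// Flush method implemented above, e.g. for incremental progress reporting.
//
//	func progress(w http.ResponseWriter, r *http.Request) {
//		fl, ok := w.(http.Flusher)
//		if !ok {
//			http.Error(w, "streaming unsupported", http.StatusInternalServerError)
//			return
//		}
//		for i := 0; i < 5; i++ {
//			fmt.Fprintf(w, "step %d\n", i)
//			fl.Flush() // push the buffered bytes to the client now
//			time.Sleep(100 * time.Millisecond)
//		}
//	}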
1335 const rstAvoidanceDelay = 500 * time.Millisecond 1336 1337 type closeWriter interface { 1338 CloseWrite() error 1339 } 1340 1341 var _ closeWriter = (*net.TCPConn)(nil) 1342 1343 // closeWrite flushes any outstanding data and sends a FIN packet (if 1344 // client is connected via TCP), signalling that we're done. We then 1345 // pause for a bit, hoping the client processes it before any 1346 // subsequent RST. 1347 // 1348 // See https://golang.org/issue/3595 1349 func (c *conn) closeWriteAndWait() { 1350 c.finalFlush() 1351 if tcp, ok := c.rwc.(closeWriter); ok { 1352 tcp.CloseWrite() 1353 } 1354 time.Sleep(rstAvoidanceDelay) 1355 } 1356 1357 // validNPN reports whether the proto is not a blacklisted Next 1358 // Protocol Negotiation protocol. Empty and built-in protocol types 1359 // are blacklisted and can't be overridden with alternate 1360 // implementations. 1361 func validNPN(proto string) bool { 1362 switch proto { 1363 case "", "http/1.1", "http/1.0": 1364 return false 1365 } 1366 return true 1367 } 1368 1369 func (c *conn) setState(nc net.Conn, state ConnState) { 1370 if hook := c.server.ConnState; hook != nil { 1371 hook(nc, state) 1372 } 1373 } 1374 1375 // badRequestError is a literal string (used by in the server in HTML, 1376 // unescaped) to tell the user why their request was bad. It should 1377 // be plain text without user info or other embeddded errors. 1378 type badRequestError string 1379 1380 func (e badRequestError) Error() string { return "Bad Request: " + string(e) } 1381 1382 // Serve a new connection. 1383 func (c *conn) serve() { 1384 c.remoteAddr = c.rwc.RemoteAddr().String() 1385 defer func() { 1386 if err := recover(); err != nil { 1387 const size = 64 << 10 1388 buf := make([]byte, size) 1389 buf = buf[:runtime.Stack(buf, false)] 1390 c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf) 1391 } 1392 if !c.hijacked() { 1393 c.close() 1394 c.setState(c.rwc, StateClosed) 1395 } 1396 }() 1397 1398 if tlsConn, ok := c.rwc.(*tls.Conn); ok { 1399 if d := c.server.ReadTimeout; d != 0 { 1400 c.rwc.SetReadDeadline(time.Now().Add(d)) 1401 } 1402 if d := c.server.WriteTimeout; d != 0 { 1403 c.rwc.SetWriteDeadline(time.Now().Add(d)) 1404 } 1405 if err := tlsConn.Handshake(); err != nil { 1406 c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err) 1407 return 1408 } 1409 c.tlsState = new(tls.ConnectionState) 1410 *c.tlsState = tlsConn.ConnectionState() 1411 if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) { 1412 if fn := c.server.TLSNextProto[proto]; fn != nil { 1413 h := initNPNRequest{tlsConn, serverHandler{c.server}} 1414 fn(c.server, tlsConn, h) 1415 } 1416 return 1417 } 1418 } 1419 1420 c.r = &connReader{r: c.rwc} 1421 c.bufr = newBufioReader(c.r) 1422 c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10) 1423 1424 for { 1425 w, err := c.readRequest() 1426 if c.r.remain != c.server.initialReadLimitSize() { 1427 // If we read any bytes off the wire, we're active. 1428 c.setState(c.rwc, StateActive) 1429 } 1430 if err != nil { 1431 if err == errTooLarge { 1432 // Their HTTP client may or may not be 1433 // able to read this if we're 1434 // responding to them and hanging up 1435 // while they're still writing their 1436 // request. Undefined behavior. 
1437 io.WriteString(c.rwc, "HTTP/1.1 431 Request Header Fields Too Large\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n431 Request Header Fields Too Large") 1438 c.closeWriteAndWait() 1439 return 1440 } 1441 if err == io.EOF { 1442 return // don't reply 1443 } 1444 if neterr, ok := err.(net.Error); ok && neterr.Timeout() { 1445 return // don't reply 1446 } 1447 var publicErr string 1448 if v, ok := err.(badRequestError); ok { 1449 publicErr = ": " + string(v) 1450 } 1451 io.WriteString(c.rwc, "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n400 Bad Request"+publicErr) 1452 return 1453 } 1454 1455 // Expect 100 Continue support 1456 req := w.req 1457 if req.expectsContinue() { 1458 if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 { 1459 // Wrap the Body reader with one that replies on the connection 1460 req.Body = &expectContinueReader{readCloser: req.Body, resp: w} 1461 } 1462 } else if req.Header.get("Expect") != "" { 1463 w.sendExpectationFailed() 1464 return 1465 } 1466 1467 // HTTP cannot have multiple simultaneous active requests.[*] 1468 // Until the server replies to this request, it can't read another, 1469 // so we might as well run the handler in this goroutine. 1470 // [*] Not strictly true: HTTP pipelining. We could let them all process 1471 // in parallel even if their responses need to be serialized. 1472 serverHandler{c.server}.ServeHTTP(w, w.req) 1473 if c.hijacked() { 1474 return 1475 } 1476 w.finishRequest() 1477 if !w.shouldReuseConnection() { 1478 if w.requestBodyLimitHit || w.closedRequestBodyEarly() { 1479 c.closeWriteAndWait() 1480 } 1481 return 1482 } 1483 c.setState(c.rwc, StateIdle) 1484 } 1485 } 1486 1487 func (w *response) sendExpectationFailed() { 1488 // TODO(bradfitz): let ServeHTTP handlers handle 1489 // requests with non-standard expectation[s]? Seems 1490 // theoretical at best, and doesn't fit into the 1491 // current ServeHTTP model anyway. We'd need to 1492 // make the ResponseWriter an optional 1493 // "ExpectReplier" interface or something. 1494 // 1495 // For now we'll just obey RFC 2616 14.20 which says 1496 // "If a server receives a request containing an 1497 // Expect field that includes an expectation- 1498 // extension that it does not support, it MUST 1499 // respond with a 417 (Expectation Failed) status." 1500 w.Header().Set("Connection", "close") 1501 w.WriteHeader(StatusExpectationFailed) 1502 w.finishRequest() 1503 } 1504 1505 // Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter 1506 // and a Hijacker. 1507 func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { 1508 if w.handlerDone.isSet() { 1509 panic("net/http: Hijack called after ServeHTTP finished") 1510 } 1511 if w.wroteHeader { 1512 w.cw.flush() 1513 } 1514 1515 c := w.conn 1516 c.mu.Lock() 1517 defer c.mu.Unlock() 1518 1519 if w.closeNotifyCh != nil { 1520 return nil, nil, errors.New("http: Hijack is incompatible with use of CloseNotifier in same ServeHTTP call") 1521 } 1522 1523 // Release the bufioWriter that writes to the chunk writer, it is not 1524 // used after a connection has been hijacked. 
1525 rwc, buf, err = c.hijackLocked() 1526 if err == nil { 1527 putBufioWriter(w.w) 1528 w.w = nil 1529 } 1530 return rwc, buf, err 1531 } 1532 1533 func (w *response) CloseNotify() <-chan bool { 1534 if w.handlerDone.isSet() { 1535 panic("net/http: CloseNotify called after ServeHTTP finished") 1536 } 1537 c := w.conn 1538 c.mu.Lock() 1539 defer c.mu.Unlock() 1540 1541 if w.closeNotifyCh != nil { 1542 return w.closeNotifyCh 1543 } 1544 ch := make(chan bool, 1) 1545 w.closeNotifyCh = ch 1546 1547 if w.conn.hijackedv { 1548 // CloseNotify is undefined after a hijack, but we have 1549 // no place to return an error, so just return a channel, 1550 // even though it'll never receive a value. 1551 return ch 1552 } 1553 1554 var once sync.Once 1555 notify := func() { once.Do(func() { ch <- true }) } 1556 1557 if requestBodyRemains(w.reqBody) { 1558 // They're still consuming the request body, so we 1559 // shouldn't notify yet. 1560 registerOnHitEOF(w.reqBody, func() { 1561 c.mu.Lock() 1562 defer c.mu.Unlock() 1563 startCloseNotifyBackgroundRead(c, notify) 1564 }) 1565 } else { 1566 startCloseNotifyBackgroundRead(c, notify) 1567 } 1568 return ch 1569 } 1570 1571 // c.mu must be held. 1572 func startCloseNotifyBackgroundRead(c *conn, notify func()) { 1573 if c.bufr.Buffered() > 0 { 1574 // They've consumed the request body, so anything 1575 // remaining is a pipelined request, which we 1576 // document as firing on. 1577 notify() 1578 } else { 1579 c.r.startBackgroundRead(notify) 1580 } 1581 } 1582 1583 func registerOnHitEOF(rc io.ReadCloser, fn func()) { 1584 switch v := rc.(type) { 1585 case *expectContinueReader: 1586 registerOnHitEOF(v.readCloser, fn) 1587 case *body: 1588 v.registerOnHitEOF(fn) 1589 default: 1590 panic("unexpected type " + fmt.Sprintf("%T", rc)) 1591 } 1592 } 1593 1594 // requestBodyRemains reports whether future calls to Read 1595 // on rc might yield more data. 1596 func requestBodyRemains(rc io.ReadCloser) bool { 1597 if rc == eofReader { 1598 return false 1599 } 1600 switch v := rc.(type) { 1601 case *expectContinueReader: 1602 return requestBodyRemains(v.readCloser) 1603 case *body: 1604 return v.bodyRemains() 1605 default: 1606 panic("unexpected type " + fmt.Sprintf("%T", rc)) 1607 } 1608 } 1609 1610 // The HandlerFunc type is an adapter to allow the use of 1611 // ordinary functions as HTTP handlers. If f is a function 1612 // with the appropriate signature, HandlerFunc(f) is a 1613 // Handler that calls f. 1614 type HandlerFunc func(ResponseWriter, *Request) 1615 1616 // ServeHTTP calls f(w, r). 1617 func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) { 1618 f(w, r) 1619 } 1620 1621 // Helper handlers 1622 1623 // Error replies to the request with the specified error message and HTTP code. 1624 // The error message should be plain text. 1625 func Error(w ResponseWriter, error string, code int) { 1626 w.Header().Set("Content-Type", "text/plain; charset=utf-8") 1627 w.Header().Set("X-Content-Type-Options", "nosniff") 1628 w.WriteHeader(code) 1629 fmt.Fprintln(w, error) 1630 } 1631 1632 // NotFound replies to the request with an HTTP 404 not found error. 1633 func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) } 1634 1635 // NotFoundHandler returns a simple request handler 1636 // that replies to each request with a ``404 page not found'' reply. 
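// Illustrative sketch (hypothetical user code; the X-Admin check is made
// up): HandlerFunc and Error, defined above, are the usual building blocks
// for small handlers.
//
//	func adminOnly(w http.ResponseWriter, r *http.Request) {
//		if r.Header.Get("X-Admin") == "" { // hypothetical check
//			http.Error(w, "403 forbidden", http.StatusForbidden)
//			return
//		}
//		io.WriteString(w, "welcome, admin\n")
//	}
//
//	var h http.Handler = http.HandlerFunc(adminOnly)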
1637 func NotFoundHandler() Handler { return HandlerFunc(NotFound) }
1638 
1639 // StripPrefix returns a handler that serves HTTP requests
1640 // by removing the given prefix from the request URL's Path
1641 // and invoking the handler h. StripPrefix handles a
1642 // request for a path that doesn't begin with prefix by
1643 // replying with an HTTP 404 not found error.
1644 func StripPrefix(prefix string, h Handler) Handler {
1645 	if prefix == "" {
1646 		return h
1647 	}
1648 	return HandlerFunc(func(w ResponseWriter, r *Request) {
1649 		if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) {
1650 			r.URL.Path = p
1651 			h.ServeHTTP(w, r)
1652 		} else {
1653 			NotFound(w, r)
1654 		}
1655 	})
1656 }
1657 
1658 // Redirect replies to the request with a redirect to url,
1659 // which may be a path relative to the request path.
1660 //
1661 // The provided code should be in the 3xx range and is usually
1662 // StatusMovedPermanently, StatusFound or StatusSeeOther.
1663 func Redirect(w ResponseWriter, r *Request, urlStr string, code int) {
1664 	if u, err := url.Parse(urlStr); err == nil {
1665 		// If url was relative, make absolute by
1666 		// combining with request path.
1667 		// The browser would probably do this for us,
1668 		// but doing it ourselves is more reliable.
1669 
1670 		// NOTE(rsc): RFC 2616 says that the Location
1671 		// line must be an absolute URI, like
1672 		// "http://www.google.com/redirect/",
1673 		// not a path like "/redirect/".
1674 		// Unfortunately, we don't know what to
1675 		// put in the host name section to get the
1676 		// client to connect to us again, so we can't
1677 		// know the right absolute URI to send back.
1678 		// Because of this problem, no one pays attention
1679 		// to the RFC; they all send back just a new path.
1680 		// So do we.
1681 		if u.Scheme == "" && u.Host == "" {
1682 			oldpath := r.URL.Path
1683 			if oldpath == "" { // should not happen, but avoid a crash if it does
1684 				oldpath = "/"
1685 			}
1686 
1687 			// no leading http://server
1688 			if urlStr == "" || urlStr[0] != '/' {
1689 				// make relative path absolute
1690 				olddir, _ := path.Split(oldpath)
1691 				urlStr = olddir + urlStr
1692 			}
1693 
1694 			var query string
1695 			if i := strings.Index(urlStr, "?"); i != -1 {
1696 				urlStr, query = urlStr[:i], urlStr[i:]
1697 			}
1698 
1699 			// clean up but preserve trailing slash
1700 			trailing := strings.HasSuffix(urlStr, "/")
1701 			urlStr = path.Clean(urlStr)
1702 			if trailing && !strings.HasSuffix(urlStr, "/") {
1703 				urlStr += "/"
1704 			}
1705 			urlStr += query
1706 		}
1707 	}
1708 
1709 	w.Header().Set("Location", urlStr)
1710 	w.WriteHeader(code)
1711 
1712 	// RFC2616 recommends that a short note "SHOULD" be included in the
1713 	// response because older user agents may not understand 301/307.
1714 	// Shouldn't send the response for POST or HEAD; that leaves GET.
1715 	if r.Method == "GET" {
1716 		note := "<a href=\"" + htmlEscape(urlStr) + "\">" + statusText[code] + "</a>.\n"
1717 		fmt.Fprintln(w, note)
1718 	}
1719 }
1720 
1721 var htmlReplacer = strings.NewReplacer(
1722 	"&", "&amp;",
1723 	"<", "&lt;",
1724 	">", "&gt;",
1725 	// "&#34;" is shorter than "&quot;".
1726 	`"`, "&#34;",
1727 	// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
1728 	"'", "&#39;",
1729 )
1730 
1731 func htmlEscape(s string) string {
1732 	return htmlReplacer.Replace(s)
1733 }
1734 
1735 // Redirect to a fixed URL
1736 type redirectHandler struct {
1737 	url  string
1738 	code int
1739 }
1740 
1741 func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) {
1742 	Redirect(w, r, rh.url, rh.code)
1743 }
1744 
1745 // RedirectHandler returns a request handler that redirects
1746 // each request it receives to the given url using the given
1747 // status code.
1748 //
1749 // The provided code should be in the 3xx range and is usually
1750 // StatusMovedPermanently, StatusFound or StatusSeeOther.
1751 func RedirectHandler(url string, code int) Handler {
1752 	return &redirectHandler{url, code}
1753 }
1754 
1755 // ServeMux is an HTTP request multiplexer.
1756 // It matches the URL of each incoming request against a list of registered
1757 // patterns and calls the handler for the pattern that
1758 // most closely matches the URL.
1759 //
1760 // Patterns name fixed, rooted paths, like "/favicon.ico",
1761 // or rooted subtrees, like "/images/" (note the trailing slash).
1762 // Longer patterns take precedence over shorter ones, so that
1763 // if there are handlers registered for both "/images/"
1764 // and "/images/thumbnails/", the latter handler will be
1765 // called for paths beginning "/images/thumbnails/" and the
1766 // former will receive requests for any other paths in the
1767 // "/images/" subtree.
1768 //
1769 // Note that since a pattern ending in a slash names a rooted subtree,
1770 // the pattern "/" matches all paths not matched by other registered
1771 // patterns, not just the URL with Path == "/".
1772 //
1773 // If a subtree has been registered and a request is received naming the
1774 // subtree root without its trailing slash, ServeMux redirects that
1775 // request to the subtree root (adding the trailing slash). This behavior can
1776 // be overridden with a separate registration for the path without
1777 // the trailing slash. For example, registering "/images/" causes ServeMux
1778 // to redirect a request for "/images" to "/images/", unless "/images" has
1779 // been registered separately.
1780 //
1781 // Patterns may optionally begin with a host name, restricting matches to
1782 // URLs on that host only. Host-specific patterns take precedence over
1783 // general patterns, so that a handler might register for the two patterns
1784 // "/codesearch" and "codesearch.google.com/" without also taking over
1785 // requests for "http://www.google.com/".
1786 //
1787 // ServeMux also takes care of sanitizing the URL request path,
1788 // redirecting any request containing . or .. elements or repeated slashes
1789 // to an equivalent, cleaner URL.
1790 type ServeMux struct {
1791 	mu    sync.RWMutex
1792 	m     map[string]muxEntry
1793 	hosts bool // whether any patterns contain hostnames
1794 }
1795 
1796 type muxEntry struct {
1797 	explicit bool
1798 	h        Handler
1799 	pattern  string
1800 }
1801 
1802 // NewServeMux allocates and returns a new ServeMux.
1803 func NewServeMux() *ServeMux { return &ServeMux{m: make(map[string]muxEntry)} }
1804 
1805 // DefaultServeMux is the default ServeMux used by Serve.
1806 var DefaultServeMux = NewServeMux()
1807 
1808 // Does path match pattern?
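// Illustrative sketch (hypothetical user code; the handler variables are
// made-up http.Handler values) of the pattern rules documented above:
//
//	mux := http.NewServeMux()
//	mux.Handle("/favicon.ico", iconHandler)         // fixed, rooted path
//	mux.Handle("/images/", imageHandler)            // rooted subtree
//	mux.Handle("/images/thumbnails/", thumbHandler) // longer pattern wins
//	mux.Handle("example.com/", exampleHandler)      // host-specific pattern
//	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
//		// "/" matches everything not matched above.
//		http.NotFound(w, r)
//	})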
1809 func pathMatch(pattern, path string) bool { 1810 if len(pattern) == 0 { 1811 // should not happen 1812 return false 1813 } 1814 n := len(pattern) 1815 if pattern[n-1] != '/' { 1816 return pattern == path 1817 } 1818 return len(path) >= n && path[0:n] == pattern 1819 } 1820 1821 // Return the canonical path for p, eliminating . and .. elements. 1822 func cleanPath(p string) string { 1823 if p == "" { 1824 return "/" 1825 } 1826 if p[0] != '/' { 1827 p = "/" + p 1828 } 1829 np := path.Clean(p) 1830 // path.Clean removes trailing slash except for root; 1831 // put the trailing slash back if necessary. 1832 if p[len(p)-1] == '/' && np != "/" { 1833 np += "/" 1834 } 1835 return np 1836 } 1837 1838 // Find a handler on a handler map given a path string 1839 // Most-specific (longest) pattern wins 1840 func (mux *ServeMux) match(path string) (h Handler, pattern string) { 1841 var n = 0 1842 for k, v := range mux.m { 1843 if !pathMatch(k, path) { 1844 continue 1845 } 1846 if h == nil || len(k) > n { 1847 n = len(k) 1848 h = v.h 1849 pattern = v.pattern 1850 } 1851 } 1852 return 1853 } 1854 1855 // Handler returns the handler to use for the given request, 1856 // consulting r.Method, r.Host, and r.URL.Path. It always returns 1857 // a non-nil handler. If the path is not in its canonical form, the 1858 // handler will be an internally-generated handler that redirects 1859 // to the canonical path. 1860 // 1861 // Handler also returns the registered pattern that matches the 1862 // request or, in the case of internally-generated redirects, 1863 // the pattern that will match after following the redirect. 1864 // 1865 // If there is no registered handler that applies to the request, 1866 // Handler returns a ``page not found'' handler and an empty pattern. 1867 func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) { 1868 if r.Method != "CONNECT" { 1869 if p := cleanPath(r.URL.Path); p != r.URL.Path { 1870 _, pattern = mux.handler(r.Host, p) 1871 url := *r.URL 1872 url.Path = p 1873 return RedirectHandler(url.String(), StatusMovedPermanently), pattern 1874 } 1875 } 1876 1877 return mux.handler(r.Host, r.URL.Path) 1878 } 1879 1880 // handler is the main implementation of Handler. 1881 // The path is known to be in canonical form, except for CONNECT methods. 1882 func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) { 1883 mux.mu.RLock() 1884 defer mux.mu.RUnlock() 1885 1886 // Host-specific pattern takes precedence over generic ones 1887 if mux.hosts { 1888 h, pattern = mux.match(host + path) 1889 } 1890 if h == nil { 1891 h, pattern = mux.match(path) 1892 } 1893 if h == nil { 1894 h, pattern = NotFoundHandler(), "" 1895 } 1896 return 1897 } 1898 1899 // ServeHTTP dispatches the request to the handler whose 1900 // pattern most closely matches the request URL. 1901 func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) { 1902 if r.RequestURI == "*" { 1903 if r.ProtoAtLeast(1, 1) { 1904 w.Header().Set("Connection", "close") 1905 } 1906 w.WriteHeader(StatusBadRequest) 1907 return 1908 } 1909 h, _ := mux.Handler(r) 1910 h.ServeHTTP(w, r) 1911 } 1912 1913 // Handle registers the handler for the given pattern. 1914 // If a handler already exists for pattern, Handle panics. 
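// A minimal sketch of the panic described above (the pattern and handlers
// are arbitrary): a second registration of the same pattern on one ServeMux
// panics with "http: multiple registrations for /status".
//
//	mux := http.NewServeMux()
//	mux.Handle("/status", http.NotFoundHandler())
//	mux.Handle("/status", http.NotFoundHandler()) // panics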
1915 func (mux *ServeMux) Handle(pattern string, handler Handler) { 1916 mux.mu.Lock() 1917 defer mux.mu.Unlock() 1918 1919 if pattern == "" { 1920 panic("http: invalid pattern " + pattern) 1921 } 1922 if handler == nil { 1923 panic("http: nil handler") 1924 } 1925 if mux.m[pattern].explicit { 1926 panic("http: multiple registrations for " + pattern) 1927 } 1928 1929 mux.m[pattern] = muxEntry{explicit: true, h: handler, pattern: pattern} 1930 1931 if pattern[0] != '/' { 1932 mux.hosts = true 1933 } 1934 1935 // Helpful behavior: 1936 // If pattern is /tree/, insert an implicit permanent redirect for /tree. 1937 // It can be overridden by an explicit registration. 1938 n := len(pattern) 1939 if n > 0 && pattern[n-1] == '/' && !mux.m[pattern[0:n-1]].explicit { 1940 // If pattern contains a host name, strip it and use remaining 1941 // path for redirect. 1942 path := pattern 1943 if pattern[0] != '/' { 1944 // In pattern, at least the last character is a '/', so 1945 // strings.Index can't be -1. 1946 path = pattern[strings.Index(pattern, "/"):] 1947 } 1948 url := &url.URL{Path: path} 1949 mux.m[pattern[0:n-1]] = muxEntry{h: RedirectHandler(url.String(), StatusMovedPermanently), pattern: pattern} 1950 } 1951 } 1952 1953 // HandleFunc registers the handler function for the given pattern. 1954 func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { 1955 mux.Handle(pattern, HandlerFunc(handler)) 1956 } 1957 1958 // Handle registers the handler for the given pattern 1959 // in the DefaultServeMux. 1960 // The documentation for ServeMux explains how patterns are matched. 1961 func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } 1962 1963 // HandleFunc registers the handler function for the given pattern 1964 // in the DefaultServeMux. 1965 // The documentation for ServeMux explains how patterns are matched. 1966 func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { 1967 DefaultServeMux.HandleFunc(pattern, handler) 1968 } 1969 1970 // Serve accepts incoming HTTP connections on the listener l, 1971 // creating a new service goroutine for each. The service goroutines 1972 // read requests and then call handler to reply to them. 1973 // Handler is typically nil, in which case the DefaultServeMux is used. 1974 func Serve(l net.Listener, handler Handler) error { 1975 srv := &Server{Handler: handler} 1976 return srv.Serve(l) 1977 } 1978 1979 // A Server defines parameters for running an HTTP server. 1980 // The zero value for Server is a valid configuration. 1981 type Server struct { 1982 Addr string // TCP address to listen on, ":http" if empty 1983 Handler Handler // handler to invoke, http.DefaultServeMux if nil 1984 ReadTimeout time.Duration // maximum duration before timing out read of the request 1985 WriteTimeout time.Duration // maximum duration before timing out write of the response 1986 MaxHeaderBytes int // maximum size of request headers, DefaultMaxHeaderBytes if 0 1987 TLSConfig *tls.Config // optional TLS config, used by ListenAndServeTLS 1988 1989 // TLSNextProto optionally specifies a function to take over 1990 // ownership of the provided TLS connection when an NPN 1991 // protocol upgrade has occurred. The map key is the protocol 1992 // name negotiated. The Handler argument should be used to 1993 // handle HTTP requests and will initialize the Request's TLS 1994 // and RemoteAddr if not already set. The connection is 1995 // automatically closed when the function returns. 
1996 // If TLSNextProto is nil, HTTP/2 support is enabled automatically. 1997 TLSNextProto map[string]func(*Server, *tls.Conn, Handler) 1998 1999 // ConnState specifies an optional callback function that is 2000 // called when a client connection changes state. See the 2001 // ConnState type and associated constants for details. 2002 ConnState func(net.Conn, ConnState) 2003 2004 // ErrorLog specifies an optional logger for errors accepting 2005 // connections and unexpected behavior from handlers. 2006 // If nil, logging goes to os.Stderr via the log package's 2007 // standard logger. 2008 ErrorLog *log.Logger 2009 2010 disableKeepAlives int32 // accessed atomically. 2011 nextProtoOnce sync.Once // guards initialization of TLSNextProto in Serve 2012 nextProtoErr error 2013 } 2014 2015 // A ConnState represents the state of a client connection to a server. 2016 // It's used by the optional Server.ConnState hook. 2017 type ConnState int 2018 2019 const ( 2020 // StateNew represents a new connection that is expected to 2021 // send a request immediately. Connections begin at this 2022 // state and then transition to either StateActive or 2023 // StateClosed. 2024 StateNew ConnState = iota 2025 2026 // StateActive represents a connection that has read 1 or more 2027 // bytes of a request. The Server.ConnState hook for 2028 // StateActive fires before the request has entered a handler 2029 // and doesn't fire again until the request has been 2030 // handled. After the request is handled, the state 2031 // transitions to StateClosed, StateHijacked, or StateIdle. 2032 // For HTTP/2, StateActive fires on the transition from zero 2033 // to one active request, and only transitions away once all 2034 // active requests are complete. That means that ConnState 2035 // can not be used to do per-request work; ConnState only notes 2036 // the overall state of the connection. 2037 StateActive 2038 2039 // StateIdle represents a connection that has finished 2040 // handling a request and is in the keep-alive state, waiting 2041 // for a new request. Connections transition from StateIdle 2042 // to either StateActive or StateClosed. 2043 StateIdle 2044 2045 // StateHijacked represents a hijacked connection. 2046 // This is a terminal state. It does not transition to StateClosed. 2047 StateHijacked 2048 2049 // StateClosed represents a closed connection. 2050 // This is a terminal state. Hijacked connections do not 2051 // transition to StateClosed. 2052 StateClosed 2053 ) 2054 2055 var stateName = map[ConnState]string{ 2056 StateNew: "new", 2057 StateActive: "active", 2058 StateIdle: "idle", 2059 StateHijacked: "hijacked", 2060 StateClosed: "closed", 2061 } 2062 2063 func (c ConnState) String() string { 2064 return stateName[c] 2065 } 2066 2067 // serverHandler delegates to either the server's Handler or 2068 // DefaultServeMux and also handles "OPTIONS *" requests. 2069 type serverHandler struct { 2070 srv *Server 2071 } 2072 2073 func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) { 2074 handler := sh.srv.Handler 2075 if handler == nil { 2076 handler = DefaultServeMux 2077 } 2078 if req.RequestURI == "*" && req.Method == "OPTIONS" { 2079 handler = globalOptionsHandler{} 2080 } 2081 handler.ServeHTTP(rw, req) 2082 } 2083 2084 // ListenAndServe listens on the TCP network address srv.Addr and then 2085 // calls Serve to handle requests on incoming connections. 2086 // Accepted connections are configured to enable TCP keep-alives. 2087 // If srv.Addr is blank, ":http" is used. 
2088 // ListenAndServe always returns a non-nil error. 2089 func (srv *Server) ListenAndServe() error { 2090 addr := srv.Addr 2091 if addr == "" { 2092 addr = ":http" 2093 } 2094 ln, err := net.Listen("tcp", addr) 2095 if err != nil { 2096 return err 2097 } 2098 return srv.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)}) 2099 } 2100 2101 var testHookServerServe func(*Server, net.Listener) // used if non-nil 2102 2103 // Serve accepts incoming connections on the Listener l, creating a 2104 // new service goroutine for each. The service goroutines read requests and 2105 // then call srv.Handler to reply to them. 2106 // Serve always returns a non-nil error. 2107 func (srv *Server) Serve(l net.Listener) error { 2108 defer l.Close() 2109 if fn := testHookServerServe; fn != nil { 2110 fn(srv, l) 2111 } 2112 var tempDelay time.Duration // how long to sleep on accept failure 2113 if err := srv.setupHTTP2(); err != nil { 2114 return err 2115 } 2116 for { 2117 rw, e := l.Accept() 2118 if e != nil { 2119 if ne, ok := e.(net.Error); ok && ne.Temporary() { 2120 if tempDelay == 0 { 2121 tempDelay = 5 * time.Millisecond 2122 } else { 2123 tempDelay *= 2 2124 } 2125 if max := 1 * time.Second; tempDelay > max { 2126 tempDelay = max 2127 } 2128 srv.logf("http: Accept error: %v; retrying in %v", e, tempDelay) 2129 time.Sleep(tempDelay) 2130 continue 2131 } 2132 return e 2133 } 2134 tempDelay = 0 2135 c := srv.newConn(rw) 2136 c.setState(c.rwc, StateNew) // before Serve can return 2137 go c.serve() 2138 } 2139 } 2140 2141 func (s *Server) doKeepAlives() bool { 2142 return atomic.LoadInt32(&s.disableKeepAlives) == 0 2143 } 2144 2145 // SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled. 2146 // By default, keep-alives are always enabled. Only very 2147 // resource-constrained environments or servers in the process of 2148 // shutting down should disable them. 2149 func (srv *Server) SetKeepAlivesEnabled(v bool) { 2150 if v { 2151 atomic.StoreInt32(&srv.disableKeepAlives, 0) 2152 } else { 2153 atomic.StoreInt32(&srv.disableKeepAlives, 1) 2154 } 2155 } 2156 2157 func (s *Server) logf(format string, args ...interface{}) { 2158 if s.ErrorLog != nil { 2159 s.ErrorLog.Printf(format, args...) 2160 } else { 2161 log.Printf(format, args...) 2162 } 2163 } 2164 2165 // ListenAndServe listens on the TCP network address addr 2166 // and then calls Serve with handler to handle requests 2167 // on incoming connections. 2168 // Accepted connections are configured to enable TCP keep-alives. 2169 // Handler is typically nil, in which case the DefaultServeMux is 2170 // used. 2171 // 2172 // A trivial example server is: 2173 // 2174 // package main 2175 // 2176 // import ( 2177 // "io" 2178 // "net/http" 2179 // "log" 2180 // ) 2181 // 2182 // // hello world, the web server 2183 // func HelloServer(w http.ResponseWriter, req *http.Request) { 2184 // io.WriteString(w, "hello, world!\n") 2185 // } 2186 // 2187 // func main() { 2188 // http.HandleFunc("/hello", HelloServer) 2189 // log.Fatal(http.ListenAndServe(":12345", nil)) 2190 // } 2191 // 2192 // ListenAndServe always returns a non-nil error. 2193 func ListenAndServe(addr string, handler Handler) error { 2194 server := &Server{Addr: addr, Handler: handler} 2195 return server.ListenAndServe() 2196 } 2197 2198 // ListenAndServeTLS acts identically to ListenAndServe, except that it 2199 // expects HTTPS connections. Additionally, files containing a certificate and 2200 // matching private key for the server must be provided. 
If the certificate 2201 // is signed by a certificate authority, the certFile should be the concatenation 2202 // of the server's certificate, any intermediates, and the CA's certificate. 2203 // 2204 // A trivial example server is: 2205 // 2206 // import ( 2207 // "log" 2208 // "net/http" 2209 // ) 2210 // 2211 // func handler(w http.ResponseWriter, req *http.Request) { 2212 // w.Header().Set("Content-Type", "text/plain") 2213 // w.Write([]byte("This is an example server.\n")) 2214 // } 2215 // 2216 // func main() { 2217 // http.HandleFunc("/", handler) 2218 // log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/") 2219 // err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil) 2220 // log.Fatal(err) 2221 // } 2222 // 2223 // One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem. 2224 // 2225 // ListenAndServeTLS always returns a non-nil error. 2226 func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { 2227 server := &Server{Addr: addr, Handler: handler} 2228 return server.ListenAndServeTLS(certFile, keyFile) 2229 } 2230 2231 // ListenAndServeTLS listens on the TCP network address srv.Addr and 2232 // then calls Serve to handle requests on incoming TLS connections. 2233 // Accepted connections are configured to enable TCP keep-alives. 2234 // 2235 // Filenames containing a certificate and matching private key for the 2236 // server must be provided if neither the Server's TLSConfig.Certificates 2237 // nor TLSConfig.GetCertificate are populated. If the certificate is 2238 // signed by a certificate authority, the certFile should be the 2239 // concatenation of the server's certificate, any intermediates, and 2240 // the CA's certificate. 2241 // 2242 // If srv.Addr is blank, ":https" is used. 2243 // 2244 // ListenAndServeTLS always returns a non-nil error. 2245 func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error { 2246 addr := srv.Addr 2247 if addr == "" { 2248 addr = ":https" 2249 } 2250 2251 // Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig 2252 // before we clone it and create the TLS Listener. 2253 if err := srv.setupHTTP2(); err != nil { 2254 return err 2255 } 2256 2257 config := cloneTLSConfig(srv.TLSConfig) 2258 if !strSliceContains(config.NextProtos, "http/1.1") { 2259 config.NextProtos = append(config.NextProtos, "http/1.1") 2260 } 2261 2262 configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil 2263 if !configHasCert || certFile != "" || keyFile != "" { 2264 var err error 2265 config.Certificates = make([]tls.Certificate, 1) 2266 config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) 2267 if err != nil { 2268 return err 2269 } 2270 } 2271 2272 ln, err := net.Listen("tcp", addr) 2273 if err != nil { 2274 return err 2275 } 2276 2277 tlsListener := tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, config) 2278 return srv.Serve(tlsListener) 2279 } 2280 2281 func (srv *Server) setupHTTP2() error { 2282 srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults) 2283 return srv.nextProtoErr 2284 } 2285 2286 // onceSetNextProtoDefaults configures HTTP/2, if the user hasn't 2287 // configured otherwise. (by setting srv.TLSNextProto non-nil) 2288 // It must only be called via srv.nextProtoOnce (use srv.setupHTTP2). 
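// A brief sketch of that opt-out from user code: assigning any non-nil
// TLSNextProto map before serving keeps the automatic HTTP/2 setup from
// running, and setting GODEBUG=http2server=0 has the same effect. The empty
// map below is simply the smallest such value; the address, handler, and
// certificate file names are arbitrary, and "crypto/tls" must be imported.
//
//	srv := &http.Server{Addr: ":8443", Handler: mux} // mux: some http.Handler
//	srv.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler))
//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))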
2289 func (srv *Server) onceSetNextProtoDefaults() { 2290 if strings.Contains(os.Getenv("GODEBUG"), "http2server=0") { 2291 return 2292 } 2293 // Enable HTTP/2 by default if the user hasn't otherwise 2294 // configured their TLSNextProto map. 2295 if srv.TLSNextProto == nil { 2296 srv.nextProtoErr = http2ConfigureServer(srv, nil) 2297 } 2298 } 2299 2300 // TimeoutHandler returns a Handler that runs h with the given time limit. 2301 // 2302 // The new Handler calls h.ServeHTTP to handle each request, but if a 2303 // call runs for longer than its time limit, the handler responds with 2304 // a 503 Service Unavailable error and the given message in its body. 2305 // (If msg is empty, a suitable default message will be sent.) 2306 // After such a timeout, writes by h to its ResponseWriter will return 2307 // ErrHandlerTimeout. 2308 // 2309 // TimeoutHandler buffers all Handler writes to memory and does not 2310 // support the Hijacker or Flusher interfaces. 2311 func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler { 2312 return &timeoutHandler{ 2313 handler: h, 2314 body: msg, 2315 dt: dt, 2316 } 2317 } 2318 2319 // ErrHandlerTimeout is returned on ResponseWriter Write calls 2320 // in handlers which have timed out. 2321 var ErrHandlerTimeout = errors.New("http: Handler timeout") 2322 2323 type timeoutHandler struct { 2324 handler Handler 2325 body string 2326 dt time.Duration 2327 2328 // When set, no timer will be created and this channel will 2329 // be used instead. 2330 testTimeout <-chan time.Time 2331 } 2332 2333 func (h *timeoutHandler) errorBody() string { 2334 if h.body != "" { 2335 return h.body 2336 } 2337 return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>" 2338 } 2339 2340 func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) { 2341 var t *time.Timer 2342 timeout := h.testTimeout 2343 if timeout == nil { 2344 t = time.NewTimer(h.dt) 2345 timeout = t.C 2346 } 2347 done := make(chan struct{}) 2348 tw := &timeoutWriter{ 2349 w: w, 2350 h: make(Header), 2351 } 2352 go func() { 2353 h.handler.ServeHTTP(tw, r) 2354 close(done) 2355 }() 2356 select { 2357 case <-done: 2358 tw.mu.Lock() 2359 defer tw.mu.Unlock() 2360 dst := w.Header() 2361 for k, vv := range tw.h { 2362 dst[k] = vv 2363 } 2364 w.WriteHeader(tw.code) 2365 w.Write(tw.wbuf.Bytes()) 2366 if t != nil { 2367 t.Stop() 2368 } 2369 case <-timeout: 2370 tw.mu.Lock() 2371 defer tw.mu.Unlock() 2372 w.WriteHeader(StatusServiceUnavailable) 2373 io.WriteString(w, h.errorBody()) 2374 tw.timedOut = true 2375 return 2376 } 2377 } 2378 2379 type timeoutWriter struct { 2380 w ResponseWriter 2381 h Header 2382 wbuf bytes.Buffer 2383 2384 mu sync.Mutex 2385 timedOut bool 2386 wroteHeader bool 2387 code int 2388 } 2389 2390 func (tw *timeoutWriter) Header() Header { return tw.h } 2391 2392 func (tw *timeoutWriter) Write(p []byte) (int, error) { 2393 tw.mu.Lock() 2394 defer tw.mu.Unlock() 2395 if tw.timedOut { 2396 return 0, ErrHandlerTimeout 2397 } 2398 if !tw.wroteHeader { 2399 tw.writeHeader(StatusOK) 2400 } 2401 return tw.wbuf.Write(p) 2402 } 2403 2404 func (tw *timeoutWriter) WriteHeader(code int) { 2405 tw.mu.Lock() 2406 defer tw.mu.Unlock() 2407 if tw.timedOut || tw.wroteHeader { 2408 return 2409 } 2410 tw.writeHeader(code) 2411 } 2412 2413 func (tw *timeoutWriter) writeHeader(code int) { 2414 tw.wroteHeader = true 2415 tw.code = code 2416 } 2417 2418 // tcpKeepAliveListener sets TCP keep-alive timeouts on accepted 2419 // connections. 
It's used by ListenAndServe and ListenAndServeTLS so 2420 // dead TCP connections (e.g. closing laptop mid-download) eventually 2421 // go away. 2422 type tcpKeepAliveListener struct { 2423 *net.TCPListener 2424 } 2425 2426 func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { 2427 tc, err := ln.AcceptTCP() 2428 if err != nil { 2429 return 2430 } 2431 tc.SetKeepAlive(true) 2432 tc.SetKeepAlivePeriod(3 * time.Minute) 2433 return tc, nil 2434 } 2435 2436 // globalOptionsHandler responds to "OPTIONS *" requests. 2437 type globalOptionsHandler struct{} 2438 2439 func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) { 2440 w.Header().Set("Content-Length", "0") 2441 if r.ContentLength != 0 { 2442 // Read up to 4KB of OPTIONS body (as mentioned in the 2443 // spec as being reserved for future use), but anything 2444 // over that is considered a waste of server resources 2445 // (or an attack) and we abort and close the connection, 2446 // courtesy of MaxBytesReader's EOF behavior. 2447 mb := MaxBytesReader(w, r.Body, 4<<10) 2448 io.Copy(ioutil.Discard, mb) 2449 } 2450 } 2451 2452 type eofReaderWithWriteTo struct{} 2453 2454 func (eofReaderWithWriteTo) WriteTo(io.Writer) (int64, error) { return 0, nil } 2455 func (eofReaderWithWriteTo) Read([]byte) (int, error) { return 0, io.EOF } 2456 2457 // eofReader is a non-nil io.ReadCloser that always returns EOF. 2458 // It has a WriteTo method so io.Copy won't need a buffer. 2459 var eofReader = &struct { 2460 eofReaderWithWriteTo 2461 io.Closer 2462 }{ 2463 eofReaderWithWriteTo{}, 2464 ioutil.NopCloser(nil), 2465 } 2466 2467 // Verify that an io.Copy from an eofReader won't require a buffer. 2468 var _ io.WriterTo = eofReader 2469 2470 // initNPNRequest is an HTTP handler that initializes certain 2471 // uninitialized fields in its *Request. Such partially-initialized 2472 // Requests come from NPN protocol handlers. 2473 type initNPNRequest struct { 2474 c *tls.Conn 2475 h serverHandler 2476 } 2477 2478 func (h initNPNRequest) ServeHTTP(rw ResponseWriter, req *Request) { 2479 if req.TLS == nil { 2480 req.TLS = &tls.ConnectionState{} 2481 *req.TLS = h.c.ConnectionState() 2482 } 2483 if req.Body == nil { 2484 req.Body = eofReader 2485 } 2486 if req.RemoteAddr == "" { 2487 req.RemoteAddr = h.c.RemoteAddr().String() 2488 } 2489 h.h.ServeHTTP(rw, req) 2490 } 2491 2492 // loggingConn is used for debugging. 
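// The same debugging idea is available to callers outside the package,
// sketched below: wrap each accepted net.Conn, log its reads, and hand the
// wrapping listener to (*Server).Serve. The loggedConn and loggedListener
// names are arbitrary.
//
//	type loggedConn struct {
//		net.Conn
//	}
//
//	func (c loggedConn) Read(p []byte) (int, error) {
//		n, err := c.Conn.Read(p)
//		log.Printf("%v: Read %d bytes, err=%v", c.RemoteAddr(), n, err)
//		return n, err
//	}
//
//	type loggedListener struct {
//		net.Listener
//	}
//
//	func (l loggedListener) Accept() (net.Conn, error) {
//		c, err := l.Listener.Accept()
//		if err != nil {
//			return nil, err
//		}
//		return loggedConn{c}, nil
//	}
//
//	// Usage: ln, _ := net.Listen("tcp", ":8080"); srv.Serve(loggedListener{ln})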
2493 type loggingConn struct { 2494 name string 2495 net.Conn 2496 } 2497 2498 var ( 2499 uniqNameMu sync.Mutex 2500 uniqNameNext = make(map[string]int) 2501 ) 2502 2503 func newLoggingConn(baseName string, c net.Conn) net.Conn { 2504 uniqNameMu.Lock() 2505 defer uniqNameMu.Unlock() 2506 uniqNameNext[baseName]++ 2507 return &loggingConn{ 2508 name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]), 2509 Conn: c, 2510 } 2511 } 2512 2513 func (c *loggingConn) Write(p []byte) (n int, err error) { 2514 log.Printf("%s.Write(%d) = ....", c.name, len(p)) 2515 n, err = c.Conn.Write(p) 2516 log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err) 2517 return 2518 } 2519 2520 func (c *loggingConn) Read(p []byte) (n int, err error) { 2521 log.Printf("%s.Read(%d) = ....", c.name, len(p)) 2522 n, err = c.Conn.Read(p) 2523 log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err) 2524 return 2525 } 2526 2527 func (c *loggingConn) Close() (err error) { 2528 log.Printf("%s.Close() = ...", c.name) 2529 err = c.Conn.Close() 2530 log.Printf("%s.Close() = %v", c.name, err) 2531 return 2532 } 2533 2534 // checkConnErrorWriter writes to c.rwc and records any write errors to c.werr. 2535 // It only contains one field (and a pointer field at that), so it 2536 // fits in an interface value without an extra allocation. 2537 type checkConnErrorWriter struct { 2538 c *conn 2539 } 2540 2541 func (w checkConnErrorWriter) Write(p []byte) (n int, err error) { 2542 n, err = w.c.rwc.Write(p) 2543 if err != nil && w.c.werr == nil { 2544 w.c.werr = err 2545 } 2546 return 2547 } 2548 2549 func numLeadingCRorLF(v []byte) (n int) { 2550 for _, b := range v { 2551 if b == '\r' || b == '\n' { 2552 n++ 2553 continue 2554 } 2555 break 2556 } 2557 return 2558 2559 } 2560 2561 func strSliceContains(ss []string, s string) bool { 2562 for _, v := range ss { 2563 if v == s { 2564 return true 2565 } 2566 } 2567 return false 2568 }
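// An illustrative sketch pulling together several of the server facilities
// documented above: a slow handler wrapped by TimeoutHandler and a ConnState
// hook that logs connection state transitions. The address, timeout, and
// messages are arbitrary.
//
//	package main
//
//	import (
//		"io"
//		"log"
//		"net"
//		"net/http"
//		"time"
//	)
//
//	func main() {
//		slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//			time.Sleep(3 * time.Second) // longer than the limit below
//			io.WriteString(w, "finally done\n")
//		})
//		srv := &http.Server{
//			Addr: ":8080",
//			// Requests running longer than 2s get a 503 with the message below.
//			Handler: http.TimeoutHandler(slow, 2*time.Second, "request timed out\n"),
//			// Logs new/active/idle/hijacked/closed transitions via ConnState.String.
//			ConnState: func(c net.Conn, s http.ConnState) {
//				log.Printf("%v: %s", c.RemoteAddr(), s)
//			},
//		}
//		log.Fatal(srv.ListenAndServe())
//	}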