gitee.com/ks-custle/core-gm@v0.0.0-20230922171213-b83bdd97b62c/net/http2/transport.go (about) 1 // Copyright 2015 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // Transport code. 6 7 package http2 8 9 import ( 10 "bufio" 11 "bytes" 12 "compress/gzip" 13 "context" 14 "crypto/rand" 15 "errors" 16 "fmt" 17 "io" 18 "io/ioutil" 19 "log" 20 "math" 21 mathrand "math/rand" 22 "net" 23 "net/textproto" 24 "os" 25 "sort" 26 "strconv" 27 "strings" 28 "sync" 29 "sync/atomic" 30 "time" 31 32 http "gitee.com/ks-custle/core-gm/gmhttp" 33 "gitee.com/ks-custle/core-gm/gmhttp/httptrace" 34 tls "gitee.com/ks-custle/core-gm/gmtls" 35 "gitee.com/ks-custle/core-gm/net/http/httpguts" 36 "gitee.com/ks-custle/core-gm/net/http2/hpack" 37 "gitee.com/ks-custle/core-gm/net/idna" 38 ) 39 40 const ( 41 // transportDefaultConnFlow is how many connection-level flow control 42 // tokens we give the server at start-up, past the default 64k. 43 transportDefaultConnFlow = 1 << 30 44 45 // transportDefaultStreamFlow is how many stream-level flow 46 // control tokens we announce to the peer, and how many bytes 47 // we buffer per stream. 48 transportDefaultStreamFlow = 4 << 20 49 50 // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send 51 // a stream-level WINDOW_UPDATE for at a time. 52 transportDefaultStreamMinRefresh = 4 << 10 53 54 defaultUserAgent = "Go-http-client/2.0" 55 56 // initialMaxConcurrentStreams is a connections maxConcurrentStreams until 57 // it's received servers initial SETTINGS frame, which corresponds with the 58 // spec's minimum recommended value. 59 initialMaxConcurrentStreams = 100 60 61 // defaultMaxConcurrentStreams is a connections default maxConcurrentStreams 62 // if the server doesn't include one in its initial SETTINGS frame. 63 defaultMaxConcurrentStreams = 1000 64 ) 65 66 // Transport is an HTTP/2 Transport. 
67 // 68 // A Transport internally caches connections to servers. It is safe 69 // for concurrent use by multiple goroutines. 70 type Transport struct { 71 // DialTLS specifies an optional dial function for creating 72 // TLS connections for requests. 73 // 74 // If DialTLS is nil, tls.Dial is used. 75 // 76 // If the returned net.Conn has a ConnectionState method like tls.Conn, 77 // it will be used to set http.Response.TLS. 78 DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) 79 80 // TLSClientConfig specifies the TLS configuration to use with 81 // tls.Client. If nil, the default configuration is used. 82 TLSClientConfig *tls.Config 83 84 // ConnPool optionally specifies an alternate connection pool to use. 85 // If nil, the default is used. 86 ConnPool ClientConnPool 87 88 // DisableCompression, if true, prevents the Transport from 89 // requesting compression with an "Accept-Encoding: gzip" 90 // request header when the Request contains no existing 91 // Accept-Encoding value. If the Transport requests gzip on 92 // its own and gets a gzipped response, it's transparently 93 // decoded in the Response.Body. However, if the user 94 // explicitly requested gzip it is not automatically 95 // uncompressed. 96 DisableCompression bool 97 98 // AllowHTTP, if true, permits HTTP/2 requests using the insecure, 99 // plain-text "http" scheme. Note that this does not enable h2c support. 100 AllowHTTP bool 101 102 // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to 103 // send in the initial settings frame. It is how many bytes 104 // of response headers are allowed. Unlike the http2 spec, zero here 105 // means to use a default limit (currently 10MB). If you actually 106 // want to advertise an unlimited value to the peer, Transport 107 // interprets the highest possible value here (0xffffffff or 1<<32-1) 108 // to mean no limit. 
109 MaxHeaderListSize uint32 110 111 // StrictMaxConcurrentStreams controls whether the server's 112 // SETTINGS_MAX_CONCURRENT_STREAMS should be respected 113 // globally. If false, new TCP connections are created to the 114 // server as needed to keep each under the per-connection 115 // SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the 116 // server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as 117 // a global limit and callers of RoundTrip block when needed, 118 // waiting for their turn. 119 StrictMaxConcurrentStreams bool 120 121 // ReadIdleTimeout is the timeout after which a health check using ping 122 // frame will be carried out if no frame is received on the connection. 123 // Note that a ping response will is considered a received frame, so if 124 // there is no other traffic on the connection, the health check will 125 // be performed every ReadIdleTimeout interval. 126 // If zero, no health check is performed. 127 ReadIdleTimeout time.Duration 128 129 // PingTimeout is the timeout after which the connection will be closed 130 // if a response to Ping is not received. 131 // Defaults to 15s. 132 PingTimeout time.Duration 133 134 // WriteByteTimeout is the timeout after which the connection will be 135 // closed no data can be written to it. The timeout begins when data is 136 // available to write, and is extended whenever any bytes are written. 137 WriteByteTimeout time.Duration 138 139 // CountError, if non-nil, is called on HTTP/2 transport errors. 140 // It's intended to increment a metric for monitoring, such 141 // as an expvar or Prometheus metric. 142 // The errType consists of only ASCII word characters. 143 CountError func(errType string) 144 145 // t1, if non-nil, is the standard library Transport using 146 // this transport. Its settings are used (but not its 147 // RoundTrip method, etc). 
148 t1 *http.Transport 149 150 connPoolOnce sync.Once 151 connPoolOrDef ClientConnPool // non-nil version of ConnPool 152 } 153 154 func (t *Transport) maxHeaderListSize() uint32 { 155 if t.MaxHeaderListSize == 0 { 156 return 10 << 20 157 } 158 if t.MaxHeaderListSize == 0xffffffff { 159 return 0 160 } 161 return t.MaxHeaderListSize 162 } 163 164 func (t *Transport) disableCompression() bool { 165 return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) 166 } 167 168 func (t *Transport) pingTimeout() time.Duration { 169 if t.PingTimeout == 0 { 170 return 15 * time.Second 171 } 172 return t.PingTimeout 173 174 } 175 176 // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. 177 // It returns an error if t1 has already been HTTP/2-enabled. 178 // 179 // Use ConfigureTransports instead to configure the HTTP/2 Transport. 180 func ConfigureTransport(t1 *http.Transport) error { 181 _, err := ConfigureTransports(t1) 182 return err 183 } 184 185 // ConfigureTransports configures a net/http HTTP/1 Transport to use HTTP/2. 186 // It returns a new HTTP/2 Transport for further configuration. 187 // It returns an error if t1 has already been HTTP/2-enabled. 188 func ConfigureTransports(t1 *http.Transport) (*Transport, error) { 189 return configureTransports(t1) 190 } 191 192 func configureTransports(t1 *http.Transport) (*Transport, error) { 193 connPool := new(clientConnPool) 194 t2 := &Transport{ 195 ConnPool: noDialClientConnPool{connPool}, 196 t1: t1, 197 } 198 connPool.t = t2 199 if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { 200 return nil, err 201 } 202 if t1.TLSClientConfig == nil { 203 t1.TLSClientConfig = new(tls.Config) 204 } 205 if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { 206 t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) 
207 } 208 if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { 209 t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") 210 } 211 upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { 212 addr := authorityAddr("https", authority) 213 if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { 214 go c.Close() 215 return erringRoundTripper{err} 216 } else if !used { 217 // Turns out we don't need this c. 218 // For example, two goroutines made requests to the same host 219 // at the same time, both kicking off TCP dials. (since protocol 220 // was unknown) 221 go c.Close() 222 } 223 return t2 224 } 225 if m := t1.TLSNextProto; len(m) == 0 { 226 t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ 227 "h2": upgradeFn, 228 } 229 } else { 230 m["h2"] = upgradeFn 231 } 232 return t2, nil 233 } 234 235 func (t *Transport) connPool() ClientConnPool { 236 t.connPoolOnce.Do(t.initConnPool) 237 return t.connPoolOrDef 238 } 239 240 func (t *Transport) initConnPool() { 241 if t.ConnPool != nil { 242 t.connPoolOrDef = t.ConnPool 243 } else { 244 t.connPoolOrDef = &clientConnPool{t: t} 245 } 246 } 247 248 // ClientConn is the state of a single HTTP/2 client connection to an 249 // HTTP/2 server. 
250 type ClientConn struct { 251 t *Transport 252 tconn net.Conn // usually *tls.Conn, except specialized impls 253 tlsState *tls.ConnectionState // nil only for specialized impls 254 reused uint32 // whether conn is being reused; atomic 255 singleUse bool // whether being used for a single http.Request 256 getConnCalled bool // used by clientConnPool 257 258 // readLoop goroutine fields: 259 readerDone chan struct{} // closed on error 260 readerErr error // set before readerDone is closed 261 262 idleTimeout time.Duration // or 0 for never 263 idleTimer *time.Timer 264 265 mu sync.Mutex // guards following 266 cond *sync.Cond // hold mu; broadcast on flow/closed changes 267 flow flow // our conn-level flow control quota (cs.flow is per stream) 268 inflow flow // peer's conn-level flow control 269 doNotReuse bool // whether conn is marked to not be reused for any future requests 270 closing bool 271 closed bool 272 seenSettings bool // true if we've seen a settings frame, false otherwise 273 wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back 274 goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received 275 goAwayDebug string // goAway frame's debug data, retained as a string 276 streams map[uint32]*clientStream // client-initiated 277 streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip 278 nextStreamID uint32 279 pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams 280 pings map[[8]byte]chan struct{} // in flight ping data to notification channel 281 br *bufio.Reader 282 lastActive time.Time 283 lastIdle time.Time // time last idle 284 // Settings from peer: (also guarded by wmu) 285 maxFrameSize uint32 286 maxConcurrentStreams uint32 287 peerMaxHeaderListSize uint64 288 initialWindowSize uint32 289 290 // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. 291 // Write to reqHeaderMu to lock it, read from it to unlock. 
292 // Lock reqmu BEFORE mu or wmu. 293 reqHeaderMu chan struct{} 294 295 // wmu is held while writing. 296 // Acquire BEFORE mu when holding both, to avoid blocking mu on network writes. 297 // Only acquire both at the same time when changing peer settings. 298 wmu sync.Mutex 299 bw *bufio.Writer 300 fr *Framer 301 werr error // first write error that has occurred 302 hbuf bytes.Buffer // HPACK encoder writes into this 303 henc *hpack.Encoder 304 } 305 306 // clientStream is the state for a single HTTP/2 stream. One of these 307 // is created for each Transport.RoundTrip call. 308 type clientStream struct { 309 cc *ClientConn 310 311 // Fields of Request that we may access even after the response body is closed. 312 ctx context.Context 313 reqCancel <-chan struct{} 314 315 trace *httptrace.ClientTrace // or nil 316 ID uint32 317 bufPipe pipe // buffered pipe with the flow-controlled response payload 318 requestedGzip bool 319 isHead bool 320 321 abortOnce sync.Once 322 abort chan struct{} // closed to signal stream should end immediately 323 abortErr error // set if abort is closed 324 325 peerClosed chan struct{} // closed when the peer sends an END_STREAM flag 326 donec chan struct{} // closed after the stream is in the closed state 327 on100 chan struct{} // buffered; written to if a 100 is received 328 329 respHeaderRecv chan struct{} // closed when headers are received 330 res *http.Response // set if respHeaderRecv is closed 331 332 flow flow // guarded by cc.mu 333 inflow flow // guarded by cc.mu 334 bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read 335 readErr error // sticky read error; owned by transportResponseBody.Read 336 337 reqBody io.ReadCloser 338 reqBodyContentLength int64 // -1 means unknown 339 reqBodyClosed bool // body has been closed; guarded by cc.mu 340 341 // owned by writeRequest: 342 sentEndStream bool // sent an END_STREAM flag to the peer 343 sentHeaders bool 344 345 // owned by clientConnReadLoop: 346 
firstByte bool // got the first response byte 347 pastHeaders bool // got first MetaHeadersFrame (actual headers) 348 pastTrailers bool // got optional second MetaHeadersFrame (trailers) 349 num1xx uint8 // number of 1xx responses seen 350 readClosed bool // peer sent an END_STREAM flag 351 readAborted bool // read loop reset the stream 352 353 trailer http.Header // accumulated trailers 354 resTrailer *http.Header // client's Response.Trailer 355 } 356 357 var got1xxFuncForTests func(int, textproto.MIMEHeader) error 358 359 // get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func, 360 // if any. It returns nil if not set or if the Go version is too old. 361 func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error { 362 if fn := got1xxFuncForTests; fn != nil { 363 return fn 364 } 365 return traceGot1xxResponseFunc(cs.trace) 366 } 367 368 func (cs *clientStream) abortStream(err error) { 369 cs.cc.mu.Lock() 370 defer cs.cc.mu.Unlock() 371 cs.abortStreamLocked(err) 372 } 373 374 func (cs *clientStream) abortStreamLocked(err error) { 375 cs.abortOnce.Do(func() { 376 cs.abortErr = err 377 close(cs.abort) 378 }) 379 if cs.reqBody != nil && !cs.reqBodyClosed { 380 cs.reqBody.Close() 381 cs.reqBodyClosed = true 382 } 383 // TODO(dneil): Clean up tests where cs.cc.cond is nil. 384 if cs.cc.cond != nil { 385 // Wake up writeRequestBody if it is waiting on flow control. 
386 cs.cc.cond.Broadcast() 387 } 388 } 389 390 func (cs *clientStream) abortRequestBodyWrite() { 391 cc := cs.cc 392 cc.mu.Lock() 393 defer cc.mu.Unlock() 394 if cs.reqBody != nil && !cs.reqBodyClosed { 395 cs.reqBody.Close() 396 cs.reqBodyClosed = true 397 cc.cond.Broadcast() 398 } 399 } 400 401 type stickyErrWriter struct { 402 conn net.Conn 403 timeout time.Duration 404 err *error 405 } 406 407 func (sew stickyErrWriter) Write(p []byte) (n int, err error) { 408 if *sew.err != nil { 409 return 0, *sew.err 410 } 411 for { 412 if sew.timeout != 0 { 413 sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) 414 } 415 nn, err := sew.conn.Write(p[n:]) 416 n += nn 417 if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { 418 // Keep extending the deadline so long as we're making progress. 419 continue 420 } 421 if sew.timeout != 0 { 422 sew.conn.SetWriteDeadline(time.Time{}) 423 } 424 *sew.err = err 425 return n, err 426 } 427 } 428 429 // noCachedConnError is the concrete type of ErrNoCachedConn, which 430 // needs to be detected by net/http regardless of whether it's its 431 // bundled version (in h2_bundle.go with a rewritten type name) or 432 // from a user's x/net/http2. As such, as it has a unique method name 433 // (IsHTTP2NoCachedConnError) that net/http sniffs for via func 434 // isNoCachedConnError. 435 type noCachedConnError struct{} 436 437 func (noCachedConnError) IsHTTP2NoCachedConnError() {} 438 func (noCachedConnError) Error() string { return "http2: no cached connection was available" } 439 440 // isNoCachedConnError reports whether err is of type noCachedConnError 441 // or its equivalent renamed type in net/http2's h2_bundle.go. Both types 442 // may coexist in the same running program. 
443 func isNoCachedConnError(err error) bool { 444 _, ok := err.(interface{ IsHTTP2NoCachedConnError() }) 445 return ok 446 } 447 448 var ErrNoCachedConn error = noCachedConnError{} 449 450 // RoundTripOpt are options for the Transport.RoundTripOpt method. 451 type RoundTripOpt struct { 452 // OnlyCachedConn controls whether RoundTripOpt may 453 // create a new TCP connection. If set true and 454 // no cached connection is available, RoundTripOpt 455 // will return ErrNoCachedConn. 456 OnlyCachedConn bool 457 } 458 459 func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { 460 return t.RoundTripOpt(req, RoundTripOpt{}) 461 } 462 463 // authorityAddr returns a given authority (a host/IP, or host:port / ip:port) 464 // and returns a host:port. The port 443 is added if needed. 465 func authorityAddr(scheme string, authority string) (addr string) { 466 host, port, err := net.SplitHostPort(authority) 467 if err != nil { // authority didn't have a port 468 port = "443" 469 if scheme == "http" { 470 port = "80" 471 } 472 host = authority 473 } 474 if a, err := idna.ToASCII(host); err == nil { 475 host = a 476 } 477 // IPv6 address literal, without a port: 478 if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { 479 return host + ":" + port 480 } 481 return net.JoinHostPort(host, port) 482 } 483 484 // RoundTripOpt is like RoundTrip, but takes options. 
485 func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { 486 if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { 487 return nil, errors.New("http2: unsupported scheme") 488 } 489 490 addr := authorityAddr(req.URL.Scheme, req.URL.Host) 491 for retry := 0; ; retry++ { 492 cc, err := t.connPool().GetClientConn(req, addr) 493 if err != nil { 494 t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) 495 return nil, err 496 } 497 reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) 498 traceGotConn(req, cc, reused) 499 res, err := cc.RoundTrip(req) 500 if err != nil && retry <= 6 { 501 if req, err = shouldRetryRequest(req, err); err == nil { 502 // After the first retry, do exponential backoff with 10% jitter. 503 if retry == 0 { 504 continue 505 } 506 backoff := float64(uint(1) << (uint(retry) - 1)) 507 backoff += backoff * (0.1 * mathrand.Float64()) 508 select { 509 case <-time.After(time.Second * time.Duration(backoff)): 510 continue 511 case <-req.Context().Done(): 512 err = req.Context().Err() 513 } 514 } 515 } 516 if err != nil { 517 t.vlogf("RoundTrip failure: %v", err) 518 return nil, err 519 } 520 return res, nil 521 } 522 } 523 524 // CloseIdleConnections closes any connections which were previously 525 // connected from previous requests but are now sitting idle. 526 // It does not interrupt any connections currently in use. 527 func (t *Transport) CloseIdleConnections() { 528 if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { 529 cp.closeIdleConnections() 530 } 531 } 532 533 var ( 534 errClientConnClosed = errors.New("http2: client conn is closed") 535 errClientConnUnusable = errors.New("http2: client conn not usable") 536 errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") 537 ) 538 539 // shouldRetryRequest is called by RoundTrip when a request fails to get 540 // response headers. 
It is always called with a non-nil error. 541 // It returns either a request to retry (either the same request, or a 542 // modified clone), or an error if the request can't be replayed. 543 func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) { 544 if !canRetryError(err) { 545 return nil, err 546 } 547 // If the Body is nil (or http.NoBody), it's safe to reuse 548 // this request and its Body. 549 if req.Body == nil || req.Body == http.NoBody { 550 return req, nil 551 } 552 553 // If the request body can be reset back to its original 554 // state via the optional req.GetBody, do that. 555 if req.GetBody != nil { 556 body, err := req.GetBody() 557 if err != nil { 558 return nil, err 559 } 560 newReq := *req 561 newReq.Body = body 562 return &newReq, nil 563 } 564 565 // The Request.Body can't reset back to the beginning, but we 566 // don't seem to have started to read from it yet, so reuse 567 // the request directly. 568 if err == errClientConnUnusable { 569 return req, nil 570 } 571 572 return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) 573 } 574 575 func canRetryError(err error) bool { 576 if err == errClientConnUnusable || err == errClientConnGotGoAway { 577 return true 578 } 579 if se, ok := err.(StreamError); ok { 580 if se.Code == ErrCodeProtocol && se.Cause == errFromPeer { 581 // See golang/go#47635, golang/go#42777 582 return true 583 } 584 return se.Code == ErrCodeRefusedStream 585 } 586 return false 587 } 588 589 func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { 590 host, _, err := net.SplitHostPort(addr) 591 if err != nil { 592 return nil, err 593 } 594 tconn, err := t.dialTLS(ctx)("tcp", addr, t.newTLSConfig(host)) 595 if err != nil { 596 return nil, err 597 } 598 return t.newClientConn(tconn, singleUse) 599 } 600 601 func (t *Transport) newTLSConfig(host string) *tls.Config { 
602 cfg := new(tls.Config) 603 if t.TLSClientConfig != nil { 604 *cfg = *t.TLSClientConfig.Clone() 605 } 606 if !strSliceContains(cfg.NextProtos, NextProtoTLS) { 607 cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) 608 } 609 if cfg.ServerName == "" { 610 cfg.ServerName = host 611 } 612 return cfg 613 } 614 615 func (t *Transport) dialTLS(ctx context.Context) func(string, string, *tls.Config) (net.Conn, error) { 616 if t.DialTLS != nil { 617 return t.DialTLS 618 } 619 return func(network, addr string, cfg *tls.Config) (net.Conn, error) { 620 tlsCn, err := t.dialTLSWithContext(ctx, network, addr, cfg) 621 if err != nil { 622 return nil, err 623 } 624 state := tlsCn.ConnectionState() 625 if p := state.NegotiatedProtocol; p != NextProtoTLS { 626 return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) 627 } 628 if !state.NegotiatedProtocolIsMutual { 629 return nil, errors.New("http2: could not negotiate protocol mutually") 630 } 631 return tlsCn, nil 632 } 633 } 634 635 // disableKeepAlives reports whether connections should be closed as 636 // soon as possible after handling the first request. 637 func (t *Transport) disableKeepAlives() bool { 638 return t.t1 != nil && t.t1.DisableKeepAlives 639 } 640 641 func (t *Transport) expectContinueTimeout() time.Duration { 642 if t.t1 == nil { 643 return 0 644 } 645 return t.t1.ExpectContinueTimeout 646 } 647 648 func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { 649 return t.newClientConn(c, t.disableKeepAlives()) 650 } 651 652 func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { 653 cc := &ClientConn{ 654 t: t, 655 tconn: c, 656 readerDone: make(chan struct{}), 657 nextStreamID: 1, 658 maxFrameSize: 16 << 10, // spec default 659 initialWindowSize: 65535, // spec default 660 maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. 
661 peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 662 streams: make(map[uint32]*clientStream), 663 singleUse: singleUse, 664 wantSettingsAck: true, 665 pings: make(map[[8]byte]chan struct{}), 666 reqHeaderMu: make(chan struct{}, 1), 667 } 668 if d := t.idleConnTimeout(); d != 0 { 669 cc.idleTimeout = d 670 cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) 671 } 672 if VerboseLogs { 673 t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) 674 } 675 676 cc.cond = sync.NewCond(&cc.mu) 677 cc.flow.add(int32(initialWindowSize)) 678 679 // TODO: adjust this writer size to account for frame size + 680 // MTU + crypto/tls record padding. 681 cc.bw = bufio.NewWriter(stickyErrWriter{ 682 conn: c, 683 timeout: t.WriteByteTimeout, 684 err: &cc.werr, 685 }) 686 cc.br = bufio.NewReader(c) 687 cc.fr = NewFramer(cc.bw, cc.br) 688 if t.CountError != nil { 689 cc.fr.countError = t.CountError 690 } 691 cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) 692 cc.fr.MaxHeaderListSize = t.maxHeaderListSize() 693 694 // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on 695 // henc in response to SETTINGS frames? 696 cc.henc = hpack.NewEncoder(&cc.hbuf) 697 698 if t.AllowHTTP { 699 cc.nextStreamID = 3 700 } 701 702 if cs, ok := c.(connectionStater); ok { 703 state := cs.ConnectionState() 704 cc.tlsState = &state 705 } 706 707 initialSettings := []Setting{ 708 {ID: SettingEnablePush, Val: 0}, 709 {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, 710 } 711 if max := t.maxHeaderListSize(); max != 0 { 712 initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) 713 } 714 715 cc.bw.Write(clientPreface) 716 cc.fr.WriteSettings(initialSettings...) 
717 cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) 718 cc.inflow.add(transportDefaultConnFlow + initialWindowSize) 719 cc.bw.Flush() 720 if cc.werr != nil { 721 cc.Close() 722 return nil, cc.werr 723 } 724 725 go cc.readLoop() 726 return cc, nil 727 } 728 729 func (cc *ClientConn) healthCheck() { 730 pingTimeout := cc.t.pingTimeout() 731 // We don't need to periodically ping in the health check, because the readLoop of ClientConn will 732 // trigger the healthCheck again if there is no frame received. 733 ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) 734 defer cancel() 735 err := cc.Ping(ctx) 736 if err != nil { 737 cc.closeForLostPing() 738 cc.t.connPool().MarkDead(cc) 739 return 740 } 741 } 742 743 // SetDoNotReuse marks cc as not reusable for future HTTP requests. 744 func (cc *ClientConn) SetDoNotReuse() { 745 cc.mu.Lock() 746 defer cc.mu.Unlock() 747 cc.doNotReuse = true 748 } 749 750 func (cc *ClientConn) setGoAway(f *GoAwayFrame) { 751 cc.mu.Lock() 752 defer cc.mu.Unlock() 753 754 old := cc.goAway 755 cc.goAway = f 756 757 // Merge the previous and current GoAway error frames. 758 if cc.goAwayDebug == "" { 759 cc.goAwayDebug = string(f.DebugData()) 760 } 761 if old != nil && old.ErrCode != ErrCodeNo { 762 cc.goAway.ErrCode = old.ErrCode 763 } 764 last := f.LastStreamID 765 for streamID, cs := range cc.streams { 766 if streamID > last { 767 cs.abortStreamLocked(errClientConnGotGoAway) 768 } 769 } 770 } 771 772 // CanTakeNewRequest reports whether the connection can take a new request, 773 // meaning it has not been closed or received or sent a GOAWAY. 774 // 775 // If the caller is going to immediately make a new request on this 776 // connection, use ReserveNewRequest instead. 777 func (cc *ClientConn) CanTakeNewRequest() bool { 778 cc.mu.Lock() 779 defer cc.mu.Unlock() 780 return cc.canTakeNewRequestLocked() 781 } 782 783 // ReserveNewRequest is like CanTakeNewRequest but also reserves a 784 // concurrent stream in cc. 
The reservation is decremented on the 785 // next call to RoundTrip. 786 func (cc *ClientConn) ReserveNewRequest() bool { 787 cc.mu.Lock() 788 defer cc.mu.Unlock() 789 if st := cc.idleStateLocked(); !st.canTakeNewRequest { 790 return false 791 } 792 cc.streamsReserved++ 793 return true 794 } 795 796 // ClientConnState describes the state of a ClientConn. 797 type ClientConnState struct { 798 // Closed is whether the connection is closed. 799 Closed bool 800 801 // Closing is whether the connection is in the process of 802 // closing. It may be closing due to shutdown, being a 803 // single-use connection, being marked as DoNotReuse, or 804 // having received a GOAWAY frame. 805 Closing bool 806 807 // StreamsActive is how many streams are active. 808 StreamsActive int 809 810 // StreamsReserved is how many streams have been reserved via 811 // ClientConn.ReserveNewRequest. 812 StreamsReserved int 813 814 // StreamsPending is how many requests have been sent in excess 815 // of the peer's advertised MaxConcurrentStreams setting and 816 // are waiting for other streams to complete. 817 StreamsPending int 818 819 // MaxConcurrentStreams is how many concurrent streams the 820 // peer advertised as acceptable. Zero means no SETTINGS 821 // frame has been received yet. 822 MaxConcurrentStreams uint32 823 824 // LastIdle, if non-zero, is when the connection last 825 // transitioned to idle state. 826 LastIdle time.Time 827 } 828 829 // State returns a snapshot of cc's state. 
830 func (cc *ClientConn) State() ClientConnState { 831 cc.wmu.Lock() 832 maxConcurrent := cc.maxConcurrentStreams 833 if !cc.seenSettings { 834 maxConcurrent = 0 835 } 836 cc.wmu.Unlock() 837 838 cc.mu.Lock() 839 defer cc.mu.Unlock() 840 return ClientConnState{ 841 Closed: cc.closed, 842 Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil, 843 StreamsActive: len(cc.streams), 844 StreamsReserved: cc.streamsReserved, 845 StreamsPending: cc.pendingRequests, 846 LastIdle: cc.lastIdle, 847 MaxConcurrentStreams: maxConcurrent, 848 } 849 } 850 851 // clientConnIdleState describes the suitability of a client 852 // connection to initiate a new RoundTrip request. 853 type clientConnIdleState struct { 854 canTakeNewRequest bool 855 } 856 857 func (cc *ClientConn) idleState() clientConnIdleState { 858 cc.mu.Lock() 859 defer cc.mu.Unlock() 860 return cc.idleStateLocked() 861 } 862 863 func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { 864 if cc.singleUse && cc.nextStreamID > 1 { 865 return 866 } 867 var maxConcurrentOkay bool 868 if cc.t.StrictMaxConcurrentStreams { 869 // We'll tell the caller we can take a new request to 870 // prevent the caller from dialing a new TCP 871 // connection, but then we'll block later before 872 // writing it. 873 maxConcurrentOkay = true 874 } else { 875 maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams) 876 } 877 878 st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && 879 !cc.doNotReuse && 880 int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && 881 !cc.tooIdleLocked() 882 return 883 } 884 885 func (cc *ClientConn) canTakeNewRequestLocked() bool { 886 st := cc.idleStateLocked() 887 return st.canTakeNewRequest 888 } 889 890 // tooIdleLocked reports whether this connection has been been sitting idle 891 // for too much wall time. 
892 func (cc *ClientConn) tooIdleLocked() bool { 893 // The Round(0) strips the monontonic clock reading so the 894 // times are compared based on their wall time. We don't want 895 // to reuse a connection that's been sitting idle during 896 // VM/laptop suspend if monotonic time was also frozen. 897 return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout 898 } 899 900 // onIdleTimeout is called from a time.AfterFunc goroutine. It will 901 // only be called when we're idle, but because we're coming from a new 902 // goroutine, there could be a new request coming in at the same time, 903 // so this simply calls the synchronized closeIfIdle to shut down this 904 // connection. The timer could just call closeIfIdle, but this is more 905 // clear. 906 func (cc *ClientConn) onIdleTimeout() { 907 cc.closeIfIdle() 908 } 909 910 func (cc *ClientConn) closeIfIdle() { 911 cc.mu.Lock() 912 if len(cc.streams) > 0 || cc.streamsReserved > 0 { 913 cc.mu.Unlock() 914 return 915 } 916 cc.closed = true 917 nextID := cc.nextStreamID 918 // TODO: do clients send GOAWAY too? maybe? Just Close: 919 cc.mu.Unlock() 920 921 if VerboseLogs { 922 cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) 923 } 924 cc.tconn.Close() 925 } 926 927 func (cc *ClientConn) isDoNotReuseAndIdle() bool { 928 cc.mu.Lock() 929 defer cc.mu.Unlock() 930 return cc.doNotReuse && len(cc.streams) == 0 931 } 932 933 var shutdownEnterWaitStateHook = func() {} 934 935 // Shutdown gracefully closes the client connection, waiting for running streams to complete. 
func (cc *ClientConn) Shutdown(ctx context.Context) error {
	if err := cc.sendGoAway(); err != nil {
		return err
	}
	// Wait for all in-flight streams to complete or connection to close
	done := make(chan error, 1)
	cancelled := false // guarded by cc.mu
	go func() {
		cc.mu.Lock()
		defer cc.mu.Unlock()
		for {
			if len(cc.streams) == 0 || cc.closed {
				cc.closed = true
				done <- cc.tconn.Close()
				break
			}
			if cancelled {
				break
			}
			// Woken by cc.cond.Broadcast when a stream completes
			// or when the cancellation branch below fires.
			cc.cond.Wait()
		}
	}()
	shutdownEnterWaitStateHook()
	select {
	case err := <-done:
		return err
	case <-ctx.Done():
		cc.mu.Lock()
		// Free the goroutine above
		cancelled = true
		cc.cond.Broadcast()
		cc.mu.Unlock()
		return ctx.Err()
	}
}

// sendGoAway writes a graceful-shutdown GOAWAY frame to the server and
// marks the connection as closing so no new requests are started.
// It is a no-op if a GOAWAY has already been sent.
func (cc *ClientConn) sendGoAway() error {
	cc.mu.Lock()
	closing := cc.closing
	cc.closing = true
	maxStreamID := cc.nextStreamID
	cc.mu.Unlock()
	if closing {
		// GOAWAY sent already
		return nil
	}

	cc.wmu.Lock()
	defer cc.wmu.Unlock()
	// Send a graceful shutdown frame to server
	if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil {
		return err
	}
	if err := cc.bw.Flush(); err != nil {
		return err
	}
	// Prevent new requests
	return nil
}

// closeForError closes the client connection immediately. In-flight
// requests are interrupted. err is sent to streams.
func (cc *ClientConn) closeForError(err error) error {
	cc.mu.Lock()
	cc.closed = true
	for _, cs := range cc.streams {
		cs.abortStreamLocked(err)
	}
	// Defers run LIFO: cc.mu is unlocked first, then waiters are woken.
	defer cc.cond.Broadcast()
	defer cc.mu.Unlock()
	return cc.tconn.Close()
}

// Close closes the client connection immediately.
//
// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
1012 func (cc *ClientConn) Close() error { 1013 err := errors.New("http2: client connection force closed via ClientConn.Close") 1014 return cc.closeForError(err) 1015 } 1016 1017 // closes the client connection immediately. In-flight requests are interrupted. 1018 func (cc *ClientConn) closeForLostPing() error { 1019 err := errors.New("http2: client connection lost") 1020 if f := cc.t.CountError; f != nil { 1021 f("conn_close_lost_ping") 1022 } 1023 return cc.closeForError(err) 1024 } 1025 1026 // errRequestCanceled is a copy of net/http's errRequestCanceled because it's not 1027 // exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. 1028 var errRequestCanceled = errors.New("net/http: request canceled") 1029 1030 func commaSeparatedTrailers(req *http.Request) (string, error) { 1031 keys := make([]string, 0, len(req.Trailer)) 1032 for k := range req.Trailer { 1033 k = http.CanonicalHeaderKey(k) 1034 switch k { 1035 case "Transfer-Encoding", "Trailer", "Content-Length": 1036 return "", fmt.Errorf("invalid Trailer key %q", k) 1037 } 1038 keys = append(keys, k) 1039 } 1040 if len(keys) > 0 { 1041 sort.Strings(keys) 1042 return strings.Join(keys, ","), nil 1043 } 1044 return "", nil 1045 } 1046 1047 func (cc *ClientConn) responseHeaderTimeout() time.Duration { 1048 if cc.t.t1 != nil { 1049 return cc.t.t1.ResponseHeaderTimeout 1050 } 1051 // No way to do this (yet?) with just an http2.Transport. Probably 1052 // no need. Request.Cancel this is the new way. We only need to support 1053 // this for compatibility with the old http.Transport fields when 1054 // we're doing transparent http2. 1055 return 0 1056 } 1057 1058 // checkConnHeaders checks whether req has any invalid connection-level headers. 1059 // per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. 1060 // Certain headers are special-cased as okay but not transmitted later. 
func checkConnHeaders(req *http.Request) error {
	// Upgrade is never valid over HTTP/2.
	if v := req.Header.Get("Upgrade"); v != "" {
		return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
	}
	// Transfer-Encoding must be absent, empty, or exactly "chunked".
	if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
		return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
	}
	// Connection must be absent, empty, "close", or "keep-alive".
	if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
		return fmt.Errorf("http2: invalid Connection request header: %q", vv)
	}
	return nil
}

// actualContentLength returns a sanitized version of
// req.ContentLength, where 0 actually means zero (not unknown) and -1
// means unknown.
func actualContentLength(req *http.Request) int64 {
	if req.Body == nil || req.Body == http.NoBody {
		return 0
	}
	if req.ContentLength != 0 {
		return req.ContentLength
	}
	return -1
}

// decrStreamReservations is the mutex-acquiring wrapper around
// decrStreamReservationsLocked.
func (cc *ClientConn) decrStreamReservations() {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	cc.decrStreamReservationsLocked()
}

// decrStreamReservationsLocked returns one reserved stream slot, if any
// are held. Requires cc.mu be held.
func (cc *ClientConn) decrStreamReservationsLocked() {
	if cc.streamsReserved > 0 {
		cc.streamsReserved--
	}
}

// RoundTrip sends req on this connection and waits for the response
// headers (and, for bodiless exchanges, for the stream to fully close).
func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
	ctx := req.Context()
	cs := &clientStream{
		cc:                   cc,
		ctx:                  ctx,
		reqCancel:            req.Cancel,
		isHead:               req.Method == "HEAD",
		reqBody:              req.Body,
		reqBodyContentLength: actualContentLength(req),
		trace:                httptrace.ContextClientTrace(ctx),
		peerClosed:           make(chan struct{}),
		abort:                make(chan struct{}),
		respHeaderRecv:       make(chan struct{}),
		donec:                make(chan struct{}),
	}
	// The request is written (and its body streamed) on a separate
	// goroutine; this goroutine only waits for the results.
	go cs.doRequest(req)

	// waitDone blocks until the stream is fully done, the request's
	// context expires, or the request is canceled.
	waitDone := func() error {
		select {
		case <-cs.donec:
			return nil
		case <-ctx.Done():
			return ctx.Err()
		case <-cs.reqCancel:
			return errRequestCanceled
		}
	}

	handleResponseHeaders := func() (*http.Response, error) {
		res := cs.res
		if res.StatusCode > 299 {
			// On error or status code 3xx, 4xx, 5xx, etc abort any
			// ongoing write, assuming that the server doesn't care
			// about our request body. If the server replied with 1xx or
			// 2xx, however, then assume the server DOES potentially
			// want our body (e.g. full-duplex streaming:
			// golang.org/issue/13444). If it turns out the server
			// doesn't, they'll RST_STREAM us soon enough. This is a
			// heuristic to avoid adding knobs to Transport. Hopefully
			// we can keep it.
			cs.abortRequestBodyWrite()
		}
		res.Request = req
		res.TLS = cc.tlsState
		if res.Body == noBody && actualContentLength(req) == 0 {
			// If there isn't a request or response body still being
			// written, then wait for the stream to be closed before
			// RoundTrip returns.
			if err := waitDone(); err != nil {
				return nil, err
			}
		}
		return res, nil
	}

	for {
		select {
		case <-cs.respHeaderRecv:
			return handleResponseHeaders()
		case <-cs.abort:
			select {
			case <-cs.respHeaderRecv:
				// If both cs.respHeaderRecv and cs.abort are signaling,
				// pick respHeaderRecv. The server probably wrote the
				// response and immediately reset the stream.
				// golang.org/issue/49645
				return handleResponseHeaders()
			default:
				// Wait for the request goroutine to finish before
				// reporting the abort; any error from waitDone is
				// superseded by abortErr.
				waitDone()
				return nil, cs.abortErr
			}
		case <-ctx.Done():
			err := ctx.Err()
			cs.abortStream(err)
			return nil, err
		case <-cs.reqCancel:
			cs.abortStream(errRequestCanceled)
			return nil, errRequestCanceled
		}
	}
}

// doRequest runs for the duration of the request lifetime.
//
// It sends the request and performs post-request cleanup (closing Request.Body, etc.).
func (cs *clientStream) doRequest(req *http.Request) {
	err := cs.writeRequest(req)
	cs.cleanupWriteRequest(err)
}

// writeRequest sends a request.
//
// It returns nil after the request is written, the response read,
// and the request stream is half-closed by the peer.
//
// It returns non-nil if the request ends otherwise.
// If the returned error is StreamError, the error Code may be used in resetting the stream.
func (cs *clientStream) writeRequest(req *http.Request) (err error) {
	cc := cs.cc
	ctx := cs.ctx

	if err := checkConnHeaders(req); err != nil {
		return err
	}

	// Acquire the new-request lock by writing to reqHeaderMu.
	// This lock guards the critical section covering allocating a new stream ID
	// (requires mu) and creating the stream (requires wmu).
	if cc.reqHeaderMu == nil {
		panic("RoundTrip on uninitialized ClientConn") // for tests
	}
	select {
	case cc.reqHeaderMu <- struct{}{}:
	case <-cs.reqCancel:
		return errRequestCanceled
	case <-ctx.Done():
		return ctx.Err()
	}

	cc.mu.Lock()
	if cc.idleTimer != nil {
		// The connection is no longer idle; don't let the idle timer fire.
		cc.idleTimer.Stop()
	}
	cc.decrStreamReservationsLocked()
	if err := cc.awaitOpenSlotForStreamLocked(cs); err != nil {
		cc.mu.Unlock()
		<-cc.reqHeaderMu
		return err
	}
	cc.addStreamLocked(cs) // assigns stream ID
	if isConnectionCloseRequest(req) {
		cc.doNotReuse = true
	}
	cc.mu.Unlock()

	// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
	if !cc.t.disableCompression() &&
		req.Header.Get("Accept-Encoding") == "" &&
		req.Header.Get("Range") == "" &&
		!cs.isHead {
		// Request gzip only, not deflate. Deflate is ambiguous and
		// not as universally supported anyway.
		// See: https://zlib.net/zlib_faq.html#faq39
		//
		// Note that we don't request this for HEAD requests,
		// due to a bug in nginx:
		// http://trac.nginx.org/nginx/ticket/358
		// https://golang.org/issue/5522
		//
		// We don't request gzip if the request is for a range, since
		// auto-decoding a portion of a gzipped document will just fail
		// anyway. See https://golang.org/issue/8923
		cs.requestedGzip = true
	}

	continueTimeout := cc.t.expectContinueTimeout()
	if continueTimeout != 0 {
		if !httpguts.HeaderValuesContainsToken(req.Header["Expect"], "100-continue") {
			continueTimeout = 0
		} else {
			cs.on100 = make(chan struct{}, 1)
		}
	}

	// Past this point (where we send request headers), it is possible for
	// RoundTrip to return successfully. Since the RoundTrip contract permits
	// the caller to "mutate or reuse" the Request after closing the Response's Body,
	// we must take care when referencing the Request from here on.
	err = cs.encodeAndWriteHeaders(req)
	<-cc.reqHeaderMu
	if err != nil {
		return err
	}

	hasBody := cs.reqBodyContentLength != 0
	if !hasBody {
		cs.sentEndStream = true
	} else {
		if continueTimeout != 0 {
			// Wait for the server's 100 Continue (or the timeout,
			// abort, or cancellation) before sending the body.
			traceWait100Continue(cs.trace)
			timer := time.NewTimer(continueTimeout)
			select {
			case <-timer.C:
				err = nil
			case <-cs.on100:
				err = nil
			case <-cs.abort:
				err = cs.abortErr
			case <-ctx.Done():
				err = ctx.Err()
			case <-cs.reqCancel:
				err = errRequestCanceled
			}
			timer.Stop()
			if err != nil {
				traceWroteRequest(cs.trace, err)
				return err
			}
		}

		if err = cs.writeRequestBody(req); err != nil {
			if err != errStopReqBodyWrite {
				traceWroteRequest(cs.trace, err)
				return err
			}
		} else {
			cs.sentEndStream = true
		}
	}

	traceWroteRequest(cs.trace, err)

	var respHeaderTimer <-chan time.Time
	var respHeaderRecv chan struct{}
	if d := cc.responseHeaderTimeout(); d != 0 {
		timer := time.NewTimer(d)
		defer timer.Stop()
		respHeaderTimer = timer.C
		respHeaderRecv = cs.respHeaderRecv
	}
	// Wait until the peer half-closes its end of the stream,
	// or until the request is aborted (via context, error, or otherwise),
	// whichever comes first.
	for {
		select {
		case <-cs.peerClosed:
			return nil
		case <-respHeaderTimer:
			return errTimeout
		case <-respHeaderRecv:
			// Headers arrived in time; disable the header timeout
			// by nil-ing both channels so their cases never fire again.
			respHeaderRecv = nil
			respHeaderTimer = nil // keep waiting for END_STREAM
		case <-cs.abort:
			return cs.abortErr
		case <-ctx.Done():
			return ctx.Err()
		case <-cs.reqCancel:
			return errRequestCanceled
		}
	}
}

// encodeAndWriteHeaders HPACK-encodes the request headers and writes
// them to the connection as HEADERS + CONTINUATION frames.
func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error {
	cc := cs.cc
	ctx := cs.ctx

	cc.wmu.Lock()
	defer cc.wmu.Unlock()

	// If the request was canceled while waiting for cc.mu, just quit.
	select {
	case <-cs.abort:
		return cs.abortErr
	case <-ctx.Done():
		return ctx.Err()
	case <-cs.reqCancel:
		return errRequestCanceled
	default:
	}

	// Encode headers.
	//
	// we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
	// sent by writeRequestBody below, along with any Trailers,
	// again in form HEADERS{1}, CONTINUATION{0,})
	trailers, err := commaSeparatedTrailers(req)
	if err != nil {
		return err
	}
	hasTrailers := trailers != ""
	contentLen := actualContentLength(req)
	hasBody := contentLen != 0
	hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen)
	if err != nil {
		return err
	}

	// Write the request.
	endStream := !hasBody && !hasTrailers
	cs.sentHeaders = true
	err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
	traceWroteHeaders(cs.trace)
	return err
}

// cleanupWriteRequest performs post-request tasks.
//
// If err (the result of writeRequest) is non-nil and the stream is not closed,
// cleanupWriteRequest will send a reset to the peer.
func (cs *clientStream) cleanupWriteRequest(err error) {
	cc := cs.cc

	if cs.ID == 0 {
		// We were canceled before creating the stream, so return our reservation.
		cc.decrStreamReservations()
	}

	// TODO: write h12Compare test showing whether
	// Request.Body is closed by the Transport,
	// and in multiple cases: server replies <=299 and >299
	// while still writing request body
	cc.mu.Lock()
	bodyClosed := cs.reqBodyClosed
	cs.reqBodyClosed = true
	cc.mu.Unlock()
	if !bodyClosed && cs.reqBody != nil {
		cs.reqBody.Close()
	}

	if err != nil && cs.sentEndStream {
		// If the connection is closed immediately after the response is read,
		// we may be aborted before finishing up here. If the stream was closed
		// cleanly on both sides, there is no error.
		select {
		case <-cs.peerClosed:
			err = nil
		default:
		}
	}
	if err != nil {
		cs.abortStream(err) // possibly redundant, but harmless
		if cs.sentHeaders {
			if se, ok := err.(StreamError); ok {
				if se.Cause != errFromPeer {
					cc.writeStreamReset(cs.ID, se.Code, err)
				}
			} else {
				cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
			}
		}
		cs.bufPipe.CloseWithError(err) // no-op if already closed
	} else {
		if cs.sentHeaders && !cs.sentEndStream {
			// We sent headers but never finished the body; tell the
			// peer we're done with a no-error stream reset.
			cc.writeStreamReset(cs.ID, ErrCodeNo, nil)
		}
		cs.bufPipe.CloseWithError(errRequestCanceled)
	}
	if cs.ID != 0 {
		cc.forgetStreamID(cs.ID)
	}

	cc.wmu.Lock()
	werr := cc.werr
	cc.wmu.Unlock()
	if werr != nil {
		// A write error broke the connection; close it for everyone.
		cc.Close()
	}

	close(cs.donec)
}

// awaitOpenSlotForStream waits until len(streams) < maxConcurrentStreams.
// Must hold cc.mu.
func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
	for {
		cc.lastActive = time.Now()
		if cc.closed || !cc.canTakeNewRequestLocked() {
			return errClientConnUnusable
		}
		cc.lastIdle = time.Time{}
		if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) {
			return nil
		}
		// At the concurrency limit: block until woken (cc.cond is
		// broadcast when streams complete), then re-check everything.
		cc.pendingRequests++
		cc.cond.Wait()
		cc.pendingRequests--
		select {
		case <-cs.abort:
			return cs.abortErr
		default:
		}
	}
}

// writeHeaders writes the given HPACK-encoded block as one HEADERS
// frame followed by zero or more CONTINUATION frames, chunked to
// maxFrameSize, then flushes the underlying writer.
// requires cc.wmu be held
func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error {
	first := true // first frame written (HEADERS is first, then CONTINUATION)
	for len(hdrs) > 0 && cc.werr == nil {
		chunk := hdrs
		if len(chunk) > maxFrameSize {
			chunk = chunk[:maxFrameSize]
		}
		hdrs = hdrs[len(chunk):]
		endHeaders := len(hdrs) == 0
		if first {
			cc.fr.WriteHeaders(HeadersFrameParam{
				StreamID:      streamID,
				BlockFragment: chunk,
				EndStream:     endStream,
				EndHeaders:    endHeaders,
			})
			first = false
		} else {
			cc.fr.WriteContinuation(streamID, endHeaders, chunk)
		}
	}
	cc.bw.Flush()
	return cc.werr
}

// internal error values; they don't escape to callers
var (
	// abort request body write; don't send cancel
	errStopReqBodyWrite = errors.New("http2: aborting request body write")

	// abort request body write, but send a stream reset of CANCEL.
	errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")

	errReqBodyTooLong = errors.New("http2: request body larger than specified content length")
)

// frameScratchBufferLen returns the length of a buffer to use for
// outgoing request bodies to read/write to/from.
//
// It returns max(1, min(peer's advertised max frame size,
// Request.ContentLength+1, 512KB)).
func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int {
	const max = 512 << 10
	n := int64(maxFrameSize)
	if n > max {
		n = max
	}
	if cl := cs.reqBodyContentLength; cl != -1 && cl+1 < n {
		// Add an extra byte past the declared content-length to
		// give the caller's Request.Body io.Reader a chance to
		// give us more bytes than they declared, so we can catch it
		// early.
		n = cl + 1
	}
	if n < 1 {
		return 1
	}
	return int(n) // doesn't truncate; max is 512K
}

// bufPool holds scratch buffers reused by writeRequestBody.
var bufPool sync.Pool // of *[]byte

// writeRequestBody streams req's body as DATA frames, respecting flow
// control, then signals END_STREAM either with trailers or an empty
// DATA frame.
func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
	cc := cs.cc
	body := cs.reqBody
	sentEnd := false // whether we sent the final DATA frame w/ END_STREAM

	hasTrailers := req.Trailer != nil
	remainLen := cs.reqBodyContentLength
	hasContentLen := remainLen != -1

	cc.mu.Lock()
	maxFrameSize := int(cc.maxFrameSize)
	cc.mu.Unlock()

	// Scratch buffer for reading into & writing from.
	scratchLen := cs.frameScratchBufferLen(maxFrameSize)
	var buf []byte
	if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen {
		// Reuse a pooled buffer if it's large enough; return it on exit.
		defer bufPool.Put(bp)
		buf = *bp
	} else {
		buf = make([]byte, scratchLen)
		defer bufPool.Put(&buf)
	}

	var sawEOF bool
	for !sawEOF {
		n, err := body.Read(buf[:len(buf)])
		if hasContentLen {
			remainLen -= int64(n)
			if remainLen == 0 && err == nil {
				// The request body's Content-Length was predeclared and
				// we just finished reading it all, but the underlying io.Reader
				// returned the final chunk with a nil error (which is one of
				// the two valid things a Reader can do at EOF). Because we'd prefer
				// to send the END_STREAM bit early, double-check that we're actually
				// at EOF. Subsequent reads should return (0, EOF) at this point.
				// If either value is different, we return an error in one of two ways below.
				var scratch [1]byte
				var n1 int
				n1, err = body.Read(scratch[:])
				remainLen -= int64(n1)
			}
			if remainLen < 0 {
				err = errReqBodyTooLong
				return err
			}
		}
		if err != nil {
			cc.mu.Lock()
			bodyClosed := cs.reqBodyClosed
			cc.mu.Unlock()
			switch {
			case bodyClosed:
				return errStopReqBodyWrite
			case err == io.EOF:
				sawEOF = true
				err = nil
			default:
				return err
			}
		}

		remain := buf[:n]
		for len(remain) > 0 && err == nil {
			var allowed int32
			allowed, err = cs.awaitFlowControl(len(remain))
			if err != nil {
				return err
			}
			cc.wmu.Lock()
			data := remain[:allowed]
			remain = remain[allowed:]
			sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
			err = cc.fr.WriteData(cs.ID, sentEnd, data)
			if err == nil {
				// TODO(bradfitz): this flush is for latency, not bandwidth.
				// Most requests won't need this. Make this opt-in or
				// opt-out? Use some heuristic on the body type? Nagle-like
				// timers? Based on 'n'? Only last chunk of this for loop,
				// unless flow control tokens are low? For now, always.
				// If we change this, see comment below.
				err = cc.bw.Flush()
			}
			cc.wmu.Unlock()
		}
		if err != nil {
			return err
		}
	}

	if sentEnd {
		// Already sent END_STREAM (which implies we have no
		// trailers) and flushed, because currently all
		// WriteData frames above get a flush. So we're done.
		return nil
	}

	// Since the RoundTrip contract permits the caller to "mutate or reuse"
	// a request after the Response's Body is closed, verify that this hasn't
	// happened before accessing the trailers.
	cc.mu.Lock()
	trailer := req.Trailer
	err = cs.abortErr
	cc.mu.Unlock()
	if err != nil {
		return err
	}

	cc.wmu.Lock()
	defer cc.wmu.Unlock()
	var trls []byte
	if len(trailer) > 0 {
		trls, err = cc.encodeTrailers(trailer)
		if err != nil {
			return err
		}
	}

	// Two ways to send END_STREAM: either with trailers, or
	// with an empty DATA frame.
	if len(trls) > 0 {
		err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls)
	} else {
		err = cc.fr.WriteData(cs.ID, true, nil)
	}
	if ferr := cc.bw.Flush(); ferr != nil && err == nil {
		err = ferr
	}
	return err
}

// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow
// control tokens from the server.
// It returns either the non-zero number of tokens taken or an error
// if the stream is dead.
func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
	cc := cs.cc
	ctx := cs.ctx
	cc.mu.Lock()
	defer cc.mu.Unlock()
	for {
		if cc.closed {
			return 0, errClientConnClosed
		}
		if cs.reqBodyClosed {
			return 0, errStopReqBodyWrite
		}
		select {
		case <-cs.abort:
			return 0, cs.abortErr
		case <-ctx.Done():
			return 0, ctx.Err()
		case <-cs.reqCancel:
			return 0, errRequestCanceled
		default:
		}
		if a := cs.flow.available(); a > 0 {
			take := a
			if int(take) > maxBytes {
				take = int32(maxBytes) // can't truncate int; take is int32
			}
			if take > int32(cc.maxFrameSize) {
				take = int32(cc.maxFrameSize)
			}
			cs.flow.take(take)
			return take, nil
		}
		// No tokens available; block until woken by a broadcast on
		// cc.cond, then re-check from the top.
		cc.cond.Wait()
	}
}

var errNilRequestURL = errors.New("http2: Request.URI is nil")

// requires cc.wmu be held.
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
	cc.hbuf.Reset()
	if req.URL == nil {
		return nil, errNilRequestURL
	}

	host := req.Host
	if host == "" {
		host = req.URL.Host
	}
	host, err := httpguts.PunycodeHostPort(host)
	if err != nil {
		return nil, err
	}

	var path string
	if req.Method != "CONNECT" {
		path = req.URL.RequestURI()
		if !validPseudoPath(path) {
			orig := path
			path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
			if !validPseudoPath(path) {
				if req.URL.Opaque != "" {
					return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
				} else {
					return nil, fmt.Errorf("invalid request :path %q", orig)
				}
			}
		}
	}

	// Check for any invalid headers and return an error before we
	// potentially pollute our hpack state. (We want to be able to
	// continue to reuse the hpack encoder for future requests)
	for k, vv := range req.Header {
		if !httpguts.ValidHeaderFieldName(k) {
			return nil, fmt.Errorf("invalid HTTP header name %q", k)
		}
		for _, v := range vv {
			if !httpguts.ValidHeaderFieldValue(v) {
				return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k)
			}
		}
	}

	// enumerateHeaders calls f for each name/value pair to send, in
	// order. It is invoked twice below: once to measure the header
	// list size, once to actually encode.
	enumerateHeaders := func(f func(name, value string)) {
		// 8.1.2.3 Request Pseudo-Header Fields
		// The :path pseudo-header field includes the path and query parts of the
		// target URI (the path-absolute production and optionally a '?' character
		// followed by the query production (see Sections 3.3 and 3.4 of
		// [RFC3986]).
		f(":authority", host)
		m := req.Method
		if m == "" {
			m = http.MethodGet
		}
		f(":method", m)
		if req.Method != "CONNECT" {
			f(":path", path)
			f(":scheme", req.URL.Scheme)
		}
		if trailers != "" {
			f("trailer", trailers)
		}

		var didUA bool
		for k, vv := range req.Header {
			if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
				// Host is :authority, already sent.
				// Content-Length is automatic, set below.
				continue
			} else if asciiEqualFold(k, "connection") ||
				asciiEqualFold(k, "proxy-connection") ||
				asciiEqualFold(k, "transfer-encoding") ||
				asciiEqualFold(k, "upgrade") ||
				asciiEqualFold(k, "keep-alive") {
				// Per 8.1.2.2 Connection-Specific Header
				// Fields, don't send connection-specific
				// fields. We have already checked if any
				// are error-worthy so just ignore the rest.
				continue
			} else if asciiEqualFold(k, "user-agent") {
				// Match Go's http1 behavior: at most one
				// User-Agent. If set to nil or empty string,
				// then omit it. Otherwise if not mentioned,
				// include the default (below).
				didUA = true
				if len(vv) < 1 {
					continue
				}
				vv = vv[:1]
				if vv[0] == "" {
					continue
				}
			} else if asciiEqualFold(k, "cookie") {
				// Per 8.1.2.5 To allow for better compression efficiency, the
				// Cookie header field MAY be split into separate header fields,
				// each with one or more cookie-pairs.
				for _, v := range vv {
					for {
						p := strings.IndexByte(v, ';')
						if p < 0 {
							break
						}
						f("cookie", v[:p])
						p++
						// strip space after semicolon if any.
						for p+1 <= len(v) && v[p] == ' ' {
							p++
						}
						v = v[p:]
					}
					if len(v) > 0 {
						f("cookie", v)
					}
				}
				continue
			}

			for _, v := range vv {
				f(k, v)
			}
		}
		if shouldSendReqContentLength(req.Method, contentLength) {
			f("content-length", strconv.FormatInt(contentLength, 10))
		}
		if addGzipHeader {
			f("accept-encoding", "gzip")
		}
		if !didUA {
			f("user-agent", defaultUserAgent)
		}
	}

	// Do a first pass over the headers counting bytes to ensure
	// we don't exceed cc.peerMaxHeaderListSize. This is done as a
	// separate pass before encoding the headers to prevent
	// modifying the hpack state.
	hlSize := uint64(0)
	enumerateHeaders(func(name, value string) {
		hf := hpack.HeaderField{Name: name, Value: value}
		hlSize += uint64(hf.Size())
	})

	if hlSize > cc.peerMaxHeaderListSize {
		return nil, errRequestHeaderListSize
	}

	trace := httptrace.ContextClientTrace(req.Context())
	traceHeaders := traceHasWroteHeaderField(trace)

	// Header list size is ok. Write the headers.
	enumerateHeaders(func(name, value string) {
		name, ascii := asciiToLower(name)
		if !ascii {
			// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
			// field names have to be ASCII characters (just as in HTTP/1.x).
			return
		}
		cc.writeHeader(name, value)
		if traceHeaders {
			traceWroteHeaderField(trace, name, value)
		}
	})

	return cc.hbuf.Bytes(), nil
}

// shouldSendReqContentLength reports whether the http2.Transport should send
// a "content-length" request header. This logic is basically a copy of the net/http
// transferWriter.shouldSendContentLength.
// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
// -1 means unknown.
1884 func shouldSendReqContentLength(method string, contentLength int64) bool { 1885 if contentLength > 0 { 1886 return true 1887 } 1888 if contentLength < 0 { 1889 return false 1890 } 1891 // For zero bodies, whether we send a content-length depends on the method. 1892 // It also kinda doesn't matter for http2 either way, with END_STREAM. 1893 switch method { 1894 case "POST", "PUT", "PATCH": 1895 return true 1896 default: 1897 return false 1898 } 1899 } 1900 1901 // requires cc.wmu be held. 1902 func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { 1903 cc.hbuf.Reset() 1904 1905 hlSize := uint64(0) 1906 for k, vv := range trailer { 1907 for _, v := range vv { 1908 hf := hpack.HeaderField{Name: k, Value: v} 1909 hlSize += uint64(hf.Size()) 1910 } 1911 } 1912 if hlSize > cc.peerMaxHeaderListSize { 1913 return nil, errRequestHeaderListSize 1914 } 1915 1916 for k, vv := range trailer { 1917 lowKey, ascii := asciiToLower(k) 1918 if !ascii { 1919 // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header 1920 // field names have to be ASCII characters (just as in HTTP/1.x). 1921 continue 1922 } 1923 // Transfer-Encoding, etc.. have already been filtered at the 1924 // start of RoundTrip 1925 for _, v := range vv { 1926 cc.writeHeader(lowKey, v) 1927 } 1928 } 1929 return cc.hbuf.Bytes(), nil 1930 } 1931 1932 func (cc *ClientConn) writeHeader(name, value string) { 1933 if VerboseLogs { 1934 log.Printf("http2: Transport encoding header %q = %q", name, value) 1935 } 1936 cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) 1937 } 1938 1939 type resAndError struct { 1940 _ incomparable 1941 res *http.Response 1942 err error 1943 } 1944 1945 // requires cc.mu be held. 
func (cc *ClientConn) addStreamLocked(cs *clientStream) {
	cs.flow.add(int32(cc.initialWindowSize))
	cs.flow.setConnFlow(&cc.flow)
	cs.inflow.add(transportDefaultStreamFlow)
	cs.inflow.setConnFlow(&cc.inflow)
	cs.ID = cc.nextStreamID
	// Advance by two so client-initiated stream IDs stay odd.
	cc.nextStreamID += 2
	cc.streams[cs.ID] = cs
	if cs.ID == 0 {
		panic("assigned stream ID 0")
	}
}

// forgetStreamID removes the stream from cc's table, wakes any waiters,
// and, once the last stream is gone, arms the idle timer or closes the
// connection entirely if it shouldn't be reused.
func (cc *ClientConn) forgetStreamID(id uint32) {
	cc.mu.Lock()
	slen := len(cc.streams)
	delete(cc.streams, id)
	if len(cc.streams) != slen-1 {
		panic("forgetting unknown stream id")
	}
	cc.lastActive = time.Now()
	if len(cc.streams) == 0 && cc.idleTimer != nil {
		cc.idleTimer.Reset(cc.idleTimeout)
		cc.lastIdle = time.Now()
	}
	// Wake up writeRequestBody via clientStream.awaitFlowControl and
	// wake up RoundTrip if there is a pending request.
	cc.cond.Broadcast()

	closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives()
	if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 {
		if VerboseLogs {
			cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2)
		}
		cc.closed = true
		// Deferred so the close happens after cc.mu is released below.
		defer cc.tconn.Close()
	}

	cc.mu.Unlock()
}

// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
type clientConnReadLoop struct {
	_  incomparable
	cc *ClientConn
}

// readLoop runs in its own goroutine and reads and dispatches frames.
func (cc *ClientConn) readLoop() {
	rl := &clientConnReadLoop{cc: cc}
	defer rl.cleanup()
	cc.readerErr = rl.run()
	if ce, ok := cc.readerErr.(ConnectionError); ok {
		// Tell the peer why we're tearing down the connection.
		cc.wmu.Lock()
		cc.fr.WriteGoAway(0, ErrCode(ce), nil)
		cc.wmu.Unlock()
	}
}

// GoAwayError is returned by the Transport when the server closes the
// TCP connection after sending a GOAWAY frame.
type GoAwayError struct {
	LastStreamID uint32
	ErrCode      ErrCode
	DebugData    string
}

// Error implements the error interface, including the GOAWAY frame's
// last stream ID, error code, and debug data.
func (e GoAwayError) Error() string {
	return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q",
		e.LastStreamID, e.ErrCode, e.DebugData)
}

// isEOFOrNetReadError reports whether err is io.EOF or a net.OpError from a
// read — the error shapes produced when the peer closes the connection.
func isEOFOrNetReadError(err error) bool {
	if err == io.EOF {
		return true
	}
	ne, ok := err.(*net.OpError)
	return ok && ne.Op == "read"
}

// cleanup tears the connection down after the read loop exits: it closes the
// transport connection, removes the conn from the pool, stops the idle timer,
// aborts every stream the peer had not already closed, and wakes any waiters.
func (rl *clientConnReadLoop) cleanup() {
	cc := rl.cc
	defer cc.tconn.Close()
	defer cc.t.connPool().MarkDead(cc)
	defer close(cc.readerDone)

	if cc.idleTimer != nil {
		cc.idleTimer.Stop()
	}

	// Close any response bodies if the server closes prematurely.
	// TODO: also do this if we've written the headers but not
	// gotten a response yet.
	err := cc.readerErr
	cc.mu.Lock()
	if cc.goAway != nil && isEOFOrNetReadError(err) {
		// Prefer surfacing the GOAWAY details over a bare EOF/read error.
		err = GoAwayError{
			LastStreamID: cc.goAway.LastStreamID,
			ErrCode:      cc.goAway.ErrCode,
			DebugData:    cc.goAwayDebug,
		}
	} else if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	cc.closed = true
	for _, cs := range cc.streams {
		select {
		case <-cs.peerClosed:
			// The server closed the stream before closing the conn,
			// so no need to interrupt it.
		default:
			cs.abortStreamLocked(err)
		}
	}
	cc.cond.Broadcast()
	cc.mu.Unlock()
}

// countReadFrameError calls Transport.CountError with a string
// representing err.
func (cc *ClientConn) countReadFrameError(err error) {
	f := cc.t.CountError
	if f == nil || err == nil {
		return
	}
	if ce, ok := err.(ConnectionError); ok {
		errCode := ErrCode(ce)
		f(fmt.Sprintf("read_frame_conn_error_%s", errCode.stringToken()))
		return
	}
	if errors.Is(err, io.EOF) {
		f("read_frame_eof")
		return
	}
	if errors.Is(err, io.ErrUnexpectedEOF) {
		f("read_frame_unexpected_eof")
		return
	}
	if errors.Is(err, ErrFrameTooLarge) {
		f("read_frame_too_large")
		return
	}
	f("read_frame_other")
}

// run reads frames from the connection until a fatal error occurs,
// dispatching each frame to the matching process* handler. It also arms the
// optional read-idle health-check timer, resetting it after each frame read.
// Stream-level errors abort just the affected stream and keep the connection
// alive; any other error ends the loop and is returned.
func (rl *clientConnReadLoop) run() error {
	cc := rl.cc
	gotSettings := false
	readIdleTimeout := cc.t.ReadIdleTimeout
	var t *time.Timer
	if readIdleTimeout != 0 {
		t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
		defer t.Stop()
	}
	for {
		f, err := cc.fr.ReadFrame()
		if t != nil {
			t.Reset(readIdleTimeout)
		}
		if err != nil {
			cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
		}
		if se, ok := err.(StreamError); ok {
			if cs := rl.streamByID(se.StreamID); cs != nil {
				if se.Cause == nil {
					se.Cause = cc.fr.errDetail
				}
				rl.endStreamError(cs, se)
			}
			continue
		} else if err != nil {
			cc.countReadFrameError(err)
			return err
		}
		if VerboseLogs {
			cc.vlogf("http2: Transport received %s", summarizeFrame(f))
		}
		if !gotSettings {
			// The server must open the connection with a SETTINGS frame.
			if _, ok := f.(*SettingsFrame); !ok {
				cc.logf("protocol error: received %T before a SETTINGS frame", f)
				return ConnectionError(ErrCodeProtocol)
			}
			gotSettings = true
		}

		switch f := f.(type) {
		case *MetaHeadersFrame:
			err = rl.processHeaders(f)
		case *DataFrame:
			err = rl.processData(f)
		case *GoAwayFrame:
			err = rl.processGoAway(f)
		case *RSTStreamFrame:
			err = rl.processResetStream(f)
		case *SettingsFrame:
			err = rl.processSettings(f)
		case *PushPromiseFrame:
			err = rl.processPushPromise(f)
		case *WindowUpdateFrame:
			err = rl.processWindowUpdate(f)
		case *PingFrame:
			err = rl.processPing(f)
		default:
			cc.logf("Transport: unhandled response frame type %T", f)
		}
		if err != nil {
			if VerboseLogs {
				cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
			}
			return err
		}
	}
}

// processHeaders handles a HEADERS (or trailers) frame for a stream,
// recording the first-response-byte trace event, dispatching trailers to
// processTrailers, and otherwise building the http.Response via
// handleResponse and delivering it to the waiting RoundTrip.
func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
	cs := rl.streamByID(f.StreamID)
	if cs == nil {
		// We'd get here if we canceled a request while the
		// server had its response still in flight. So if this
		// was just something we canceled, ignore it.
		return nil
	}
	if cs.readClosed {
		rl.endStreamError(cs, StreamError{
			StreamID: f.StreamID,
			Code:     ErrCodeProtocol,
			Cause:    errors.New("protocol error: headers after END_STREAM"),
		})
		return nil
	}
	if !cs.firstByte {
		if cs.trace != nil {
			// TODO(bradfitz): move first response byte earlier,
			// when we first read the 9 byte header, not waiting
			// until all the HEADERS+CONTINUATION frames have been
			// merged. This works for now.
			traceFirstResponseByte(cs.trace)
		}
		cs.firstByte = true
	}
	if !cs.pastHeaders {
		cs.pastHeaders = true
	} else {
		// A second HEADERS frame on the stream carries trailers.
		return rl.processTrailers(cs, f)
	}

	res, err := rl.handleResponse(cs, f)
	if err != nil {
		if _, ok := err.(ConnectionError); ok {
			return err
		}
		// Any other error type is a stream error.
		rl.endStreamError(cs, StreamError{
			StreamID: f.StreamID,
			Code:     ErrCodeProtocol,
			Cause:    err,
		})
		return nil // return nil from process* funcs to keep conn alive
	}
	if res == nil {
		// (nil, nil) special case. See handleResponse docs.
		return nil
	}
	cs.resTrailer = &res.Trailer
	cs.res = res
	close(cs.respHeaderRecv)
	if f.StreamEnded() {
		rl.endStream(cs)
	}
	return nil
}

// may return error types nil, or ConnectionError. Any other error value
// is a StreamError of type ErrCodeProtocol. The returned error in that case
// is the detail.
//
// As a special case, handleResponse may return (nil, nil) to skip the
// frame (currently only used for 1xx responses).
func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) {
	if f.Truncated {
		return nil, errResponseHeaderListSize
	}

	status := f.PseudoValue("status")
	if status == "" {
		return nil, errors.New("malformed response from server: missing status pseudo header")
	}
	statusCode, err := strconv.Atoi(status)
	if err != nil {
		return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header")
	}

	regularFields := f.RegularFields()
	// strs is a single backing array handed out one element at a time to
	// avoid a separate allocation per single-valued header.
	strs := make([]string, len(regularFields))
	header := make(http.Header, len(regularFields))
	res := &http.Response{
		Proto:      "HTTP/2.0",
		ProtoMajor: 2,
		Header:     header,
		StatusCode: statusCode,
		Status:     status + " " + http.StatusText(statusCode),
	}
	for _, hf := range regularFields {
		key := http.CanonicalHeaderKey(hf.Name)
		if key == "Trailer" {
			t := res.Trailer
			if t == nil {
				t = make(http.Header)
				res.Trailer = t
			}
			foreachHeaderElement(hf.Value, func(v string) {
				t[http.CanonicalHeaderKey(v)] = nil
			})
		} else {
			vv := header[key]
			if vv == nil && len(strs) > 0 {
				// More than likely this will be a single-element key.
				// Most headers aren't multi-valued.
				// Set the capacity on strs[0] to 1, so any future append
				// won't extend the slice into the other strings.
				vv, strs = strs[:1:1], strs[1:]
				vv[0] = hf.Value
				header[key] = vv
			} else {
				header[key] = append(vv, hf.Value)
			}
		}
	}

	if statusCode >= 100 && statusCode <= 199 {
		if f.StreamEnded() {
			return nil, errors.New("1xx informational response with END_STREAM flag")
		}
		cs.num1xx++
		const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http
		if cs.num1xx > max1xxResponses {
			return nil, errors.New("http2: too many 1xx informational responses")
		}
		if fn := cs.get1xxTraceFunc(); fn != nil {
			if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil {
				return nil, err
			}
		}
		if statusCode == 100 {
			traceGot100Continue(cs.trace)
			// Non-blocking notify; the writer may not be waiting.
			select {
			case cs.on100 <- struct{}{}:
			default:
			}
		}
		cs.pastHeaders = false // do it all again
		return nil, nil
	}

	res.ContentLength = -1
	if clens := res.Header["Content-Length"]; len(clens) == 1 {
		if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil {
			res.ContentLength = int64(cl)
		} else {
			// TODO: care? unlike http/1, it won't mess up our framing, so it's
			// more safe smuggling-wise to ignore.
		}
	} else if len(clens) > 1 {
		// TODO: care? unlike http/1, it won't mess up our framing, so it's
		// more safe smuggling-wise to ignore.
	} else if f.StreamEnded() && !cs.isHead {
		res.ContentLength = 0
	}

	if cs.isHead {
		res.Body = noBody
		return res, nil
	}

	if f.StreamEnded() {
		if res.ContentLength > 0 {
			// Declared a body but ended the stream without one.
			res.Body = missingBody{}
		} else {
			res.Body = noBody
		}
		return res, nil
	}

	cs.bufPipe.setBuffer(&dataBuffer{expected: res.ContentLength})
	cs.bytesRemain = res.ContentLength
	res.Body = transportResponseBody{cs}

	if cs.requestedGzip && asciiEqualFold(res.Header.Get("Content-Encoding"), "gzip") {
		res.Header.Del("Content-Encoding")
		res.Header.Del("Content-Length")
		res.ContentLength = -1
		res.Body = &gzipReader{body: res.Body}
		res.Uncompressed = true
	}
	return res, nil
}

// processTrailers validates and records the stream's trailers from the
// final HEADERS frame, then ends the stream.
func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error {
	if cs.pastTrailers {
		// Too many HEADERS frames for this stream.
		return ConnectionError(ErrCodeProtocol)
	}
	cs.pastTrailers = true
	if !f.StreamEnded() {
		// We expect that any headers for trailers also
		// has END_STREAM.
		return ConnectionError(ErrCodeProtocol)
	}
	if len(f.PseudoFields()) > 0 {
		// No pseudo header fields are defined for trailers.
		// TODO: ConnectionError might be overly harsh? Check.
		return ConnectionError(ErrCodeProtocol)
	}

	trailer := make(http.Header)
	for _, hf := range f.RegularFields() {
		key := http.CanonicalHeaderKey(hf.Name)
		trailer[key] = append(trailer[key], hf.Value)
	}
	cs.trailer = trailer

	rl.endStream(cs)
	return nil
}

// transportResponseBody is the concrete type of Transport.RoundTrip's
// Response.Body. It is an io.ReadCloser.
type transportResponseBody struct {
	cs *clientStream
}

// Read copies response data out of the stream's buffer pipe, enforces the
// declared Content-Length, and returns flow-control tokens for consumed
// bytes by writing connection- and stream-level WINDOW_UPDATE frames.
func (b transportResponseBody) Read(p []byte) (n int, err error) {
	cs := b.cs
	cc := cs.cc

	if cs.readErr != nil {
		return 0, cs.readErr
	}
	n, err = b.cs.bufPipe.Read(p)
	if cs.bytesRemain != -1 {
		// A Content-Length was declared; police it in both directions.
		if int64(n) > cs.bytesRemain {
			n = int(cs.bytesRemain)
			if err == nil {
				err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
				cs.abortStream(err)
			}
			cs.readErr = err
			return int(cs.bytesRemain), err
		}
		cs.bytesRemain -= int64(n)
		if err == io.EOF && cs.bytesRemain > 0 {
			err = io.ErrUnexpectedEOF
			cs.readErr = err
			return n, err
		}
	}
	if n == 0 {
		// No flow control tokens to send back.
		return
	}

	cc.mu.Lock()
	var connAdd, streamAdd int32
	// Check the conn-level first, before the stream-level.
	if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
		connAdd = transportDefaultConnFlow - v
		cc.inflow.add(connAdd)
	}
	if err == nil { // No need to refresh if the stream is over or failed.
		// Consider any buffered body data (read from the conn but not
		// consumed by the client) when computing flow control for this
		// stream.
		v := int(cs.inflow.available()) + cs.bufPipe.Len()
		if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
			streamAdd = int32(transportDefaultStreamFlow - v)
			cs.inflow.add(streamAdd)
		}
	}
	cc.mu.Unlock()

	if connAdd != 0 || streamAdd != 0 {
		cc.wmu.Lock()
		defer cc.wmu.Unlock()
		if connAdd != 0 {
			cc.fr.WriteWindowUpdate(0, mustUint31(connAdd))
		}
		if streamAdd != 0 {
			cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd))
		}
		cc.bw.Flush()
	}
	return
}

var errClosedResponseBody = errors.New("http2: response body closed")

// Close aborts the stream, refunds any unread connection-level flow
// control, and waits for request cleanup (or cancellation) to finish.
func (b transportResponseBody) Close() error {
	cs := b.cs
	cc := cs.cc

	unread := cs.bufPipe.Len()
	if unread > 0 {
		cc.mu.Lock()
		// Return connection-level flow control.
		if unread > 0 {
			cc.inflow.add(int32(unread))
		}
		cc.mu.Unlock()

		// TODO(dneil): Acquiring this mutex can block indefinitely.
		// Move flow control return to a goroutine?
		cc.wmu.Lock()
		// Return connection-level flow control.
		if unread > 0 {
			cc.fr.WriteWindowUpdate(0, uint32(unread))
		}
		cc.bw.Flush()
		cc.wmu.Unlock()
	}

	cs.bufPipe.BreakWithError(errClosedResponseBody)
	cs.abortStream(errClosedResponseBody)

	select {
	case <-cs.donec:
	case <-cs.ctx.Done():
		// See golang/go#49366: The net/http package can cancel the
		// request context after the response body is fully read.
		// Don't treat this as an error.
		return nil
	case <-cs.reqCancel:
		return errRequestCanceled
	}
	return nil
}

// processData handles a DATA frame: it refunds flow control for unknown or
// canceled streams, enforces the stream's receive window, writes payload
// bytes into the stream's buffer pipe (refunding padding and unwritable
// data), and ends the stream on END_STREAM.
func (rl *clientConnReadLoop) processData(f *DataFrame) error {
	cc := rl.cc
	cs := rl.streamByID(f.StreamID)
	data := f.Data()
	if cs == nil {
		cc.mu.Lock()
		neverSent := cc.nextStreamID
		cc.mu.Unlock()
		if f.StreamID >= neverSent {
			// We never asked for this.
			cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
			return ConnectionError(ErrCodeProtocol)
		}
		// We probably did ask for this, but canceled. Just ignore it.
		// TODO: be stricter here? only silently ignore things which
		// we canceled, but not things which were closed normally
		// by the peer? Tough without accumulating too much state.

		// But at least return their flow control:
		if f.Length > 0 {
			cc.mu.Lock()
			cc.inflow.add(int32(f.Length))
			cc.mu.Unlock()

			cc.wmu.Lock()
			cc.fr.WriteWindowUpdate(0, uint32(f.Length))
			cc.bw.Flush()
			cc.wmu.Unlock()
		}
		return nil
	}
	if cs.readClosed {
		cc.logf("protocol error: received DATA after END_STREAM")
		rl.endStreamError(cs, StreamError{
			StreamID: f.StreamID,
			Code:     ErrCodeProtocol,
		})
		return nil
	}
	if !cs.firstByte {
		cc.logf("protocol error: received DATA before a HEADERS frame")
		rl.endStreamError(cs, StreamError{
			StreamID: f.StreamID,
			Code:     ErrCodeProtocol,
		})
		return nil
	}
	if f.Length > 0 {
		if cs.isHead && len(data) > 0 {
			cc.logf("protocol error: received DATA on a HEAD request")
			rl.endStreamError(cs, StreamError{
				StreamID: f.StreamID,
				Code:     ErrCodeProtocol,
			})
			return nil
		}
		// Check connection-level flow control.
		cc.mu.Lock()
		if cs.inflow.available() >= int32(f.Length) {
			cs.inflow.take(int32(f.Length))
		} else {
			cc.mu.Unlock()
			return ConnectionError(ErrCodeFlowControl)
		}
		// Return any padded flow control now, since we won't
		// refund it later on body reads.
		var refund int
		if pad := int(f.Length) - len(data); pad > 0 {
			refund += pad
		}

		didReset := false
		var err error
		if len(data) > 0 {
			if _, err = cs.bufPipe.Write(data); err != nil {
				// Return len(data) now if the stream is already closed,
				// since data will never be read.
				didReset = true
				refund += len(data)
			}
		}

		if refund > 0 {
			cc.inflow.add(int32(refund))
			if !didReset {
				cs.inflow.add(int32(refund))
			}
		}
		cc.mu.Unlock()

		if refund > 0 {
			cc.wmu.Lock()
			cc.fr.WriteWindowUpdate(0, uint32(refund))
			if !didReset {
				cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
			}
			cc.bw.Flush()
			cc.wmu.Unlock()
		}

		if err != nil {
			rl.endStreamError(cs, err)
			return nil
		}
	}

	if f.StreamEnded() {
		rl.endStream(cs)
	}
	return nil
}

// endStream marks the read side of the stream as cleanly finished,
// delivering io.EOF (plus any trailers) to the response body reader.
func (rl *clientConnReadLoop) endStream(cs *clientStream) {
	// TODO: check that any declared content-length matches, like
	// server.go's (*stream).endStream method.
	if !cs.readClosed {
		cs.readClosed = true
		// Close cs.bufPipe and cs.peerClosed with cc.mu held to avoid a
		// race condition: The caller can read io.EOF from Response.Body
		// and close the body before we close cs.peerClosed, causing
		// cleanupWriteRequest to send a RST_STREAM.
		rl.cc.mu.Lock()
		defer rl.cc.mu.Unlock()
		cs.bufPipe.closeWithErrorAndCode(io.EOF, cs.copyTrailers)
		close(cs.peerClosed)
	}
}

// endStreamError aborts the stream with err after a read-side error.
func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
	cs.readAborted = true
	cs.abortStream(err)
}

// streamByID returns the open, non-aborted stream with the given id,
// or nil if there is none.
func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream {
	rl.cc.mu.Lock()
	defer rl.cc.mu.Unlock()
	cs := rl.cc.streams[id]
	if cs != nil && !cs.readAborted {
		return cs
	}
	return nil
}

// copyTrailers copies the received trailers into the response's Trailer
// map, allocating it on first use.
func (cs *clientStream) copyTrailers() {
	for k, vv := range cs.trailer {
		t := cs.resTrailer
		if *t == nil {
			*t = make(http.Header)
		}
		(*t)[k] = vv
	}
}

// processGoAway removes the connection from the pool so no new requests
// are sent on it and records the GOAWAY frame on the connection.
func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
	cc := rl.cc
	cc.t.connPool().MarkDead(cc)
	if f.ErrCode != 0 {
		// TODO: deal with GOAWAY more. particularly the error code
		cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
		if fn := cc.t.CountError; fn != nil {
			fn("recv_goaway_" + f.ErrCode.stringToken())
		}

	}
	cc.setGoAway(f)
	return nil
}

// processSettings applies a SETTINGS frame and, when it isn't an ack,
// writes our SETTINGS ack back to the server.
func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
	cc := rl.cc
	// Locking both mu and wmu here allows frame encoding to read settings with only wmu held.
	// Acquiring wmu when f.IsAck() is unnecessary, but convenient and mostly harmless.
	cc.wmu.Lock()
	defer cc.wmu.Unlock()

	if err := rl.processSettingsNoWrite(f); err != nil {
		return err
	}
	if !f.IsAck() {
		cc.fr.WriteSettingsAck()
		cc.bw.Flush()
	}
	return nil
}

// processSettingsNoWrite updates connection state from a SETTINGS frame
// without writing anything back to the peer.
func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
	cc := rl.cc
	cc.mu.Lock()
	defer cc.mu.Unlock()

	if f.IsAck() {
		if cc.wantSettingsAck {
			cc.wantSettingsAck = false
			return nil
		}
		// An ack we never asked for is a protocol error.
		return ConnectionError(ErrCodeProtocol)
	}

	var seenMaxConcurrentStreams bool
	err := f.ForeachSetting(func(s Setting) error {
		switch s.ID {
		case SettingMaxFrameSize:
			cc.maxFrameSize = s.Val
		case SettingMaxConcurrentStreams:
			cc.maxConcurrentStreams = s.Val
			seenMaxConcurrentStreams = true
		case SettingMaxHeaderListSize:
			cc.peerMaxHeaderListSize = uint64(s.Val)
		case SettingInitialWindowSize:
			// Values above the maximum flow-control
			// window size of 2^31-1 MUST be treated as a
			// connection error (Section 5.4.1) of type
			// FLOW_CONTROL_ERROR.
			if s.Val > math.MaxInt32 {
				return ConnectionError(ErrCodeFlowControl)
			}

			// Adjust flow control of currently-open
			// frames by the difference of the old initial
			// window size and this one.
			delta := int32(s.Val) - int32(cc.initialWindowSize)
			for _, cs := range cc.streams {
				cs.flow.add(delta)
			}
			cc.cond.Broadcast()

			cc.initialWindowSize = s.Val
		default:
			// TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
			cc.vlogf("Unhandled Setting: %v", s)
		}
		return nil
	})
	if err != nil {
		return err
	}

	if !cc.seenSettings {
		if !seenMaxConcurrentStreams {
			// This was the servers initial SETTINGS frame and it
			// didn't contain a MAX_CONCURRENT_STREAMS field so
			// increase the number of concurrent streams this
			// connection can establish to our default.
			cc.maxConcurrentStreams = defaultMaxConcurrentStreams
		}
		cc.seenSettings = true
	}

	return nil
}

// processWindowUpdate credits the connection- or stream-level send window
// and wakes any writers blocked on flow control.
func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
	cc := rl.cc
	cs := rl.streamByID(f.StreamID)
	if f.StreamID != 0 && cs == nil {
		return nil
	}

	cc.mu.Lock()
	defer cc.mu.Unlock()

	fl := &cc.flow
	if cs != nil {
		fl = &cs.flow
	}
	if !fl.add(int32(f.Increment)) {
		return ConnectionError(ErrCodeFlowControl)
	}
	cc.cond.Broadcast()
	return nil
}

// processResetStream aborts the stream named by a RST_STREAM frame with a
// StreamError carrying the peer's error code.
func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
	cs := rl.streamByID(f.StreamID)
	if cs == nil {
		// TODO: return error if server tries to RST_STREAM an idle stream
		return nil
	}
	serr := streamError(cs.ID, f.ErrCode)
	serr.Cause = errFromPeer
	if f.ErrCode == ErrCodeProtocol {
		rl.cc.SetDoNotReuse()
	}
	if fn := cs.cc.t.CountError; fn != nil {
		fn("recv_rststream_" + f.ErrCode.stringToken())
	}
	cs.abortStream(serr)

	cs.bufPipe.CloseWithError(serr)
	return nil
}

// Ping sends a PING frame to the server and waits for the ack.
func (cc *ClientConn) Ping(ctx context.Context) error {
	c := make(chan struct{})
	// Generate a random payload
	var p [8]byte
	for {
		if _, err := rand.Read(p[:]); err != nil {
			return err
		}
		cc.mu.Lock()
		// check for dup before insert
		if _, found := cc.pings[p]; !found {
			cc.pings[p] = c
			cc.mu.Unlock()
			break
		}
		cc.mu.Unlock()
	}
	errc := make(chan error, 1)
	go func() {
		cc.wmu.Lock()
		defer cc.wmu.Unlock()
		if err := cc.fr.WritePing(false, p); err != nil {
			errc <- err
			return
		}
		if err := cc.bw.Flush(); err != nil {
			errc <- err
			return
		}
	}()
	// Wait for the ack (c is closed by processPing), a write error,
	// cancellation, or connection teardown — whichever comes first.
	select {
	case <-c:
		return nil
	case err := <-errc:
		return err
	case <-ctx.Done():
		return ctx.Err()
	case <-cc.readerDone:
		// connection closed
		return cc.readerErr
	}
}

// processPing answers a peer PING with an ack or, for an ack, notifies the
// in-flight Ping call that registered the matching payload.
func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
	if f.IsAck() {
		cc := rl.cc
		cc.mu.Lock()
		defer cc.mu.Unlock()
		// If ack, notify listener if any
		if c, ok := cc.pings[f.Data]; ok {
			close(c)
			delete(cc.pings, f.Data)
		}
		return nil
	}
	cc := rl.cc
	cc.wmu.Lock()
	defer cc.wmu.Unlock()
	if err := cc.fr.WritePing(true, f.Data); err != nil {
		return err
	}
	return cc.bw.Flush()
}

// processPushPromise rejects server push, which this transport disables
// via SETTINGS_ENABLE_PUSH=0.
func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
	// We told the peer we don't want them.
	// Spec says:
	// "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
	// setting of the peer endpoint is set to 0. An endpoint that
	// has set this setting and has received acknowledgement MUST
	// treat the receipt of a PUSH_PROMISE frame as a connection
	// error (Section 5.4.1) of type PROTOCOL_ERROR."
	return ConnectionError(ErrCodeProtocol)
}

// writeStreamReset writes a RST_STREAM frame for the given stream.
func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
	// TODO: map err to more interesting error codes, once the
	// HTTP community comes up with some. But currently for
	// RST_STREAM there's no equivalent to GOAWAY frame's debug
	// data, and the error codes are all pretty vague ("cancel").
	cc.wmu.Lock()
	cc.fr.WriteRSTStream(streamID, code)
	cc.bw.Flush()
	cc.wmu.Unlock()
}

var (
	errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
	errRequestHeaderListSize  = errors.New("http2: request header list larger than peer's advertised limit")
)

// logf logs unconditionally via the Transport's logger.
func (cc *ClientConn) logf(format string, args ...interface{}) {
	cc.t.logf(format, args...)
}

// vlogf logs only when VerboseLogs is enabled.
func (cc *ClientConn) vlogf(format string, args ...interface{}) {
	cc.t.vlogf(format, args...)
}

func (t *Transport) vlogf(format string, args ...interface{}) {
	if VerboseLogs {
		t.logf(format, args...)
	}
}

func (t *Transport) logf(format string, args ...interface{}) {
	log.Printf(format, args...)
}

// noBody is a non-nil, always-empty response body.
var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))

// missingBody is the response body for responses that declared a body but
// ended the stream without one; reads report io.ErrUnexpectedEOF.
type missingBody struct{}

func (missingBody) Close() error             { return nil }
func (missingBody) Read([]byte) (int, error) { return 0, io.ErrUnexpectedEOF }

// strSliceContains reports whether ss contains s.
func strSliceContains(ss []string, s string) bool {
	for _, v := range ss {
		if v == s {
			return true
		}
	}
	return false
}

// erringRoundTripper is a RoundTripper that always fails with a fixed error.
type erringRoundTripper struct{ err error }

func (rt erringRoundTripper) RoundTripErr() error                             { return rt.err }
func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }

// gzipReader wraps a response body so it can lazily
// call gzip.NewReader on the first call to Read
type gzipReader struct {
	_    incomparable
	body io.ReadCloser // underlying Response.Body
	zr   *gzip.Reader  // lazily-initialized gzip reader
	zerr error         // sticky error
}

// Read decompresses from the underlying body, creating the gzip reader on
// first use and remembering any creation error for subsequent calls.
func (gz *gzipReader) Read(p []byte) (n int, err error) {
	if gz.zerr != nil {
		return 0, gz.zerr
	}
	if gz.zr == nil {
		gz.zr, err = gzip.NewReader(gz.body)
		if err != nil {
			gz.zerr = err
			return 0, err
		}
	}
	return gz.zr.Read(p)
}

// Close closes the underlying body (not the gzip reader itself).
func (gz *gzipReader) Close() error {
	return gz.body.Close()
}

// errorReader always fails reads with a fixed error.
type errorReader struct{ err error }

func (r errorReader) Read(p []byte) (int, error) { return 0, r.err }

// isConnectionCloseRequest reports whether req should use its own
// connection for a single request and then close the connection.
func isConnectionCloseRequest(req *http.Request) bool {
	return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close")
}

// registerHTTPSProtocol calls Transport.RegisterProtocol but
// converting panics into errors.
2944 func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) { 2945 defer func() { 2946 if e := recover(); e != nil { 2947 err = fmt.Errorf("%v", e) 2948 } 2949 }() 2950 t.RegisterProtocol("https", rt) 2951 return nil 2952 } 2953 2954 // noDialH2RoundTripper is a RoundTripper which only tries to complete the request 2955 // if there's already has a cached connection to the host. 2956 // (The field is exported so it can be accessed via reflect from net/http; tested 2957 // by TestNoDialH2RoundTripperType) 2958 type noDialH2RoundTripper struct{ *Transport } 2959 2960 func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { 2961 res, err := rt.Transport.RoundTrip(req) 2962 if isNoCachedConnError(err) { 2963 return nil, http.ErrSkipAltProtocol 2964 } 2965 return res, err 2966 } 2967 2968 func (t *Transport) idleConnTimeout() time.Duration { 2969 if t.t1 != nil { 2970 return t.t1.IdleConnTimeout 2971 } 2972 return 0 2973 } 2974 2975 func traceGetConn(req *http.Request, hostPort string) { 2976 trace := httptrace.ContextClientTrace(req.Context()) 2977 if trace == nil || trace.GetConn == nil { 2978 return 2979 } 2980 trace.GetConn(hostPort) 2981 } 2982 2983 func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { 2984 trace := httptrace.ContextClientTrace(req.Context()) 2985 if trace == nil || trace.GotConn == nil { 2986 return 2987 } 2988 ci := httptrace.GotConnInfo{Conn: cc.tconn} 2989 ci.Reused = reused 2990 cc.mu.Lock() 2991 ci.WasIdle = len(cc.streams) == 0 && reused 2992 if ci.WasIdle && !cc.lastActive.IsZero() { 2993 ci.IdleTime = time.Now().Sub(cc.lastActive) 2994 } 2995 cc.mu.Unlock() 2996 2997 trace.GotConn(ci) 2998 } 2999 3000 func traceWroteHeaders(trace *httptrace.ClientTrace) { 3001 if trace != nil && trace.WroteHeaders != nil { 3002 trace.WroteHeaders() 3003 } 3004 } 3005 3006 func traceGot100Continue(trace *httptrace.ClientTrace) { 3007 if trace != nil && trace.Got100Continue != nil { 3008 
trace.Got100Continue() 3009 } 3010 } 3011 3012 func traceWait100Continue(trace *httptrace.ClientTrace) { 3013 if trace != nil && trace.Wait100Continue != nil { 3014 trace.Wait100Continue() 3015 } 3016 } 3017 3018 func traceWroteRequest(trace *httptrace.ClientTrace, err error) { 3019 if trace != nil && trace.WroteRequest != nil { 3020 trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) 3021 } 3022 } 3023 3024 func traceFirstResponseByte(trace *httptrace.ClientTrace) { 3025 if trace != nil && trace.GotFirstResponseByte != nil { 3026 trace.GotFirstResponseByte() 3027 } 3028 }