golang.org/x/net@v0.25.1-0.20240516223405-c87a5b62e243/http2/transport.go (about) 1 // Copyright 2015 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // Transport code. 6 7 package http2 8 9 import ( 10 "bufio" 11 "bytes" 12 "compress/gzip" 13 "context" 14 "crypto/rand" 15 "crypto/tls" 16 "errors" 17 "fmt" 18 "io" 19 "io/fs" 20 "log" 21 "math" 22 "math/bits" 23 mathrand "math/rand" 24 "net" 25 "net/http" 26 "net/http/httptrace" 27 "net/textproto" 28 "os" 29 "sort" 30 "strconv" 31 "strings" 32 "sync" 33 "sync/atomic" 34 "time" 35 36 "golang.org/x/net/http/httpguts" 37 "golang.org/x/net/http2/hpack" 38 "golang.org/x/net/idna" 39 ) 40 41 const ( 42 // transportDefaultConnFlow is how many connection-level flow control 43 // tokens we give the server at start-up, past the default 64k. 44 transportDefaultConnFlow = 1 << 30 45 46 // transportDefaultStreamFlow is how many stream-level flow 47 // control tokens we announce to the peer, and how many bytes 48 // we buffer per stream. 49 transportDefaultStreamFlow = 4 << 20 50 51 defaultUserAgent = "Go-http-client/2.0" 52 53 // initialMaxConcurrentStreams is a connections maxConcurrentStreams until 54 // it's received servers initial SETTINGS frame, which corresponds with the 55 // spec's minimum recommended value. 56 initialMaxConcurrentStreams = 100 57 58 // defaultMaxConcurrentStreams is a connections default maxConcurrentStreams 59 // if the server doesn't include one in its initial SETTINGS frame. 60 defaultMaxConcurrentStreams = 1000 61 ) 62 63 // Transport is an HTTP/2 Transport. 64 // 65 // A Transport internally caches connections to servers. It is safe 66 // for concurrent use by multiple goroutines. 67 type Transport struct { 68 // DialTLSContext specifies an optional dial function with context for 69 // creating TLS connections for requests. 70 // 71 // If DialTLSContext and DialTLS is nil, tls.Dial is used. 72 // 73 // If the returned net.Conn has a ConnectionState method like tls.Conn, 74 // it will be used to set http.Response.TLS. 75 DialTLSContext func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error) 76 77 // DialTLS specifies an optional dial function for creating 78 // TLS connections for requests. 79 // 80 // If DialTLSContext and DialTLS is nil, tls.Dial is used. 81 // 82 // Deprecated: Use DialTLSContext instead, which allows the transport 83 // to cancel dials as soon as they are no longer needed. 84 // If both are set, DialTLSContext takes priority. 85 DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) 86 87 // TLSClientConfig specifies the TLS configuration to use with 88 // tls.Client. If nil, the default configuration is used. 89 TLSClientConfig *tls.Config 90 91 // ConnPool optionally specifies an alternate connection pool to use. 92 // If nil, the default is used. 93 ConnPool ClientConnPool 94 95 // DisableCompression, if true, prevents the Transport from 96 // requesting compression with an "Accept-Encoding: gzip" 97 // request header when the Request contains no existing 98 // Accept-Encoding value. If the Transport requests gzip on 99 // its own and gets a gzipped response, it's transparently 100 // decoded in the Response.Body. However, if the user 101 // explicitly requested gzip it is not automatically 102 // uncompressed. 103 DisableCompression bool 104 105 // AllowHTTP, if true, permits HTTP/2 requests using the insecure, 106 // plain-text "http" scheme. Note that this does not enable h2c support. 107 AllowHTTP bool 108 109 // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to 110 // send in the initial settings frame. It is how many bytes 111 // of response headers are allowed. Unlike the http2 spec, zero here 112 // means to use a default limit (currently 10MB). If you actually 113 // want to advertise an unlimited value to the peer, Transport 114 // interprets the highest possible value here (0xffffffff or 1<<32-1) 115 // to mean no limit. 116 MaxHeaderListSize uint32 117 118 // MaxReadFrameSize is the http2 SETTINGS_MAX_FRAME_SIZE to send in the 119 // initial settings frame. It is the size in bytes of the largest frame 120 // payload that the sender is willing to receive. If 0, no setting is 121 // sent, and the value is provided by the peer, which should be 16384 122 // according to the spec: 123 // https://datatracker.ietf.org/doc/html/rfc7540#section-6.5.2. 124 // Values are bounded in the range 16k to 16M. 125 MaxReadFrameSize uint32 126 127 // MaxDecoderHeaderTableSize optionally specifies the http2 128 // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It 129 // informs the remote endpoint of the maximum size of the header compression 130 // table used to decode header blocks, in octets. If zero, the default value 131 // of 4096 is used. 132 MaxDecoderHeaderTableSize uint32 133 134 // MaxEncoderHeaderTableSize optionally specifies an upper limit for the 135 // header compression table used for encoding request headers. Received 136 // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero, 137 // the default value of 4096 is used. 138 MaxEncoderHeaderTableSize uint32 139 140 // StrictMaxConcurrentStreams controls whether the server's 141 // SETTINGS_MAX_CONCURRENT_STREAMS should be respected 142 // globally. If false, new TCP connections are created to the 143 // server as needed to keep each under the per-connection 144 // SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the 145 // server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as 146 // a global limit and callers of RoundTrip block when needed, 147 // waiting for their turn. 148 StrictMaxConcurrentStreams bool 149 150 // IdleConnTimeout is the maximum amount of time an idle 151 // (keep-alive) connection will remain idle before closing 152 // itself. 153 // Zero means no limit. 154 IdleConnTimeout time.Duration 155 156 // ReadIdleTimeout is the timeout after which a health check using ping 157 // frame will be carried out if no frame is received on the connection. 158 // Note that a ping response will is considered a received frame, so if 159 // there is no other traffic on the connection, the health check will 160 // be performed every ReadIdleTimeout interval. 161 // If zero, no health check is performed. 162 ReadIdleTimeout time.Duration 163 164 // PingTimeout is the timeout after which the connection will be closed 165 // if a response to Ping is not received. 166 // Defaults to 15s. 167 PingTimeout time.Duration 168 169 // WriteByteTimeout is the timeout after which the connection will be 170 // closed no data can be written to it. The timeout begins when data is 171 // available to write, and is extended whenever any bytes are written. 172 WriteByteTimeout time.Duration 173 174 // CountError, if non-nil, is called on HTTP/2 transport errors. 175 // It's intended to increment a metric for monitoring, such 176 // as an expvar or Prometheus metric. 177 // The errType consists of only ASCII word characters. 178 CountError func(errType string) 179 180 // t1, if non-nil, is the standard library Transport using 181 // this transport. Its settings are used (but not its 182 // RoundTrip method, etc). 183 t1 *http.Transport 184 185 connPoolOnce sync.Once 186 connPoolOrDef ClientConnPool // non-nil version of ConnPool 187 188 syncHooks *testSyncHooks 189 } 190 191 func (t *Transport) maxHeaderListSize() uint32 { 192 if t.MaxHeaderListSize == 0 { 193 return 10 << 20 194 } 195 if t.MaxHeaderListSize == 0xffffffff { 196 return 0 197 } 198 return t.MaxHeaderListSize 199 } 200 201 func (t *Transport) maxFrameReadSize() uint32 { 202 if t.MaxReadFrameSize == 0 { 203 return 0 // use the default provided by the peer 204 } 205 if t.MaxReadFrameSize < minMaxFrameSize { 206 return minMaxFrameSize 207 } 208 if t.MaxReadFrameSize > maxFrameSize { 209 return maxFrameSize 210 } 211 return t.MaxReadFrameSize 212 } 213 214 func (t *Transport) disableCompression() bool { 215 return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) 216 } 217 218 func (t *Transport) pingTimeout() time.Duration { 219 if t.PingTimeout == 0 { 220 return 15 * time.Second 221 } 222 return t.PingTimeout 223 224 } 225 226 // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. 227 // It returns an error if t1 has already been HTTP/2-enabled. 228 // 229 // Use ConfigureTransports instead to configure the HTTP/2 Transport. 230 func ConfigureTransport(t1 *http.Transport) error { 231 _, err := ConfigureTransports(t1) 232 return err 233 } 234 235 // ConfigureTransports configures a net/http HTTP/1 Transport to use HTTP/2. 236 // It returns a new HTTP/2 Transport for further configuration. 237 // It returns an error if t1 has already been HTTP/2-enabled. 238 func ConfigureTransports(t1 *http.Transport) (*Transport, error) { 239 return configureTransports(t1) 240 } 241 242 func configureTransports(t1 *http.Transport) (*Transport, error) { 243 connPool := new(clientConnPool) 244 t2 := &Transport{ 245 ConnPool: noDialClientConnPool{connPool}, 246 t1: t1, 247 } 248 connPool.t = t2 249 if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { 250 return nil, err 251 } 252 if t1.TLSClientConfig == nil { 253 t1.TLSClientConfig = new(tls.Config) 254 } 255 if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { 256 t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) 257 } 258 if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { 259 t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") 260 } 261 upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { 262 addr := authorityAddr("https", authority) 263 if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { 264 go c.Close() 265 return erringRoundTripper{err} 266 } else if !used { 267 // Turns out we don't need this c. 268 // For example, two goroutines made requests to the same host 269 // at the same time, both kicking off TCP dials. (since protocol 270 // was unknown) 271 go c.Close() 272 } 273 return t2 274 } 275 if m := t1.TLSNextProto; len(m) == 0 { 276 t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ 277 "h2": upgradeFn, 278 } 279 } else { 280 m["h2"] = upgradeFn 281 } 282 return t2, nil 283 } 284 285 func (t *Transport) connPool() ClientConnPool { 286 t.connPoolOnce.Do(t.initConnPool) 287 return t.connPoolOrDef 288 } 289 290 func (t *Transport) initConnPool() { 291 if t.ConnPool != nil { 292 t.connPoolOrDef = t.ConnPool 293 } else { 294 t.connPoolOrDef = &clientConnPool{t: t} 295 } 296 } 297 298 // ClientConn is the state of a single HTTP/2 client connection to an 299 // HTTP/2 server. 300 type ClientConn struct { 301 t *Transport 302 tconn net.Conn // usually *tls.Conn, except specialized impls 303 tlsState *tls.ConnectionState // nil only for specialized impls 304 reused uint32 // whether conn is being reused; atomic 305 singleUse bool // whether being used for a single http.Request 306 getConnCalled bool // used by clientConnPool 307 308 // readLoop goroutine fields: 309 readerDone chan struct{} // closed on error 310 readerErr error // set before readerDone is closed 311 312 idleTimeout time.Duration // or 0 for never 313 idleTimer timer 314 315 mu sync.Mutex // guards following 316 cond *sync.Cond // hold mu; broadcast on flow/closed changes 317 flow outflow // our conn-level flow control quota (cs.outflow is per stream) 318 inflow inflow // peer's conn-level flow control 319 doNotReuse bool // whether conn is marked to not be reused for any future requests 320 closing bool 321 closed bool 322 seenSettings bool // true if we've seen a settings frame, false otherwise 323 wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back 324 goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received 325 goAwayDebug string // goAway frame's debug data, retained as a string 326 streams map[uint32]*clientStream // client-initiated 327 streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip 328 nextStreamID uint32 329 pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams 330 pings map[[8]byte]chan struct{} // in flight ping data to notification channel 331 br *bufio.Reader 332 lastActive time.Time 333 lastIdle time.Time // time last idle 334 // Settings from peer: (also guarded by wmu) 335 maxFrameSize uint32 336 maxConcurrentStreams uint32 337 peerMaxHeaderListSize uint64 338 peerMaxHeaderTableSize uint32 339 initialWindowSize uint32 340 341 // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. 342 // Write to reqHeaderMu to lock it, read from it to unlock. 343 // Lock reqmu BEFORE mu or wmu. 344 reqHeaderMu chan struct{} 345 346 // wmu is held while writing. 347 // Acquire BEFORE mu when holding both, to avoid blocking mu on network writes. 348 // Only acquire both at the same time when changing peer settings. 349 wmu sync.Mutex 350 bw *bufio.Writer 351 fr *Framer 352 werr error // first write error that has occurred 353 hbuf bytes.Buffer // HPACK encoder writes into this 354 henc *hpack.Encoder 355 356 syncHooks *testSyncHooks // can be nil 357 } 358 359 // Hook points used for testing. 360 // Outside of tests, cc.syncHooks is nil and these all have minimal implementations. 361 // Inside tests, see the testSyncHooks function docs. 362 363 // goRun starts a new goroutine. 364 func (cc *ClientConn) goRun(f func()) { 365 if cc.syncHooks != nil { 366 cc.syncHooks.goRun(f) 367 return 368 } 369 go f() 370 } 371 372 // condBroadcast is cc.cond.Broadcast. 373 func (cc *ClientConn) condBroadcast() { 374 if cc.syncHooks != nil { 375 cc.syncHooks.condBroadcast(cc.cond) 376 } 377 cc.cond.Broadcast() 378 } 379 380 // condWait is cc.cond.Wait. 381 func (cc *ClientConn) condWait() { 382 if cc.syncHooks != nil { 383 cc.syncHooks.condWait(cc.cond) 384 } 385 cc.cond.Wait() 386 } 387 388 // newTimer creates a new time.Timer, or a synthetic timer in tests. 389 func (cc *ClientConn) newTimer(d time.Duration) timer { 390 if cc.syncHooks != nil { 391 return cc.syncHooks.newTimer(d) 392 } 393 return newTimeTimer(d) 394 } 395 396 // afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. 397 func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { 398 if cc.syncHooks != nil { 399 return cc.syncHooks.afterFunc(d, f) 400 } 401 return newTimeAfterFunc(d, f) 402 } 403 404 func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { 405 if cc.syncHooks != nil { 406 return cc.syncHooks.contextWithTimeout(ctx, d) 407 } 408 return context.WithTimeout(ctx, d) 409 } 410 411 // clientStream is the state for a single HTTP/2 stream. One of these 412 // is created for each Transport.RoundTrip call. 413 type clientStream struct { 414 cc *ClientConn 415 416 // Fields of Request that we may access even after the response body is closed. 417 ctx context.Context 418 reqCancel <-chan struct{} 419 420 trace *httptrace.ClientTrace // or nil 421 ID uint32 422 bufPipe pipe // buffered pipe with the flow-controlled response payload 423 requestedGzip bool 424 isHead bool 425 426 abortOnce sync.Once 427 abort chan struct{} // closed to signal stream should end immediately 428 abortErr error // set if abort is closed 429 430 peerClosed chan struct{} // closed when the peer sends an END_STREAM flag 431 donec chan struct{} // closed after the stream is in the closed state 432 on100 chan struct{} // buffered; written to if a 100 is received 433 434 respHeaderRecv chan struct{} // closed when headers are received 435 res *http.Response // set if respHeaderRecv is closed 436 437 flow outflow // guarded by cc.mu 438 inflow inflow // guarded by cc.mu 439 bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read 440 readErr error // sticky read error; owned by transportResponseBody.Read 441 442 reqBody io.ReadCloser 443 reqBodyContentLength int64 // -1 means unknown 444 reqBodyClosed chan struct{} // guarded by cc.mu; non-nil on Close, closed when done 445 446 // owned by writeRequest: 447 sentEndStream bool // sent an END_STREAM flag to the peer 448 sentHeaders bool 449 450 // owned by clientConnReadLoop: 451 firstByte bool // got the first response byte 452 pastHeaders bool // got first MetaHeadersFrame (actual headers) 453 pastTrailers bool // got optional second MetaHeadersFrame (trailers) 454 num1xx uint8 // number of 1xx responses seen 455 readClosed bool // peer sent an END_STREAM flag 456 readAborted bool // read loop reset the stream 457 458 trailer http.Header // accumulated trailers 459 resTrailer *http.Header // client's Response.Trailer 460 } 461 462 var got1xxFuncForTests func(int, textproto.MIMEHeader) error 463 464 // get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func, 465 // if any. It returns nil if not set or if the Go version is too old. 466 func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error { 467 if fn := got1xxFuncForTests; fn != nil { 468 return fn 469 } 470 return traceGot1xxResponseFunc(cs.trace) 471 } 472 473 func (cs *clientStream) abortStream(err error) { 474 cs.cc.mu.Lock() 475 defer cs.cc.mu.Unlock() 476 cs.abortStreamLocked(err) 477 } 478 479 func (cs *clientStream) abortStreamLocked(err error) { 480 cs.abortOnce.Do(func() { 481 cs.abortErr = err 482 close(cs.abort) 483 }) 484 if cs.reqBody != nil { 485 cs.closeReqBodyLocked() 486 } 487 // TODO(dneil): Clean up tests where cs.cc.cond is nil. 488 if cs.cc.cond != nil { 489 // Wake up writeRequestBody if it is waiting on flow control. 490 cs.cc.condBroadcast() 491 } 492 } 493 494 func (cs *clientStream) abortRequestBodyWrite() { 495 cc := cs.cc 496 cc.mu.Lock() 497 defer cc.mu.Unlock() 498 if cs.reqBody != nil && cs.reqBodyClosed == nil { 499 cs.closeReqBodyLocked() 500 cc.condBroadcast() 501 } 502 } 503 504 func (cs *clientStream) closeReqBodyLocked() { 505 if cs.reqBodyClosed != nil { 506 return 507 } 508 cs.reqBodyClosed = make(chan struct{}) 509 reqBodyClosed := cs.reqBodyClosed 510 cs.cc.goRun(func() { 511 cs.reqBody.Close() 512 close(reqBodyClosed) 513 }) 514 } 515 516 type stickyErrWriter struct { 517 conn net.Conn 518 timeout time.Duration 519 err *error 520 } 521 522 func (sew stickyErrWriter) Write(p []byte) (n int, err error) { 523 if *sew.err != nil { 524 return 0, *sew.err 525 } 526 for { 527 if sew.timeout != 0 { 528 sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) 529 } 530 nn, err := sew.conn.Write(p[n:]) 531 n += nn 532 if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { 533 // Keep extending the deadline so long as we're making progress. 534 continue 535 } 536 if sew.timeout != 0 { 537 sew.conn.SetWriteDeadline(time.Time{}) 538 } 539 *sew.err = err 540 return n, err 541 } 542 } 543 544 // noCachedConnError is the concrete type of ErrNoCachedConn, which 545 // needs to be detected by net/http regardless of whether it's its 546 // bundled version (in h2_bundle.go with a rewritten type name) or 547 // from a user's x/net/http2. As such, as it has a unique method name 548 // (IsHTTP2NoCachedConnError) that net/http sniffs for via func 549 // isNoCachedConnError. 550 type noCachedConnError struct{} 551 552 func (noCachedConnError) IsHTTP2NoCachedConnError() {} 553 func (noCachedConnError) Error() string { return "http2: no cached connection was available" } 554 555 // isNoCachedConnError reports whether err is of type noCachedConnError 556 // or its equivalent renamed type in net/http2's h2_bundle.go. Both types 557 // may coexist in the same running program. 558 func isNoCachedConnError(err error) bool { 559 _, ok := err.(interface{ IsHTTP2NoCachedConnError() }) 560 return ok 561 } 562 563 var ErrNoCachedConn error = noCachedConnError{} 564 565 // RoundTripOpt are options for the Transport.RoundTripOpt method. 566 type RoundTripOpt struct { 567 // OnlyCachedConn controls whether RoundTripOpt may 568 // create a new TCP connection. If set true and 569 // no cached connection is available, RoundTripOpt 570 // will return ErrNoCachedConn. 571 OnlyCachedConn bool 572 } 573 574 func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { 575 return t.RoundTripOpt(req, RoundTripOpt{}) 576 } 577 578 // authorityAddr returns a given authority (a host/IP, or host:port / ip:port) 579 // and returns a host:port. The port 443 is added if needed. 580 func authorityAddr(scheme string, authority string) (addr string) { 581 host, port, err := net.SplitHostPort(authority) 582 if err != nil { // authority didn't have a port 583 host = authority 584 port = "" 585 } 586 if port == "" { // authority's port was empty 587 port = "443" 588 if scheme == "http" { 589 port = "80" 590 } 591 } 592 if a, err := idna.ToASCII(host); err == nil { 593 host = a 594 } 595 // IPv6 address literal, without a port: 596 if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { 597 return host + ":" + port 598 } 599 return net.JoinHostPort(host, port) 600 } 601 602 // RoundTripOpt is like RoundTrip, but takes options. 603 func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { 604 if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { 605 return nil, errors.New("http2: unsupported scheme") 606 } 607 608 addr := authorityAddr(req.URL.Scheme, req.URL.Host) 609 for retry := 0; ; retry++ { 610 cc, err := t.connPool().GetClientConn(req, addr) 611 if err != nil { 612 t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) 613 return nil, err 614 } 615 reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) 616 traceGotConn(req, cc, reused) 617 res, err := cc.RoundTrip(req) 618 if err != nil && retry <= 6 { 619 roundTripErr := err 620 if req, err = shouldRetryRequest(req, err); err == nil { 621 // After the first retry, do exponential backoff with 10% jitter. 622 if retry == 0 { 623 t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) 624 continue 625 } 626 backoff := float64(uint(1) << (uint(retry) - 1)) 627 backoff += backoff * (0.1 * mathrand.Float64()) 628 d := time.Second * time.Duration(backoff) 629 var tm timer 630 if t.syncHooks != nil { 631 tm = t.syncHooks.newTimer(d) 632 t.syncHooks.blockUntil(func() bool { 633 select { 634 case <-tm.C(): 635 case <-req.Context().Done(): 636 default: 637 return false 638 } 639 return true 640 }) 641 } else { 642 tm = newTimeTimer(d) 643 } 644 select { 645 case <-tm.C(): 646 t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) 647 continue 648 case <-req.Context().Done(): 649 tm.Stop() 650 err = req.Context().Err() 651 } 652 } 653 } 654 if err != nil { 655 t.vlogf("RoundTrip failure: %v", err) 656 return nil, err 657 } 658 return res, nil 659 } 660 } 661 662 // CloseIdleConnections closes any connections which were previously 663 // connected from previous requests but are now sitting idle. 664 // It does not interrupt any connections currently in use. 665 func (t *Transport) CloseIdleConnections() { 666 if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { 667 cp.closeIdleConnections() 668 } 669 } 670 671 var ( 672 errClientConnClosed = errors.New("http2: client conn is closed") 673 errClientConnUnusable = errors.New("http2: client conn not usable") 674 errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") 675 ) 676 677 // shouldRetryRequest is called by RoundTrip when a request fails to get 678 // response headers. It is always called with a non-nil error. 679 // It returns either a request to retry (either the same request, or a 680 // modified clone), or an error if the request can't be replayed. 681 func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) { 682 if !canRetryError(err) { 683 return nil, err 684 } 685 // If the Body is nil (or http.NoBody), it's safe to reuse 686 // this request and its Body. 687 if req.Body == nil || req.Body == http.NoBody { 688 return req, nil 689 } 690 691 // If the request body can be reset back to its original 692 // state via the optional req.GetBody, do that. 693 if req.GetBody != nil { 694 body, err := req.GetBody() 695 if err != nil { 696 return nil, err 697 } 698 newReq := *req 699 newReq.Body = body 700 return &newReq, nil 701 } 702 703 // The Request.Body can't reset back to the beginning, but we 704 // don't seem to have started to read from it yet, so reuse 705 // the request directly. 706 if err == errClientConnUnusable { 707 return req, nil 708 } 709 710 return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) 711 } 712 713 func canRetryError(err error) bool { 714 if err == errClientConnUnusable || err == errClientConnGotGoAway { 715 return true 716 } 717 if se, ok := err.(StreamError); ok { 718 if se.Code == ErrCodeProtocol && se.Cause == errFromPeer { 719 // See golang/go#47635, golang/go#42777 720 return true 721 } 722 return se.Code == ErrCodeRefusedStream 723 } 724 return false 725 } 726 727 func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { 728 if t.syncHooks != nil { 729 return t.newClientConn(nil, singleUse, t.syncHooks) 730 } 731 host, _, err := net.SplitHostPort(addr) 732 if err != nil { 733 return nil, err 734 } 735 tconn, err := t.dialTLS(ctx, "tcp", addr, t.newTLSConfig(host)) 736 if err != nil { 737 return nil, err 738 } 739 return t.newClientConn(tconn, singleUse, nil) 740 } 741 742 func (t *Transport) newTLSConfig(host string) *tls.Config { 743 cfg := new(tls.Config) 744 if t.TLSClientConfig != nil { 745 *cfg = *t.TLSClientConfig.Clone() 746 } 747 if !strSliceContains(cfg.NextProtos, NextProtoTLS) { 748 cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) 749 } 750 if cfg.ServerName == "" { 751 cfg.ServerName = host 752 } 753 return cfg 754 } 755 756 func (t *Transport) dialTLS(ctx context.Context, network, addr string, tlsCfg *tls.Config) (net.Conn, error) { 757 if t.DialTLSContext != nil { 758 return t.DialTLSContext(ctx, network, addr, tlsCfg) 759 } else if t.DialTLS != nil { 760 return t.DialTLS(network, addr, tlsCfg) 761 } 762 763 tlsCn, err := t.dialTLSWithContext(ctx, network, addr, tlsCfg) 764 if err != nil { 765 return nil, err 766 } 767 state := tlsCn.ConnectionState() 768 if p := state.NegotiatedProtocol; p != NextProtoTLS { 769 return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) 770 } 771 if !state.NegotiatedProtocolIsMutual { 772 return nil, errors.New("http2: could not negotiate protocol mutually") 773 } 774 return tlsCn, nil 775 } 776 777 // disableKeepAlives reports whether connections should be closed as 778 // soon as possible after handling the first request. 779 func (t *Transport) disableKeepAlives() bool { 780 return t.t1 != nil && t.t1.DisableKeepAlives 781 } 782 783 func (t *Transport) expectContinueTimeout() time.Duration { 784 if t.t1 == nil { 785 return 0 786 } 787 return t.t1.ExpectContinueTimeout 788 } 789 790 func (t *Transport) maxDecoderHeaderTableSize() uint32 { 791 if v := t.MaxDecoderHeaderTableSize; v > 0 { 792 return v 793 } 794 return initialHeaderTableSize 795 } 796 797 func (t *Transport) maxEncoderHeaderTableSize() uint32 { 798 if v := t.MaxEncoderHeaderTableSize; v > 0 { 799 return v 800 } 801 return initialHeaderTableSize 802 } 803 804 func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { 805 return t.newClientConn(c, t.disableKeepAlives(), nil) 806 } 807 808 func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) { 809 cc := &ClientConn{ 810 t: t, 811 tconn: c, 812 readerDone: make(chan struct{}), 813 nextStreamID: 1, 814 maxFrameSize: 16 << 10, // spec default 815 initialWindowSize: 65535, // spec default 816 maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. 817 peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 818 streams: make(map[uint32]*clientStream), 819 singleUse: singleUse, 820 wantSettingsAck: true, 821 pings: make(map[[8]byte]chan struct{}), 822 reqHeaderMu: make(chan struct{}, 1), 823 syncHooks: hooks, 824 } 825 if hooks != nil { 826 hooks.newclientconn(cc) 827 c = cc.tconn 828 } 829 if VerboseLogs { 830 t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) 831 } 832 833 cc.cond = sync.NewCond(&cc.mu) 834 cc.flow.add(int32(initialWindowSize)) 835 836 // TODO: adjust this writer size to account for frame size + 837 // MTU + crypto/tls record padding. 838 cc.bw = bufio.NewWriter(stickyErrWriter{ 839 conn: c, 840 timeout: t.WriteByteTimeout, 841 err: &cc.werr, 842 }) 843 cc.br = bufio.NewReader(c) 844 cc.fr = NewFramer(cc.bw, cc.br) 845 if t.maxFrameReadSize() != 0 { 846 cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) 847 } 848 if t.CountError != nil { 849 cc.fr.countError = t.CountError 850 } 851 maxHeaderTableSize := t.maxDecoderHeaderTableSize() 852 cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) 853 cc.fr.MaxHeaderListSize = t.maxHeaderListSize() 854 855 cc.henc = hpack.NewEncoder(&cc.hbuf) 856 cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) 857 cc.peerMaxHeaderTableSize = initialHeaderTableSize 858 859 if t.AllowHTTP { 860 cc.nextStreamID = 3 861 } 862 863 if cs, ok := c.(connectionStater); ok { 864 state := cs.ConnectionState() 865 cc.tlsState = &state 866 } 867 868 initialSettings := []Setting{ 869 {ID: SettingEnablePush, Val: 0}, 870 {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, 871 } 872 if max := t.maxFrameReadSize(); max != 0 { 873 initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) 874 } 875 if max := t.maxHeaderListSize(); max != 0 { 876 initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) 877 } 878 if maxHeaderTableSize != initialHeaderTableSize { 879 initialSettings = append(initialSettings, Setting{ID: SettingHeaderTableSize, Val: maxHeaderTableSize}) 880 } 881 882 cc.bw.Write(clientPreface) 883 cc.fr.WriteSettings(initialSettings...) 884 cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) 885 cc.inflow.init(transportDefaultConnFlow + initialWindowSize) 886 cc.bw.Flush() 887 if cc.werr != nil { 888 cc.Close() 889 return nil, cc.werr 890 } 891 892 // Start the idle timer after the connection is fully initialized. 893 if d := t.idleConnTimeout(); d != 0 { 894 cc.idleTimeout = d 895 cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) 896 } 897 898 cc.goRun(cc.readLoop) 899 return cc, nil 900 } 901 902 func (cc *ClientConn) healthCheck() { 903 pingTimeout := cc.t.pingTimeout() 904 // We don't need to periodically ping in the health check, because the readLoop of ClientConn will 905 // trigger the healthCheck again if there is no frame received. 906 ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) 907 defer cancel() 908 cc.vlogf("http2: Transport sending health check") 909 err := cc.Ping(ctx) 910 if err != nil { 911 cc.vlogf("http2: Transport health check failure: %v", err) 912 cc.closeForLostPing() 913 } else { 914 cc.vlogf("http2: Transport health check success") 915 } 916 } 917 918 // SetDoNotReuse marks cc as not reusable for future HTTP requests. 919 func (cc *ClientConn) SetDoNotReuse() { 920 cc.mu.Lock() 921 defer cc.mu.Unlock() 922 cc.doNotReuse = true 923 } 924 925 func (cc *ClientConn) setGoAway(f *GoAwayFrame) { 926 cc.mu.Lock() 927 defer cc.mu.Unlock() 928 929 old := cc.goAway 930 cc.goAway = f 931 932 // Merge the previous and current GoAway error frames. 933 if cc.goAwayDebug == "" { 934 cc.goAwayDebug = string(f.DebugData()) 935 } 936 if old != nil && old.ErrCode != ErrCodeNo { 937 cc.goAway.ErrCode = old.ErrCode 938 } 939 last := f.LastStreamID 940 for streamID, cs := range cc.streams { 941 if streamID <= last { 942 // The server's GOAWAY indicates that it received this stream. 943 // It will either finish processing it, or close the connection 944 // without doing so. Either way, leave the stream alone for now. 945 continue 946 } 947 if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo { 948 // Don't retry the first stream on a connection if we get a non-NO error. 949 // If the server is sending an error on a new connection, 950 // retrying the request on a new one probably isn't going to work. 951 cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode)) 952 } else { 953 // Aborting the stream with errClentConnGotGoAway indicates that 954 // the request should be retried on a new connection. 955 cs.abortStreamLocked(errClientConnGotGoAway) 956 } 957 } 958 } 959 960 // CanTakeNewRequest reports whether the connection can take a new request, 961 // meaning it has not been closed or received or sent a GOAWAY. 962 // 963 // If the caller is going to immediately make a new request on this 964 // connection, use ReserveNewRequest instead. 965 func (cc *ClientConn) CanTakeNewRequest() bool { 966 cc.mu.Lock() 967 defer cc.mu.Unlock() 968 return cc.canTakeNewRequestLocked() 969 } 970 971 // ReserveNewRequest is like CanTakeNewRequest but also reserves a 972 // concurrent stream in cc. The reservation is decremented on the 973 // next call to RoundTrip. 974 func (cc *ClientConn) ReserveNewRequest() bool { 975 cc.mu.Lock() 976 defer cc.mu.Unlock() 977 if st := cc.idleStateLocked(); !st.canTakeNewRequest { 978 return false 979 } 980 cc.streamsReserved++ 981 return true 982 } 983 984 // ClientConnState describes the state of a ClientConn. 985 type ClientConnState struct { 986 // Closed is whether the connection is closed. 987 Closed bool 988 989 // Closing is whether the connection is in the process of 990 // closing. It may be closing due to shutdown, being a 991 // single-use connection, being marked as DoNotReuse, or 992 // having received a GOAWAY frame. 993 Closing bool 994 995 // StreamsActive is how many streams are active. 996 StreamsActive int 997 998 // StreamsReserved is how many streams have been reserved via 999 // ClientConn.ReserveNewRequest. 1000 StreamsReserved int 1001 1002 // StreamsPending is how many requests have been sent in excess 1003 // of the peer's advertised MaxConcurrentStreams setting and 1004 // are waiting for other streams to complete. 1005 StreamsPending int 1006 1007 // MaxConcurrentStreams is how many concurrent streams the 1008 // peer advertised as acceptable. Zero means no SETTINGS 1009 // frame has been received yet. 1010 MaxConcurrentStreams uint32 1011 1012 // LastIdle, if non-zero, is when the connection last 1013 // transitioned to idle state. 1014 LastIdle time.Time 1015 } 1016 1017 // State returns a snapshot of cc's state. 1018 func (cc *ClientConn) State() ClientConnState { 1019 cc.wmu.Lock() 1020 maxConcurrent := cc.maxConcurrentStreams 1021 if !cc.seenSettings { 1022 maxConcurrent = 0 1023 } 1024 cc.wmu.Unlock() 1025 1026 cc.mu.Lock() 1027 defer cc.mu.Unlock() 1028 return ClientConnState{ 1029 Closed: cc.closed, 1030 Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil, 1031 StreamsActive: len(cc.streams), 1032 StreamsReserved: cc.streamsReserved, 1033 StreamsPending: cc.pendingRequests, 1034 LastIdle: cc.lastIdle, 1035 MaxConcurrentStreams: maxConcurrent, 1036 } 1037 } 1038 1039 // clientConnIdleState describes the suitability of a client 1040 // connection to initiate a new RoundTrip request. 1041 type clientConnIdleState struct { 1042 canTakeNewRequest bool 1043 } 1044 1045 func (cc *ClientConn) idleState() clientConnIdleState { 1046 cc.mu.Lock() 1047 defer cc.mu.Unlock() 1048 return cc.idleStateLocked() 1049 } 1050 1051 func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { 1052 if cc.singleUse && cc.nextStreamID > 1 { 1053 return 1054 } 1055 var maxConcurrentOkay bool 1056 if cc.t.StrictMaxConcurrentStreams { 1057 // We'll tell the caller we can take a new request to 1058 // prevent the caller from dialing a new TCP 1059 // connection, but then we'll block later before 1060 // writing it. 1061 maxConcurrentOkay = true 1062 } else { 1063 maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams) 1064 } 1065 1066 st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && 1067 !cc.doNotReuse && 1068 int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && 1069 !cc.tooIdleLocked() 1070 return 1071 } 1072 1073 func (cc *ClientConn) canTakeNewRequestLocked() bool { 1074 st := cc.idleStateLocked() 1075 return st.canTakeNewRequest 1076 } 1077 1078 // tooIdleLocked reports whether this connection has been been sitting idle 1079 // for too much wall time. 1080 func (cc *ClientConn) tooIdleLocked() bool { 1081 // The Round(0) strips the monontonic clock reading so the 1082 // times are compared based on their wall time. We don't want 1083 // to reuse a connection that's been sitting idle during 1084 // VM/laptop suspend if monotonic time was also frozen. 1085 return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout 1086 } 1087 1088 // onIdleTimeout is called from a time.AfterFunc goroutine. It will 1089 // only be called when we're idle, but because we're coming from a new 1090 // goroutine, there could be a new request coming in at the same time, 1091 // so this simply calls the synchronized closeIfIdle to shut down this 1092 // connection. The timer could just call closeIfIdle, but this is more 1093 // clear. 1094 func (cc *ClientConn) onIdleTimeout() { 1095 cc.closeIfIdle() 1096 } 1097 1098 func (cc *ClientConn) closeConn() { 1099 t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn) 1100 defer t.Stop() 1101 cc.tconn.Close() 1102 } 1103 1104 // A tls.Conn.Close can hang for a long time if the peer is unresponsive. 1105 // Try to shut it down more aggressively. 1106 func (cc *ClientConn) forceCloseConn() { 1107 tc, ok := cc.tconn.(*tls.Conn) 1108 if !ok { 1109 return 1110 } 1111 if nc := tc.NetConn(); nc != nil { 1112 nc.Close() 1113 } 1114 } 1115 1116 func (cc *ClientConn) closeIfIdle() { 1117 cc.mu.Lock() 1118 if len(cc.streams) > 0 || cc.streamsReserved > 0 { 1119 cc.mu.Unlock() 1120 return 1121 } 1122 cc.closed = true 1123 nextID := cc.nextStreamID 1124 // TODO: do clients send GOAWAY too? maybe? Just Close: 1125 cc.mu.Unlock() 1126 1127 if VerboseLogs { 1128 cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) 1129 } 1130 cc.closeConn() 1131 } 1132 1133 func (cc *ClientConn) isDoNotReuseAndIdle() bool { 1134 cc.mu.Lock() 1135 defer cc.mu.Unlock() 1136 return cc.doNotReuse && len(cc.streams) == 0 1137 } 1138 1139 var shutdownEnterWaitStateHook = func() {} 1140 1141 // Shutdown gracefully closes the client connection, waiting for running streams to complete. 1142 func (cc *ClientConn) Shutdown(ctx context.Context) error { 1143 if err := cc.sendGoAway(); err != nil { 1144 return err 1145 } 1146 // Wait for all in-flight streams to complete or connection to close 1147 done := make(chan struct{}) 1148 cancelled := false // guarded by cc.mu 1149 cc.goRun(func() { 1150 cc.mu.Lock() 1151 defer cc.mu.Unlock() 1152 for { 1153 if len(cc.streams) == 0 || cc.closed { 1154 cc.closed = true 1155 close(done) 1156 break 1157 } 1158 if cancelled { 1159 break 1160 } 1161 cc.condWait() 1162 } 1163 }) 1164 shutdownEnterWaitStateHook() 1165 select { 1166 case <-done: 1167 cc.closeConn() 1168 return nil 1169 case <-ctx.Done(): 1170 cc.mu.Lock() 1171 // Free the goroutine above 1172 cancelled = true 1173 cc.condBroadcast() 1174 cc.mu.Unlock() 1175 return ctx.Err() 1176 } 1177 } 1178 1179 func (cc *ClientConn) sendGoAway() error { 1180 cc.mu.Lock() 1181 closing := cc.closing 1182 cc.closing = true 1183 maxStreamID := cc.nextStreamID 1184 cc.mu.Unlock() 1185 if closing { 1186 // GOAWAY sent already 1187 return nil 1188 } 1189 1190 cc.wmu.Lock() 1191 defer cc.wmu.Unlock() 1192 // Send a graceful shutdown frame to server 1193 if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil { 1194 return err 1195 } 1196 if err := cc.bw.Flush(); err != nil { 1197 return err 1198 } 1199 // Prevent new requests 1200 return nil 1201 } 1202 1203 // closes the client connection immediately. In-flight requests are interrupted. 1204 // err is sent to streams. 1205 func (cc *ClientConn) closeForError(err error) { 1206 cc.mu.Lock() 1207 cc.closed = true 1208 for _, cs := range cc.streams { 1209 cs.abortStreamLocked(err) 1210 } 1211 cc.condBroadcast() 1212 cc.mu.Unlock() 1213 cc.closeConn() 1214 } 1215 1216 // Close closes the client connection immediately. 1217 // 1218 // In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. 1219 func (cc *ClientConn) Close() error { 1220 err := errors.New("http2: client connection force closed via ClientConn.Close") 1221 cc.closeForError(err) 1222 return nil 1223 } 1224 1225 // closes the client connection immediately. In-flight requests are interrupted. 1226 func (cc *ClientConn) closeForLostPing() { 1227 err := errors.New("http2: client connection lost") 1228 if f := cc.t.CountError; f != nil { 1229 f("conn_close_lost_ping") 1230 } 1231 cc.closeForError(err) 1232 } 1233 1234 // errRequestCanceled is a copy of net/http's errRequestCanceled because it's not 1235 // exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. 1236 var errRequestCanceled = errors.New("net/http: request canceled") 1237 1238 func commaSeparatedTrailers(req *http.Request) (string, error) { 1239 keys := make([]string, 0, len(req.Trailer)) 1240 for k := range req.Trailer { 1241 k = canonicalHeader(k) 1242 switch k { 1243 case "Transfer-Encoding", "Trailer", "Content-Length": 1244 return "", fmt.Errorf("invalid Trailer key %q", k) 1245 } 1246 keys = append(keys, k) 1247 } 1248 if len(keys) > 0 { 1249 sort.Strings(keys) 1250 return strings.Join(keys, ","), nil 1251 } 1252 return "", nil 1253 } 1254 1255 func (cc *ClientConn) responseHeaderTimeout() time.Duration { 1256 if cc.t.t1 != nil { 1257 return cc.t.t1.ResponseHeaderTimeout 1258 } 1259 // No way to do this (yet?) with just an http2.Transport. Probably 1260 // no need. Request.Cancel this is the new way. We only need to support 1261 // this for compatibility with the old http.Transport fields when 1262 // we're doing transparent http2. 1263 return 0 1264 } 1265 1266 // checkConnHeaders checks whether req has any invalid connection-level headers. 1267 // per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. 1268 // Certain headers are special-cased as okay but not transmitted later. 1269 func checkConnHeaders(req *http.Request) error { 1270 if v := req.Header.Get("Upgrade"); v != "" { 1271 return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) 1272 } 1273 if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { 1274 return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) 1275 } 1276 if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { 1277 return fmt.Errorf("http2: invalid Connection request header: %q", vv) 1278 } 1279 return nil 1280 } 1281 1282 // actualContentLength returns a sanitized version of 1283 // req.ContentLength, where 0 actually means zero (not unknown) and -1 1284 // means unknown. 1285 func actualContentLength(req *http.Request) int64 { 1286 if req.Body == nil || req.Body == http.NoBody { 1287 return 0 1288 } 1289 if req.ContentLength != 0 { 1290 return req.ContentLength 1291 } 1292 return -1 1293 } 1294 1295 func (cc *ClientConn) decrStreamReservations() { 1296 cc.mu.Lock() 1297 defer cc.mu.Unlock() 1298 cc.decrStreamReservationsLocked() 1299 } 1300 1301 func (cc *ClientConn) decrStreamReservationsLocked() { 1302 if cc.streamsReserved > 0 { 1303 cc.streamsReserved-- 1304 } 1305 } 1306 1307 func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { 1308 return cc.roundTrip(req, nil) 1309 } 1310 1311 func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) { 1312 ctx := req.Context() 1313 cs := &clientStream{ 1314 cc: cc, 1315 ctx: ctx, 1316 reqCancel: req.Cancel, 1317 isHead: req.Method == "HEAD", 1318 reqBody: req.Body, 1319 reqBodyContentLength: actualContentLength(req), 1320 trace: httptrace.ContextClientTrace(ctx), 1321 peerClosed: make(chan struct{}), 1322 abort: make(chan struct{}), 1323 respHeaderRecv: make(chan struct{}), 1324 donec: make(chan struct{}), 1325 } 1326 cc.goRun(func() { 1327 cs.doRequest(req) 1328 }) 1329 1330 waitDone := func() error { 1331 if cc.syncHooks != nil { 1332 cc.syncHooks.blockUntil(func() bool { 1333 select { 1334 case <-cs.donec: 1335 case <-ctx.Done(): 1336 case <-cs.reqCancel: 1337 default: 1338 return false 1339 } 1340 return true 1341 }) 1342 } 1343 select { 1344 case <-cs.donec: 1345 return nil 1346 case <-ctx.Done(): 1347 return ctx.Err() 1348 case <-cs.reqCancel: 1349 return errRequestCanceled 1350 } 1351 } 1352 1353 handleResponseHeaders := func() (*http.Response, error) { 1354 res := cs.res 1355 if res.StatusCode > 299 { 1356 // On error or status code 3xx, 4xx, 5xx, etc abort any 1357 // ongoing write, assuming that the server doesn't care 1358 // about our request body. If the server replied with 1xx or 1359 // 2xx, however, then assume the server DOES potentially 1360 // want our body (e.g. full-duplex streaming: 1361 // golang.org/issue/13444). If it turns out the server 1362 // doesn't, they'll RST_STREAM us soon enough. This is a 1363 // heuristic to avoid adding knobs to Transport. Hopefully 1364 // we can keep it. 1365 cs.abortRequestBodyWrite() 1366 } 1367 res.Request = req 1368 res.TLS = cc.tlsState 1369 if res.Body == noBody && actualContentLength(req) == 0 { 1370 // If there isn't a request or response body still being 1371 // written, then wait for the stream to be closed before 1372 // RoundTrip returns. 1373 if err := waitDone(); err != nil { 1374 return nil, err 1375 } 1376 } 1377 return res, nil 1378 } 1379 1380 cancelRequest := func(cs *clientStream, err error) error { 1381 cs.cc.mu.Lock() 1382 bodyClosed := cs.reqBodyClosed 1383 cs.cc.mu.Unlock() 1384 // Wait for the request body to be closed. 1385 // 1386 // If nothing closed the body before now, abortStreamLocked 1387 // will have started a goroutine to close it. 1388 // 1389 // Closing the body before returning avoids a race condition 1390 // with net/http checking its readTrackingBody to see if the 1391 // body was read from or closed. See golang/go#60041. 1392 // 1393 // The body is closed in a separate goroutine without the 1394 // connection mutex held, but dropping the mutex before waiting 1395 // will keep us from holding it indefinitely if the body 1396 // close is slow for some reason. 1397 if bodyClosed != nil { 1398 <-bodyClosed 1399 } 1400 return err 1401 } 1402 1403 if streamf != nil { 1404 streamf(cs) 1405 } 1406 1407 for { 1408 if cc.syncHooks != nil { 1409 cc.syncHooks.blockUntil(func() bool { 1410 select { 1411 case <-cs.respHeaderRecv: 1412 case <-cs.abort: 1413 case <-ctx.Done(): 1414 case <-cs.reqCancel: 1415 default: 1416 return false 1417 } 1418 return true 1419 }) 1420 } 1421 select { 1422 case <-cs.respHeaderRecv: 1423 return handleResponseHeaders() 1424 case <-cs.abort: 1425 select { 1426 case <-cs.respHeaderRecv: 1427 // If both cs.respHeaderRecv and cs.abort are signaling, 1428 // pick respHeaderRecv. The server probably wrote the 1429 // response and immediately reset the stream. 1430 // golang.org/issue/49645 1431 return handleResponseHeaders() 1432 default: 1433 waitDone() 1434 return nil, cs.abortErr 1435 } 1436 case <-ctx.Done(): 1437 err := ctx.Err() 1438 cs.abortStream(err) 1439 return nil, cancelRequest(cs, err) 1440 case <-cs.reqCancel: 1441 cs.abortStream(errRequestCanceled) 1442 return nil, cancelRequest(cs, errRequestCanceled) 1443 } 1444 } 1445 } 1446 1447 // doRequest runs for the duration of the request lifetime. 1448 // 1449 // It sends the request and performs post-request cleanup (closing Request.Body, etc.). 1450 func (cs *clientStream) doRequest(req *http.Request) { 1451 err := cs.writeRequest(req) 1452 cs.cleanupWriteRequest(err) 1453 } 1454 1455 // writeRequest sends a request. 1456 // 1457 // It returns nil after the request is written, the response read, 1458 // and the request stream is half-closed by the peer. 1459 // 1460 // It returns non-nil if the request ends otherwise. 1461 // If the returned error is StreamError, the error Code may be used in resetting the stream. 1462 func (cs *clientStream) writeRequest(req *http.Request) (err error) { 1463 cc := cs.cc 1464 ctx := cs.ctx 1465 1466 if err := checkConnHeaders(req); err != nil { 1467 return err 1468 } 1469 1470 // Acquire the new-request lock by writing to reqHeaderMu. 1471 // This lock guards the critical section covering allocating a new stream ID 1472 // (requires mu) and creating the stream (requires wmu). 1473 if cc.reqHeaderMu == nil { 1474 panic("RoundTrip on uninitialized ClientConn") // for tests 1475 } 1476 var newStreamHook func(*clientStream) 1477 if cc.syncHooks != nil { 1478 newStreamHook = cc.syncHooks.newstream 1479 cc.syncHooks.blockUntil(func() bool { 1480 select { 1481 case cc.reqHeaderMu <- struct{}{}: 1482 <-cc.reqHeaderMu 1483 case <-cs.reqCancel: 1484 case <-ctx.Done(): 1485 default: 1486 return false 1487 } 1488 return true 1489 }) 1490 } 1491 select { 1492 case cc.reqHeaderMu <- struct{}{}: 1493 case <-cs.reqCancel: 1494 return errRequestCanceled 1495 case <-ctx.Done(): 1496 return ctx.Err() 1497 } 1498 1499 cc.mu.Lock() 1500 if cc.idleTimer != nil { 1501 cc.idleTimer.Stop() 1502 } 1503 cc.decrStreamReservationsLocked() 1504 if err := cc.awaitOpenSlotForStreamLocked(cs); err != nil { 1505 cc.mu.Unlock() 1506 <-cc.reqHeaderMu 1507 return err 1508 } 1509 cc.addStreamLocked(cs) // assigns stream ID 1510 if isConnectionCloseRequest(req) { 1511 cc.doNotReuse = true 1512 } 1513 cc.mu.Unlock() 1514 1515 if newStreamHook != nil { 1516 newStreamHook(cs) 1517 } 1518 1519 // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? 1520 if !cc.t.disableCompression() && 1521 req.Header.Get("Accept-Encoding") == "" && 1522 req.Header.Get("Range") == "" && 1523 !cs.isHead { 1524 // Request gzip only, not deflate. Deflate is ambiguous and 1525 // not as universally supported anyway. 1526 // See: https://zlib.net/zlib_faq.html#faq39 1527 // 1528 // Note that we don't request this for HEAD requests, 1529 // due to a bug in nginx: 1530 // http://trac.nginx.org/nginx/ticket/358 1531 // https://golang.org/issue/5522 1532 // 1533 // We don't request gzip if the request is for a range, since 1534 // auto-decoding a portion of a gzipped document will just fail 1535 // anyway. See https://golang.org/issue/8923 1536 cs.requestedGzip = true 1537 } 1538 1539 continueTimeout := cc.t.expectContinueTimeout() 1540 if continueTimeout != 0 { 1541 if !httpguts.HeaderValuesContainsToken(req.Header["Expect"], "100-continue") { 1542 continueTimeout = 0 1543 } else { 1544 cs.on100 = make(chan struct{}, 1) 1545 } 1546 } 1547 1548 // Past this point (where we send request headers), it is possible for 1549 // RoundTrip to return successfully. Since the RoundTrip contract permits 1550 // the caller to "mutate or reuse" the Request after closing the Response's Body, 1551 // we must take care when referencing the Request from here on. 1552 err = cs.encodeAndWriteHeaders(req) 1553 <-cc.reqHeaderMu 1554 if err != nil { 1555 return err 1556 } 1557 1558 hasBody := cs.reqBodyContentLength != 0 1559 if !hasBody { 1560 cs.sentEndStream = true 1561 } else { 1562 if continueTimeout != 0 { 1563 traceWait100Continue(cs.trace) 1564 timer := time.NewTimer(continueTimeout) 1565 select { 1566 case <-timer.C: 1567 err = nil 1568 case <-cs.on100: 1569 err = nil 1570 case <-cs.abort: 1571 err = cs.abortErr 1572 case <-ctx.Done(): 1573 err = ctx.Err() 1574 case <-cs.reqCancel: 1575 err = errRequestCanceled 1576 } 1577 timer.Stop() 1578 if err != nil { 1579 traceWroteRequest(cs.trace, err) 1580 return err 1581 } 1582 } 1583 1584 if err = cs.writeRequestBody(req); err != nil { 1585 if err != errStopReqBodyWrite { 1586 traceWroteRequest(cs.trace, err) 1587 return err 1588 } 1589 } else { 1590 cs.sentEndStream = true 1591 } 1592 } 1593 1594 traceWroteRequest(cs.trace, err) 1595 1596 var respHeaderTimer <-chan time.Time 1597 var respHeaderRecv chan struct{} 1598 if d := cc.responseHeaderTimeout(); d != 0 { 1599 timer := cc.newTimer(d) 1600 defer timer.Stop() 1601 respHeaderTimer = timer.C() 1602 respHeaderRecv = cs.respHeaderRecv 1603 } 1604 // Wait until the peer half-closes its end of the stream, 1605 // or until the request is aborted (via context, error, or otherwise), 1606 // whichever comes first. 1607 for { 1608 if cc.syncHooks != nil { 1609 cc.syncHooks.blockUntil(func() bool { 1610 select { 1611 case <-cs.peerClosed: 1612 case <-respHeaderTimer: 1613 case <-respHeaderRecv: 1614 case <-cs.abort: 1615 case <-ctx.Done(): 1616 case <-cs.reqCancel: 1617 default: 1618 return false 1619 } 1620 return true 1621 }) 1622 } 1623 select { 1624 case <-cs.peerClosed: 1625 return nil 1626 case <-respHeaderTimer: 1627 return errTimeout 1628 case <-respHeaderRecv: 1629 respHeaderRecv = nil 1630 respHeaderTimer = nil // keep waiting for END_STREAM 1631 case <-cs.abort: 1632 return cs.abortErr 1633 case <-ctx.Done(): 1634 return ctx.Err() 1635 case <-cs.reqCancel: 1636 return errRequestCanceled 1637 } 1638 } 1639 } 1640 1641 func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error { 1642 cc := cs.cc 1643 ctx := cs.ctx 1644 1645 cc.wmu.Lock() 1646 defer cc.wmu.Unlock() 1647 1648 // If the request was canceled while waiting for cc.mu, just quit. 1649 select { 1650 case <-cs.abort: 1651 return cs.abortErr 1652 case <-ctx.Done(): 1653 return ctx.Err() 1654 case <-cs.reqCancel: 1655 return errRequestCanceled 1656 default: 1657 } 1658 1659 // Encode headers. 1660 // 1661 // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is 1662 // sent by writeRequestBody below, along with any Trailers, 1663 // again in form HEADERS{1}, CONTINUATION{0,}) 1664 trailers, err := commaSeparatedTrailers(req) 1665 if err != nil { 1666 return err 1667 } 1668 hasTrailers := trailers != "" 1669 contentLen := actualContentLength(req) 1670 hasBody := contentLen != 0 1671 hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen) 1672 if err != nil { 1673 return err 1674 } 1675 1676 // Write the request. 1677 endStream := !hasBody && !hasTrailers 1678 cs.sentHeaders = true 1679 err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) 1680 traceWroteHeaders(cs.trace) 1681 return err 1682 } 1683 1684 // cleanupWriteRequest performs post-request tasks. 1685 // 1686 // If err (the result of writeRequest) is non-nil and the stream is not closed, 1687 // cleanupWriteRequest will send a reset to the peer. 1688 func (cs *clientStream) cleanupWriteRequest(err error) { 1689 cc := cs.cc 1690 1691 if cs.ID == 0 { 1692 // We were canceled before creating the stream, so return our reservation. 1693 cc.decrStreamReservations() 1694 } 1695 1696 // TODO: write h12Compare test showing whether 1697 // Request.Body is closed by the Transport, 1698 // and in multiple cases: server replies <=299 and >299 1699 // while still writing request body 1700 cc.mu.Lock() 1701 mustCloseBody := false 1702 if cs.reqBody != nil && cs.reqBodyClosed == nil { 1703 mustCloseBody = true 1704 cs.reqBodyClosed = make(chan struct{}) 1705 } 1706 bodyClosed := cs.reqBodyClosed 1707 cc.mu.Unlock() 1708 if mustCloseBody { 1709 cs.reqBody.Close() 1710 close(bodyClosed) 1711 } 1712 if bodyClosed != nil { 1713 <-bodyClosed 1714 } 1715 1716 if err != nil && cs.sentEndStream { 1717 // If the connection is closed immediately after the response is read, 1718 // we may be aborted before finishing up here. If the stream was closed 1719 // cleanly on both sides, there is no error. 1720 select { 1721 case <-cs.peerClosed: 1722 err = nil 1723 default: 1724 } 1725 } 1726 if err != nil { 1727 cs.abortStream(err) // possibly redundant, but harmless 1728 if cs.sentHeaders { 1729 if se, ok := err.(StreamError); ok { 1730 if se.Cause != errFromPeer { 1731 cc.writeStreamReset(cs.ID, se.Code, err) 1732 } 1733 } else { 1734 cc.writeStreamReset(cs.ID, ErrCodeCancel, err) 1735 } 1736 } 1737 cs.bufPipe.CloseWithError(err) // no-op if already closed 1738 } else { 1739 if cs.sentHeaders && !cs.sentEndStream { 1740 cc.writeStreamReset(cs.ID, ErrCodeNo, nil) 1741 } 1742 cs.bufPipe.CloseWithError(errRequestCanceled) 1743 } 1744 if cs.ID != 0 { 1745 cc.forgetStreamID(cs.ID) 1746 } 1747 1748 cc.wmu.Lock() 1749 werr := cc.werr 1750 cc.wmu.Unlock() 1751 if werr != nil { 1752 cc.Close() 1753 } 1754 1755 close(cs.donec) 1756 } 1757 1758 // awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams. 1759 // Must hold cc.mu. 1760 func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { 1761 for { 1762 cc.lastActive = time.Now() 1763 if cc.closed || !cc.canTakeNewRequestLocked() { 1764 return errClientConnUnusable 1765 } 1766 cc.lastIdle = time.Time{} 1767 if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) { 1768 return nil 1769 } 1770 cc.pendingRequests++ 1771 cc.condWait() 1772 cc.pendingRequests-- 1773 select { 1774 case <-cs.abort: 1775 return cs.abortErr 1776 default: 1777 } 1778 } 1779 } 1780 1781 // requires cc.wmu be held 1782 func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error { 1783 first := true // first frame written (HEADERS is first, then CONTINUATION) 1784 for len(hdrs) > 0 && cc.werr == nil { 1785 chunk := hdrs 1786 if len(chunk) > maxFrameSize { 1787 chunk = chunk[:maxFrameSize] 1788 } 1789 hdrs = hdrs[len(chunk):] 1790 endHeaders := len(hdrs) == 0 1791 if first { 1792 cc.fr.WriteHeaders(HeadersFrameParam{ 1793 StreamID: streamID, 1794 BlockFragment: chunk, 1795 EndStream: endStream, 1796 EndHeaders: endHeaders, 1797 }) 1798 first = false 1799 } else { 1800 cc.fr.WriteContinuation(streamID, endHeaders, chunk) 1801 } 1802 } 1803 cc.bw.Flush() 1804 return cc.werr 1805 } 1806 1807 // internal error values; they don't escape to callers 1808 var ( 1809 // abort request body write; don't send cancel 1810 errStopReqBodyWrite = errors.New("http2: aborting request body write") 1811 1812 // abort request body write, but send stream reset of cancel. 1813 errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") 1814 1815 errReqBodyTooLong = errors.New("http2: request body larger than specified content length") 1816 ) 1817 1818 // frameScratchBufferLen returns the length of a buffer to use for 1819 // outgoing request bodies to read/write to/from. 1820 // 1821 // It returns max(1, min(peer's advertised max frame size, 1822 // Request.ContentLength+1, 512KB)). 1823 func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int { 1824 const max = 512 << 10 1825 n := int64(maxFrameSize) 1826 if n > max { 1827 n = max 1828 } 1829 if cl := cs.reqBodyContentLength; cl != -1 && cl+1 < n { 1830 // Add an extra byte past the declared content-length to 1831 // give the caller's Request.Body io.Reader a chance to 1832 // give us more bytes than they declared, so we can catch it 1833 // early. 1834 n = cl + 1 1835 } 1836 if n < 1 { 1837 return 1 1838 } 1839 return int(n) // doesn't truncate; max is 512K 1840 } 1841 1842 // Seven bufPools manage different frame sizes. This helps to avoid scenarios where long-running 1843 // streaming requests using small frame sizes occupy large buffers initially allocated for prior 1844 // requests needing big buffers. The size ranges are as follows: 1845 // {0 KB, 16 KB], {16 KB, 32 KB], {32 KB, 64 KB], {64 KB, 128 KB], {128 KB, 256 KB], 1846 // {256 KB, 512 KB], {512 KB, infinity} 1847 // In practice, the maximum scratch buffer size should not exceed 512 KB due to 1848 // frameScratchBufferLen(maxFrameSize), thus the "infinity pool" should never be used. 1849 // It exists mainly as a safety measure, for potential future increases in max buffer size. 1850 var bufPools [7]sync.Pool // of *[]byte 1851 func bufPoolIndex(size int) int { 1852 if size <= 16384 { 1853 return 0 1854 } 1855 size -= 1 1856 bits := bits.Len(uint(size)) 1857 index := bits - 14 1858 if index >= len(bufPools) { 1859 return len(bufPools) - 1 1860 } 1861 return index 1862 } 1863 1864 func (cs *clientStream) writeRequestBody(req *http.Request) (err error) { 1865 cc := cs.cc 1866 body := cs.reqBody 1867 sentEnd := false // whether we sent the final DATA frame w/ END_STREAM 1868 1869 hasTrailers := req.Trailer != nil 1870 remainLen := cs.reqBodyContentLength 1871 hasContentLen := remainLen != -1 1872 1873 cc.mu.Lock() 1874 maxFrameSize := int(cc.maxFrameSize) 1875 cc.mu.Unlock() 1876 1877 // Scratch buffer for reading into & writing from. 1878 scratchLen := cs.frameScratchBufferLen(maxFrameSize) 1879 var buf []byte 1880 index := bufPoolIndex(scratchLen) 1881 if bp, ok := bufPools[index].Get().(*[]byte); ok && len(*bp) >= scratchLen { 1882 defer bufPools[index].Put(bp) 1883 buf = *bp 1884 } else { 1885 buf = make([]byte, scratchLen) 1886 defer bufPools[index].Put(&buf) 1887 } 1888 1889 var sawEOF bool 1890 for !sawEOF { 1891 n, err := body.Read(buf) 1892 if hasContentLen { 1893 remainLen -= int64(n) 1894 if remainLen == 0 && err == nil { 1895 // The request body's Content-Length was predeclared and 1896 // we just finished reading it all, but the underlying io.Reader 1897 // returned the final chunk with a nil error (which is one of 1898 // the two valid things a Reader can do at EOF). Because we'd prefer 1899 // to send the END_STREAM bit early, double-check that we're actually 1900 // at EOF. Subsequent reads should return (0, EOF) at this point. 1901 // If either value is different, we return an error in one of two ways below. 1902 var scratch [1]byte 1903 var n1 int 1904 n1, err = body.Read(scratch[:]) 1905 remainLen -= int64(n1) 1906 } 1907 if remainLen < 0 { 1908 err = errReqBodyTooLong 1909 return err 1910 } 1911 } 1912 if err != nil { 1913 cc.mu.Lock() 1914 bodyClosed := cs.reqBodyClosed != nil 1915 cc.mu.Unlock() 1916 switch { 1917 case bodyClosed: 1918 return errStopReqBodyWrite 1919 case err == io.EOF: 1920 sawEOF = true 1921 err = nil 1922 default: 1923 return err 1924 } 1925 } 1926 1927 remain := buf[:n] 1928 for len(remain) > 0 && err == nil { 1929 var allowed int32 1930 allowed, err = cs.awaitFlowControl(len(remain)) 1931 if err != nil { 1932 return err 1933 } 1934 cc.wmu.Lock() 1935 data := remain[:allowed] 1936 remain = remain[allowed:] 1937 sentEnd = sawEOF && len(remain) == 0 && !hasTrailers 1938 err = cc.fr.WriteData(cs.ID, sentEnd, data) 1939 if err == nil { 1940 // TODO(bradfitz): this flush is for latency, not bandwidth. 1941 // Most requests won't need this. Make this opt-in or 1942 // opt-out? Use some heuristic on the body type? Nagel-like 1943 // timers? Based on 'n'? Only last chunk of this for loop, 1944 // unless flow control tokens are low? For now, always. 1945 // If we change this, see comment below. 1946 err = cc.bw.Flush() 1947 } 1948 cc.wmu.Unlock() 1949 } 1950 if err != nil { 1951 return err 1952 } 1953 } 1954 1955 if sentEnd { 1956 // Already sent END_STREAM (which implies we have no 1957 // trailers) and flushed, because currently all 1958 // WriteData frames above get a flush. So we're done. 1959 return nil 1960 } 1961 1962 // Since the RoundTrip contract permits the caller to "mutate or reuse" 1963 // a request after the Response's Body is closed, verify that this hasn't 1964 // happened before accessing the trailers. 1965 cc.mu.Lock() 1966 trailer := req.Trailer 1967 err = cs.abortErr 1968 cc.mu.Unlock() 1969 if err != nil { 1970 return err 1971 } 1972 1973 cc.wmu.Lock() 1974 defer cc.wmu.Unlock() 1975 var trls []byte 1976 if len(trailer) > 0 { 1977 trls, err = cc.encodeTrailers(trailer) 1978 if err != nil { 1979 return err 1980 } 1981 } 1982 1983 // Two ways to send END_STREAM: either with trailers, or 1984 // with an empty DATA frame. 1985 if len(trls) > 0 { 1986 err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls) 1987 } else { 1988 err = cc.fr.WriteData(cs.ID, true, nil) 1989 } 1990 if ferr := cc.bw.Flush(); ferr != nil && err == nil { 1991 err = ferr 1992 } 1993 return err 1994 } 1995 1996 // awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow 1997 // control tokens from the server. 1998 // It returns either the non-zero number of tokens taken or an error 1999 // if the stream is dead. 2000 func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { 2001 cc := cs.cc 2002 ctx := cs.ctx 2003 cc.mu.Lock() 2004 defer cc.mu.Unlock() 2005 for { 2006 if cc.closed { 2007 return 0, errClientConnClosed 2008 } 2009 if cs.reqBodyClosed != nil { 2010 return 0, errStopReqBodyWrite 2011 } 2012 select { 2013 case <-cs.abort: 2014 return 0, cs.abortErr 2015 case <-ctx.Done(): 2016 return 0, ctx.Err() 2017 case <-cs.reqCancel: 2018 return 0, errRequestCanceled 2019 default: 2020 } 2021 if a := cs.flow.available(); a > 0 { 2022 take := a 2023 if int(take) > maxBytes { 2024 2025 take = int32(maxBytes) // can't truncate int; take is int32 2026 } 2027 if take > int32(cc.maxFrameSize) { 2028 take = int32(cc.maxFrameSize) 2029 } 2030 cs.flow.take(take) 2031 return take, nil 2032 } 2033 cc.condWait() 2034 } 2035 } 2036 2037 func validateHeaders(hdrs http.Header) string { 2038 for k, vv := range hdrs { 2039 if !httpguts.ValidHeaderFieldName(k) { 2040 return fmt.Sprintf("name %q", k) 2041 } 2042 for _, v := range vv { 2043 if !httpguts.ValidHeaderFieldValue(v) { 2044 // Don't include the value in the error, 2045 // because it may be sensitive. 2046 return fmt.Sprintf("value for header %q", k) 2047 } 2048 } 2049 } 2050 return "" 2051 } 2052 2053 var errNilRequestURL = errors.New("http2: Request.URI is nil") 2054 2055 // requires cc.wmu be held. 2056 func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { 2057 cc.hbuf.Reset() 2058 if req.URL == nil { 2059 return nil, errNilRequestURL 2060 } 2061 2062 host := req.Host 2063 if host == "" { 2064 host = req.URL.Host 2065 } 2066 host, err := httpguts.PunycodeHostPort(host) 2067 if err != nil { 2068 return nil, err 2069 } 2070 if !httpguts.ValidHostHeader(host) { 2071 return nil, errors.New("http2: invalid Host header") 2072 } 2073 2074 var path string 2075 if req.Method != "CONNECT" { 2076 path = req.URL.RequestURI() 2077 if !validPseudoPath(path) { 2078 orig := path 2079 path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) 2080 if !validPseudoPath(path) { 2081 if req.URL.Opaque != "" { 2082 return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) 2083 } else { 2084 return nil, fmt.Errorf("invalid request :path %q", orig) 2085 } 2086 } 2087 } 2088 } 2089 2090 // Check for any invalid headers+trailers and return an error before we 2091 // potentially pollute our hpack state. (We want to be able to 2092 // continue to reuse the hpack encoder for future requests) 2093 if err := validateHeaders(req.Header); err != "" { 2094 return nil, fmt.Errorf("invalid HTTP header %s", err) 2095 } 2096 if err := validateHeaders(req.Trailer); err != "" { 2097 return nil, fmt.Errorf("invalid HTTP trailer %s", err) 2098 } 2099 2100 enumerateHeaders := func(f func(name, value string)) { 2101 // 8.1.2.3 Request Pseudo-Header Fields 2102 // The :path pseudo-header field includes the path and query parts of the 2103 // target URI (the path-absolute production and optionally a '?' character 2104 // followed by the query production, see Sections 3.3 and 3.4 of 2105 // [RFC3986]). 2106 f(":authority", host) 2107 m := req.Method 2108 if m == "" { 2109 m = http.MethodGet 2110 } 2111 f(":method", m) 2112 if req.Method != "CONNECT" { 2113 f(":path", path) 2114 f(":scheme", req.URL.Scheme) 2115 } 2116 if trailers != "" { 2117 f("trailer", trailers) 2118 } 2119 2120 var didUA bool 2121 for k, vv := range req.Header { 2122 if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { 2123 // Host is :authority, already sent. 2124 // Content-Length is automatic, set below. 2125 continue 2126 } else if asciiEqualFold(k, "connection") || 2127 asciiEqualFold(k, "proxy-connection") || 2128 asciiEqualFold(k, "transfer-encoding") || 2129 asciiEqualFold(k, "upgrade") || 2130 asciiEqualFold(k, "keep-alive") { 2131 // Per 8.1.2.2 Connection-Specific Header 2132 // Fields, don't send connection-specific 2133 // fields. We have already checked if any 2134 // are error-worthy so just ignore the rest. 2135 continue 2136 } else if asciiEqualFold(k, "user-agent") { 2137 // Match Go's http1 behavior: at most one 2138 // User-Agent. If set to nil or empty string, 2139 // then omit it. Otherwise if not mentioned, 2140 // include the default (below). 2141 didUA = true 2142 if len(vv) < 1 { 2143 continue 2144 } 2145 vv = vv[:1] 2146 if vv[0] == "" { 2147 continue 2148 } 2149 } else if asciiEqualFold(k, "cookie") { 2150 // Per 8.1.2.5 To allow for better compression efficiency, the 2151 // Cookie header field MAY be split into separate header fields, 2152 // each with one or more cookie-pairs. 2153 for _, v := range vv { 2154 for { 2155 p := strings.IndexByte(v, ';') 2156 if p < 0 { 2157 break 2158 } 2159 f("cookie", v[:p]) 2160 p++ 2161 // strip space after semicolon if any. 2162 for p+1 <= len(v) && v[p] == ' ' { 2163 p++ 2164 } 2165 v = v[p:] 2166 } 2167 if len(v) > 0 { 2168 f("cookie", v) 2169 } 2170 } 2171 continue 2172 } 2173 2174 for _, v := range vv { 2175 f(k, v) 2176 } 2177 } 2178 if shouldSendReqContentLength(req.Method, contentLength) { 2179 f("content-length", strconv.FormatInt(contentLength, 10)) 2180 } 2181 if addGzipHeader { 2182 f("accept-encoding", "gzip") 2183 } 2184 if !didUA { 2185 f("user-agent", defaultUserAgent) 2186 } 2187 } 2188 2189 // Do a first pass over the headers counting bytes to ensure 2190 // we don't exceed cc.peerMaxHeaderListSize. This is done as a 2191 // separate pass before encoding the headers to prevent 2192 // modifying the hpack state. 2193 hlSize := uint64(0) 2194 enumerateHeaders(func(name, value string) { 2195 hf := hpack.HeaderField{Name: name, Value: value} 2196 hlSize += uint64(hf.Size()) 2197 }) 2198 2199 if hlSize > cc.peerMaxHeaderListSize { 2200 return nil, errRequestHeaderListSize 2201 } 2202 2203 trace := httptrace.ContextClientTrace(req.Context()) 2204 traceHeaders := traceHasWroteHeaderField(trace) 2205 2206 // Header list size is ok. Write the headers. 2207 enumerateHeaders(func(name, value string) { 2208 name, ascii := lowerHeader(name) 2209 if !ascii { 2210 // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header 2211 // field names have to be ASCII characters (just as in HTTP/1.x). 2212 return 2213 } 2214 cc.writeHeader(name, value) 2215 if traceHeaders { 2216 traceWroteHeaderField(trace, name, value) 2217 } 2218 }) 2219 2220 return cc.hbuf.Bytes(), nil 2221 } 2222 2223 // shouldSendReqContentLength reports whether the http2.Transport should send 2224 // a "content-length" request header. This logic is basically a copy of the net/http 2225 // transferWriter.shouldSendContentLength. 2226 // The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). 2227 // -1 means unknown. 2228 func shouldSendReqContentLength(method string, contentLength int64) bool { 2229 if contentLength > 0 { 2230 return true 2231 } 2232 if contentLength < 0 { 2233 return false 2234 } 2235 // For zero bodies, whether we send a content-length depends on the method. 2236 // It also kinda doesn't matter for http2 either way, with END_STREAM. 2237 switch method { 2238 case "POST", "PUT", "PATCH": 2239 return true 2240 default: 2241 return false 2242 } 2243 } 2244 2245 // requires cc.wmu be held. 2246 func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { 2247 cc.hbuf.Reset() 2248 2249 hlSize := uint64(0) 2250 for k, vv := range trailer { 2251 for _, v := range vv { 2252 hf := hpack.HeaderField{Name: k, Value: v} 2253 hlSize += uint64(hf.Size()) 2254 } 2255 } 2256 if hlSize > cc.peerMaxHeaderListSize { 2257 return nil, errRequestHeaderListSize 2258 } 2259 2260 for k, vv := range trailer { 2261 lowKey, ascii := lowerHeader(k) 2262 if !ascii { 2263 // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header 2264 // field names have to be ASCII characters (just as in HTTP/1.x). 2265 continue 2266 } 2267 // Transfer-Encoding, etc.. have already been filtered at the 2268 // start of RoundTrip 2269 for _, v := range vv { 2270 cc.writeHeader(lowKey, v) 2271 } 2272 } 2273 return cc.hbuf.Bytes(), nil 2274 } 2275 2276 func (cc *ClientConn) writeHeader(name, value string) { 2277 if VerboseLogs { 2278 log.Printf("http2: Transport encoding header %q = %q", name, value) 2279 } 2280 cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) 2281 } 2282 2283 type resAndError struct { 2284 _ incomparable 2285 res *http.Response 2286 err error 2287 } 2288 2289 // requires cc.mu be held. 2290 func (cc *ClientConn) addStreamLocked(cs *clientStream) { 2291 cs.flow.add(int32(cc.initialWindowSize)) 2292 cs.flow.setConnFlow(&cc.flow) 2293 cs.inflow.init(transportDefaultStreamFlow) 2294 cs.ID = cc.nextStreamID 2295 cc.nextStreamID += 2 2296 cc.streams[cs.ID] = cs 2297 if cs.ID == 0 { 2298 panic("assigned stream ID 0") 2299 } 2300 } 2301 2302 func (cc *ClientConn) forgetStreamID(id uint32) { 2303 cc.mu.Lock() 2304 slen := len(cc.streams) 2305 delete(cc.streams, id) 2306 if len(cc.streams) != slen-1 { 2307 panic("forgetting unknown stream id") 2308 } 2309 cc.lastActive = time.Now() 2310 if len(cc.streams) == 0 && cc.idleTimer != nil { 2311 cc.idleTimer.Reset(cc.idleTimeout) 2312 cc.lastIdle = time.Now() 2313 } 2314 // Wake up writeRequestBody via clientStream.awaitFlowControl and 2315 // wake up RoundTrip if there is a pending request. 2316 cc.condBroadcast() 2317 2318 closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil 2319 if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { 2320 if VerboseLogs { 2321 cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2) 2322 } 2323 cc.closed = true 2324 defer cc.closeConn() 2325 } 2326 2327 cc.mu.Unlock() 2328 } 2329 2330 // clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. 2331 type clientConnReadLoop struct { 2332 _ incomparable 2333 cc *ClientConn 2334 } 2335 2336 // readLoop runs in its own goroutine and reads and dispatches frames. 2337 func (cc *ClientConn) readLoop() { 2338 rl := &clientConnReadLoop{cc: cc} 2339 defer rl.cleanup() 2340 cc.readerErr = rl.run() 2341 if ce, ok := cc.readerErr.(ConnectionError); ok { 2342 cc.wmu.Lock() 2343 cc.fr.WriteGoAway(0, ErrCode(ce), nil) 2344 cc.wmu.Unlock() 2345 } 2346 } 2347 2348 // GoAwayError is returned by the Transport when the server closes the 2349 // TCP connection after sending a GOAWAY frame. 2350 type GoAwayError struct { 2351 LastStreamID uint32 2352 ErrCode ErrCode 2353 DebugData string 2354 } 2355 2356 func (e GoAwayError) Error() string { 2357 return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", 2358 e.LastStreamID, e.ErrCode, e.DebugData) 2359 } 2360 2361 func isEOFOrNetReadError(err error) bool { 2362 if err == io.EOF { 2363 return true 2364 } 2365 ne, ok := err.(*net.OpError) 2366 return ok && ne.Op == "read" 2367 } 2368 2369 func (rl *clientConnReadLoop) cleanup() { 2370 cc := rl.cc 2371 cc.t.connPool().MarkDead(cc) 2372 defer cc.closeConn() 2373 defer close(cc.readerDone) 2374 2375 if cc.idleTimer != nil { 2376 cc.idleTimer.Stop() 2377 } 2378 2379 // Close any response bodies if the server closes prematurely. 2380 // TODO: also do this if we've written the headers but not 2381 // gotten a response yet. 2382 err := cc.readerErr 2383 cc.mu.Lock() 2384 if cc.goAway != nil && isEOFOrNetReadError(err) { 2385 err = GoAwayError{ 2386 LastStreamID: cc.goAway.LastStreamID, 2387 ErrCode: cc.goAway.ErrCode, 2388 DebugData: cc.goAwayDebug, 2389 } 2390 } else if err == io.EOF { 2391 err = io.ErrUnexpectedEOF 2392 } 2393 cc.closed = true 2394 2395 for _, cs := range cc.streams { 2396 select { 2397 case <-cs.peerClosed: 2398 // The server closed the stream before closing the conn, 2399 // so no need to interrupt it. 2400 default: 2401 cs.abortStreamLocked(err) 2402 } 2403 } 2404 cc.condBroadcast() 2405 cc.mu.Unlock() 2406 } 2407 2408 // countReadFrameError calls Transport.CountError with a string 2409 // representing err. 2410 func (cc *ClientConn) countReadFrameError(err error) { 2411 f := cc.t.CountError 2412 if f == nil || err == nil { 2413 return 2414 } 2415 if ce, ok := err.(ConnectionError); ok { 2416 errCode := ErrCode(ce) 2417 f(fmt.Sprintf("read_frame_conn_error_%s", errCode.stringToken())) 2418 return 2419 } 2420 if errors.Is(err, io.EOF) { 2421 f("read_frame_eof") 2422 return 2423 } 2424 if errors.Is(err, io.ErrUnexpectedEOF) { 2425 f("read_frame_unexpected_eof") 2426 return 2427 } 2428 if errors.Is(err, ErrFrameTooLarge) { 2429 f("read_frame_too_large") 2430 return 2431 } 2432 f("read_frame_other") 2433 } 2434 2435 func (rl *clientConnReadLoop) run() error { 2436 cc := rl.cc 2437 gotSettings := false 2438 readIdleTimeout := cc.t.ReadIdleTimeout 2439 var t timer 2440 if readIdleTimeout != 0 { 2441 t = cc.afterFunc(readIdleTimeout, cc.healthCheck) 2442 } 2443 for { 2444 f, err := cc.fr.ReadFrame() 2445 if t != nil { 2446 t.Reset(readIdleTimeout) 2447 } 2448 if err != nil { 2449 cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) 2450 } 2451 if se, ok := err.(StreamError); ok { 2452 if cs := rl.streamByID(se.StreamID); cs != nil { 2453 if se.Cause == nil { 2454 se.Cause = cc.fr.errDetail 2455 } 2456 rl.endStreamError(cs, se) 2457 } 2458 continue 2459 } else if err != nil { 2460 cc.countReadFrameError(err) 2461 return err 2462 } 2463 if VerboseLogs { 2464 cc.vlogf("http2: Transport received %s", summarizeFrame(f)) 2465 } 2466 if !gotSettings { 2467 if _, ok := f.(*SettingsFrame); !ok { 2468 cc.logf("protocol error: received %T before a SETTINGS frame", f) 2469 return ConnectionError(ErrCodeProtocol) 2470 } 2471 gotSettings = true 2472 } 2473 2474 switch f := f.(type) { 2475 case *MetaHeadersFrame: 2476 err = rl.processHeaders(f) 2477 case *DataFrame: 2478 err = rl.processData(f) 2479 case *GoAwayFrame: 2480 err = rl.processGoAway(f) 2481 case *RSTStreamFrame: 2482 err = rl.processResetStream(f) 2483 case *SettingsFrame: 2484 err = rl.processSettings(f) 2485 case *PushPromiseFrame: 2486 err = rl.processPushPromise(f) 2487 case *WindowUpdateFrame: 2488 err = rl.processWindowUpdate(f) 2489 case *PingFrame: 2490 err = rl.processPing(f) 2491 default: 2492 cc.logf("Transport: unhandled response frame type %T", f) 2493 } 2494 if err != nil { 2495 if VerboseLogs { 2496 cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) 2497 } 2498 return err 2499 } 2500 } 2501 } 2502 2503 func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { 2504 cs := rl.streamByID(f.StreamID) 2505 if cs == nil { 2506 // We'd get here if we canceled a request while the 2507 // server had its response still in flight. So if this 2508 // was just something we canceled, ignore it. 2509 return nil 2510 } 2511 if cs.readClosed { 2512 rl.endStreamError(cs, StreamError{ 2513 StreamID: f.StreamID, 2514 Code: ErrCodeProtocol, 2515 Cause: errors.New("protocol error: headers after END_STREAM"), 2516 }) 2517 return nil 2518 } 2519 if !cs.firstByte { 2520 if cs.trace != nil { 2521 // TODO(bradfitz): move first response byte earlier, 2522 // when we first read the 9 byte header, not waiting 2523 // until all the HEADERS+CONTINUATION frames have been 2524 // merged. This works for now. 2525 traceFirstResponseByte(cs.trace) 2526 } 2527 cs.firstByte = true 2528 } 2529 if !cs.pastHeaders { 2530 cs.pastHeaders = true 2531 } else { 2532 return rl.processTrailers(cs, f) 2533 } 2534 2535 res, err := rl.handleResponse(cs, f) 2536 if err != nil { 2537 if _, ok := err.(ConnectionError); ok { 2538 return err 2539 } 2540 // Any other error type is a stream error. 2541 rl.endStreamError(cs, StreamError{ 2542 StreamID: f.StreamID, 2543 Code: ErrCodeProtocol, 2544 Cause: err, 2545 }) 2546 return nil // return nil from process* funcs to keep conn alive 2547 } 2548 if res == nil { 2549 // (nil, nil) special case. See handleResponse docs. 2550 return nil 2551 } 2552 cs.resTrailer = &res.Trailer 2553 cs.res = res 2554 close(cs.respHeaderRecv) 2555 if f.StreamEnded() { 2556 rl.endStream(cs) 2557 } 2558 return nil 2559 } 2560 2561 // may return error types nil, or ConnectionError. Any other error value 2562 // is a StreamError of type ErrCodeProtocol. The returned error in that case 2563 // is the detail. 2564 // 2565 // As a special case, handleResponse may return (nil, nil) to skip the 2566 // frame (currently only used for 1xx responses). 2567 func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { 2568 if f.Truncated { 2569 return nil, errResponseHeaderListSize 2570 } 2571 2572 status := f.PseudoValue("status") 2573 if status == "" { 2574 return nil, errors.New("malformed response from server: missing status pseudo header") 2575 } 2576 statusCode, err := strconv.Atoi(status) 2577 if err != nil { 2578 return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") 2579 } 2580 2581 regularFields := f.RegularFields() 2582 strs := make([]string, len(regularFields)) 2583 header := make(http.Header, len(regularFields)) 2584 res := &http.Response{ 2585 Proto: "HTTP/2.0", 2586 ProtoMajor: 2, 2587 Header: header, 2588 StatusCode: statusCode, 2589 Status: status + " " + http.StatusText(statusCode), 2590 } 2591 for _, hf := range regularFields { 2592 key := canonicalHeader(hf.Name) 2593 if key == "Trailer" { 2594 t := res.Trailer 2595 if t == nil { 2596 t = make(http.Header) 2597 res.Trailer = t 2598 } 2599 foreachHeaderElement(hf.Value, func(v string) { 2600 t[canonicalHeader(v)] = nil 2601 }) 2602 } else { 2603 vv := header[key] 2604 if vv == nil && len(strs) > 0 { 2605 // More than likely this will be a single-element key. 2606 // Most headers aren't multi-valued. 2607 // Set the capacity on strs[0] to 1, so any future append 2608 // won't extend the slice into the other strings. 2609 vv, strs = strs[:1:1], strs[1:] 2610 vv[0] = hf.Value 2611 header[key] = vv 2612 } else { 2613 header[key] = append(vv, hf.Value) 2614 } 2615 } 2616 } 2617 2618 if statusCode >= 100 && statusCode <= 199 { 2619 if f.StreamEnded() { 2620 return nil, errors.New("1xx informational response with END_STREAM flag") 2621 } 2622 cs.num1xx++ 2623 const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http 2624 if cs.num1xx > max1xxResponses { 2625 return nil, errors.New("http2: too many 1xx informational responses") 2626 } 2627 if fn := cs.get1xxTraceFunc(); fn != nil { 2628 if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { 2629 return nil, err 2630 } 2631 } 2632 if statusCode == 100 { 2633 traceGot100Continue(cs.trace) 2634 select { 2635 case cs.on100 <- struct{}{}: 2636 default: 2637 } 2638 } 2639 cs.pastHeaders = false // do it all again 2640 return nil, nil 2641 } 2642 2643 res.ContentLength = -1 2644 if clens := res.Header["Content-Length"]; len(clens) == 1 { 2645 if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil { 2646 res.ContentLength = int64(cl) 2647 } else { 2648 // TODO: care? unlike http/1, it won't mess up our framing, so it's 2649 // more safe smuggling-wise to ignore. 2650 } 2651 } else if len(clens) > 1 { 2652 // TODO: care? unlike http/1, it won't mess up our framing, so it's 2653 // more safe smuggling-wise to ignore. 2654 } else if f.StreamEnded() && !cs.isHead { 2655 res.ContentLength = 0 2656 } 2657 2658 if cs.isHead { 2659 res.Body = noBody 2660 return res, nil 2661 } 2662 2663 if f.StreamEnded() { 2664 if res.ContentLength > 0 { 2665 res.Body = missingBody{} 2666 } else { 2667 res.Body = noBody 2668 } 2669 return res, nil 2670 } 2671 2672 cs.bufPipe.setBuffer(&dataBuffer{expected: res.ContentLength}) 2673 cs.bytesRemain = res.ContentLength 2674 res.Body = transportResponseBody{cs} 2675 2676 if cs.requestedGzip && asciiEqualFold(res.Header.Get("Content-Encoding"), "gzip") { 2677 res.Header.Del("Content-Encoding") 2678 res.Header.Del("Content-Length") 2679 res.ContentLength = -1 2680 res.Body = &gzipReader{body: res.Body} 2681 res.Uncompressed = true 2682 } 2683 return res, nil 2684 } 2685 2686 func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { 2687 if cs.pastTrailers { 2688 // Too many HEADERS frames for this stream. 2689 return ConnectionError(ErrCodeProtocol) 2690 } 2691 cs.pastTrailers = true 2692 if !f.StreamEnded() { 2693 // We expect that any headers for trailers also 2694 // has END_STREAM. 2695 return ConnectionError(ErrCodeProtocol) 2696 } 2697 if len(f.PseudoFields()) > 0 { 2698 // No pseudo header fields are defined for trailers. 2699 // TODO: ConnectionError might be overly harsh? Check. 2700 return ConnectionError(ErrCodeProtocol) 2701 } 2702 2703 trailer := make(http.Header) 2704 for _, hf := range f.RegularFields() { 2705 key := canonicalHeader(hf.Name) 2706 trailer[key] = append(trailer[key], hf.Value) 2707 } 2708 cs.trailer = trailer 2709 2710 rl.endStream(cs) 2711 return nil 2712 } 2713 2714 // transportResponseBody is the concrete type of Transport.RoundTrip's 2715 // Response.Body. It is an io.ReadCloser. 2716 type transportResponseBody struct { 2717 cs *clientStream 2718 } 2719 2720 func (b transportResponseBody) Read(p []byte) (n int, err error) { 2721 cs := b.cs 2722 cc := cs.cc 2723 2724 if cs.readErr != nil { 2725 return 0, cs.readErr 2726 } 2727 n, err = b.cs.bufPipe.Read(p) 2728 if cs.bytesRemain != -1 { 2729 if int64(n) > cs.bytesRemain { 2730 n = int(cs.bytesRemain) 2731 if err == nil { 2732 err = errors.New("net/http: server replied with more than declared Content-Length; truncated") 2733 cs.abortStream(err) 2734 } 2735 cs.readErr = err 2736 return int(cs.bytesRemain), err 2737 } 2738 cs.bytesRemain -= int64(n) 2739 if err == io.EOF && cs.bytesRemain > 0 { 2740 err = io.ErrUnexpectedEOF 2741 cs.readErr = err 2742 return n, err 2743 } 2744 } 2745 if n == 0 { 2746 // No flow control tokens to send back. 2747 return 2748 } 2749 2750 cc.mu.Lock() 2751 connAdd := cc.inflow.add(n) 2752 var streamAdd int32 2753 if err == nil { // No need to refresh if the stream is over or failed. 2754 streamAdd = cs.inflow.add(n) 2755 } 2756 cc.mu.Unlock() 2757 2758 if connAdd != 0 || streamAdd != 0 { 2759 cc.wmu.Lock() 2760 defer cc.wmu.Unlock() 2761 if connAdd != 0 { 2762 cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) 2763 } 2764 if streamAdd != 0 { 2765 cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) 2766 } 2767 cc.bw.Flush() 2768 } 2769 return 2770 } 2771 2772 var errClosedResponseBody = errors.New("http2: response body closed") 2773 2774 func (b transportResponseBody) Close() error { 2775 cs := b.cs 2776 cc := cs.cc 2777 2778 cs.bufPipe.BreakWithError(errClosedResponseBody) 2779 cs.abortStream(errClosedResponseBody) 2780 2781 unread := cs.bufPipe.Len() 2782 if unread > 0 { 2783 cc.mu.Lock() 2784 // Return connection-level flow control. 2785 connAdd := cc.inflow.add(unread) 2786 cc.mu.Unlock() 2787 2788 // TODO(dneil): Acquiring this mutex can block indefinitely. 2789 // Move flow control return to a goroutine? 2790 cc.wmu.Lock() 2791 // Return connection-level flow control. 2792 if connAdd > 0 { 2793 cc.fr.WriteWindowUpdate(0, uint32(connAdd)) 2794 } 2795 cc.bw.Flush() 2796 cc.wmu.Unlock() 2797 } 2798 2799 select { 2800 case <-cs.donec: 2801 case <-cs.ctx.Done(): 2802 // See golang/go#49366: The net/http package can cancel the 2803 // request context after the response body is fully read. 2804 // Don't treat this as an error. 2805 return nil 2806 case <-cs.reqCancel: 2807 return errRequestCanceled 2808 } 2809 return nil 2810 } 2811 2812 func (rl *clientConnReadLoop) processData(f *DataFrame) error { 2813 cc := rl.cc 2814 cs := rl.streamByID(f.StreamID) 2815 data := f.Data() 2816 if cs == nil { 2817 cc.mu.Lock() 2818 neverSent := cc.nextStreamID 2819 cc.mu.Unlock() 2820 if f.StreamID >= neverSent { 2821 // We never asked for this. 2822 cc.logf("http2: Transport received unsolicited DATA frame; closing connection") 2823 return ConnectionError(ErrCodeProtocol) 2824 } 2825 // We probably did ask for this, but canceled. Just ignore it. 2826 // TODO: be stricter here? only silently ignore things which 2827 // we canceled, but not things which were closed normally 2828 // by the peer? Tough without accumulating too much state. 2829 2830 // But at least return their flow control: 2831 if f.Length > 0 { 2832 cc.mu.Lock() 2833 ok := cc.inflow.take(f.Length) 2834 connAdd := cc.inflow.add(int(f.Length)) 2835 cc.mu.Unlock() 2836 if !ok { 2837 return ConnectionError(ErrCodeFlowControl) 2838 } 2839 if connAdd > 0 { 2840 cc.wmu.Lock() 2841 cc.fr.WriteWindowUpdate(0, uint32(connAdd)) 2842 cc.bw.Flush() 2843 cc.wmu.Unlock() 2844 } 2845 } 2846 return nil 2847 } 2848 if cs.readClosed { 2849 cc.logf("protocol error: received DATA after END_STREAM") 2850 rl.endStreamError(cs, StreamError{ 2851 StreamID: f.StreamID, 2852 Code: ErrCodeProtocol, 2853 }) 2854 return nil 2855 } 2856 if !cs.pastHeaders { 2857 cc.logf("protocol error: received DATA before a HEADERS frame") 2858 rl.endStreamError(cs, StreamError{ 2859 StreamID: f.StreamID, 2860 Code: ErrCodeProtocol, 2861 }) 2862 return nil 2863 } 2864 if f.Length > 0 { 2865 if cs.isHead && len(data) > 0 { 2866 cc.logf("protocol error: received DATA on a HEAD request") 2867 rl.endStreamError(cs, StreamError{ 2868 StreamID: f.StreamID, 2869 Code: ErrCodeProtocol, 2870 }) 2871 return nil 2872 } 2873 // Check connection-level flow control. 2874 cc.mu.Lock() 2875 if !takeInflows(&cc.inflow, &cs.inflow, f.Length) { 2876 cc.mu.Unlock() 2877 return ConnectionError(ErrCodeFlowControl) 2878 } 2879 // Return any padded flow control now, since we won't 2880 // refund it later on body reads. 2881 var refund int 2882 if pad := int(f.Length) - len(data); pad > 0 { 2883 refund += pad 2884 } 2885 2886 didReset := false 2887 var err error 2888 if len(data) > 0 { 2889 if _, err = cs.bufPipe.Write(data); err != nil { 2890 // Return len(data) now if the stream is already closed, 2891 // since data will never be read. 2892 didReset = true 2893 refund += len(data) 2894 } 2895 } 2896 2897 sendConn := cc.inflow.add(refund) 2898 var sendStream int32 2899 if !didReset { 2900 sendStream = cs.inflow.add(refund) 2901 } 2902 cc.mu.Unlock() 2903 2904 if sendConn > 0 || sendStream > 0 { 2905 cc.wmu.Lock() 2906 if sendConn > 0 { 2907 cc.fr.WriteWindowUpdate(0, uint32(sendConn)) 2908 } 2909 if sendStream > 0 { 2910 cc.fr.WriteWindowUpdate(cs.ID, uint32(sendStream)) 2911 } 2912 cc.bw.Flush() 2913 cc.wmu.Unlock() 2914 } 2915 2916 if err != nil { 2917 rl.endStreamError(cs, err) 2918 return nil 2919 } 2920 } 2921 2922 if f.StreamEnded() { 2923 rl.endStream(cs) 2924 } 2925 return nil 2926 } 2927 2928 func (rl *clientConnReadLoop) endStream(cs *clientStream) { 2929 // TODO: check that any declared content-length matches, like 2930 // server.go's (*stream).endStream method. 2931 if !cs.readClosed { 2932 cs.readClosed = true 2933 // Close cs.bufPipe and cs.peerClosed with cc.mu held to avoid a 2934 // race condition: The caller can read io.EOF from Response.Body 2935 // and close the body before we close cs.peerClosed, causing 2936 // cleanupWriteRequest to send a RST_STREAM. 2937 rl.cc.mu.Lock() 2938 defer rl.cc.mu.Unlock() 2939 cs.bufPipe.closeWithErrorAndCode(io.EOF, cs.copyTrailers) 2940 close(cs.peerClosed) 2941 } 2942 } 2943 2944 func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { 2945 cs.readAborted = true 2946 cs.abortStream(err) 2947 } 2948 2949 func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream { 2950 rl.cc.mu.Lock() 2951 defer rl.cc.mu.Unlock() 2952 cs := rl.cc.streams[id] 2953 if cs != nil && !cs.readAborted { 2954 return cs 2955 } 2956 return nil 2957 } 2958 2959 func (cs *clientStream) copyTrailers() { 2960 for k, vv := range cs.trailer { 2961 t := cs.resTrailer 2962 if *t == nil { 2963 *t = make(http.Header) 2964 } 2965 (*t)[k] = vv 2966 } 2967 } 2968 2969 func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { 2970 cc := rl.cc 2971 cc.t.connPool().MarkDead(cc) 2972 if f.ErrCode != 0 { 2973 // TODO: deal with GOAWAY more. particularly the error code 2974 cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) 2975 if fn := cc.t.CountError; fn != nil { 2976 fn("recv_goaway_" + f.ErrCode.stringToken()) 2977 } 2978 } 2979 cc.setGoAway(f) 2980 return nil 2981 } 2982 2983 func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { 2984 cc := rl.cc 2985 // Locking both mu and wmu here allows frame encoding to read settings with only wmu held. 2986 // Acquiring wmu when f.IsAck() is unnecessary, but convenient and mostly harmless. 2987 cc.wmu.Lock() 2988 defer cc.wmu.Unlock() 2989 2990 if err := rl.processSettingsNoWrite(f); err != nil { 2991 return err 2992 } 2993 if !f.IsAck() { 2994 cc.fr.WriteSettingsAck() 2995 cc.bw.Flush() 2996 } 2997 return nil 2998 } 2999 3000 func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { 3001 cc := rl.cc 3002 cc.mu.Lock() 3003 defer cc.mu.Unlock() 3004 3005 if f.IsAck() { 3006 if cc.wantSettingsAck { 3007 cc.wantSettingsAck = false 3008 return nil 3009 } 3010 return ConnectionError(ErrCodeProtocol) 3011 } 3012 3013 var seenMaxConcurrentStreams bool 3014 err := f.ForeachSetting(func(s Setting) error { 3015 switch s.ID { 3016 case SettingMaxFrameSize: 3017 cc.maxFrameSize = s.Val 3018 case SettingMaxConcurrentStreams: 3019 cc.maxConcurrentStreams = s.Val 3020 seenMaxConcurrentStreams = true 3021 case SettingMaxHeaderListSize: 3022 cc.peerMaxHeaderListSize = uint64(s.Val) 3023 case SettingInitialWindowSize: 3024 // Values above the maximum flow-control 3025 // window size of 2^31-1 MUST be treated as a 3026 // connection error (Section 5.4.1) of type 3027 // FLOW_CONTROL_ERROR. 3028 if s.Val > math.MaxInt32 { 3029 return ConnectionError(ErrCodeFlowControl) 3030 } 3031 3032 // Adjust flow control of currently-open 3033 // frames by the difference of the old initial 3034 // window size and this one. 3035 delta := int32(s.Val) - int32(cc.initialWindowSize) 3036 for _, cs := range cc.streams { 3037 cs.flow.add(delta) 3038 } 3039 cc.condBroadcast() 3040 3041 cc.initialWindowSize = s.Val 3042 case SettingHeaderTableSize: 3043 cc.henc.SetMaxDynamicTableSize(s.Val) 3044 cc.peerMaxHeaderTableSize = s.Val 3045 default: 3046 cc.vlogf("Unhandled Setting: %v", s) 3047 } 3048 return nil 3049 }) 3050 if err != nil { 3051 return err 3052 } 3053 3054 if !cc.seenSettings { 3055 if !seenMaxConcurrentStreams { 3056 // This was the servers initial SETTINGS frame and it 3057 // didn't contain a MAX_CONCURRENT_STREAMS field so 3058 // increase the number of concurrent streams this 3059 // connection can establish to our default. 3060 cc.maxConcurrentStreams = defaultMaxConcurrentStreams 3061 } 3062 cc.seenSettings = true 3063 } 3064 3065 return nil 3066 } 3067 3068 func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { 3069 cc := rl.cc 3070 cs := rl.streamByID(f.StreamID) 3071 if f.StreamID != 0 && cs == nil { 3072 return nil 3073 } 3074 3075 cc.mu.Lock() 3076 defer cc.mu.Unlock() 3077 3078 fl := &cc.flow 3079 if cs != nil { 3080 fl = &cs.flow 3081 } 3082 if !fl.add(int32(f.Increment)) { 3083 // For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR 3084 if cs != nil { 3085 rl.endStreamError(cs, StreamError{ 3086 StreamID: f.StreamID, 3087 Code: ErrCodeFlowControl, 3088 }) 3089 return nil 3090 } 3091 3092 return ConnectionError(ErrCodeFlowControl) 3093 } 3094 cc.condBroadcast() 3095 return nil 3096 } 3097 3098 func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { 3099 cs := rl.streamByID(f.StreamID) 3100 if cs == nil { 3101 // TODO: return error if server tries to RST_STREAM an idle stream 3102 return nil 3103 } 3104 serr := streamError(cs.ID, f.ErrCode) 3105 serr.Cause = errFromPeer 3106 if f.ErrCode == ErrCodeProtocol { 3107 rl.cc.SetDoNotReuse() 3108 } 3109 if fn := cs.cc.t.CountError; fn != nil { 3110 fn("recv_rststream_" + f.ErrCode.stringToken()) 3111 } 3112 cs.abortStream(serr) 3113 3114 cs.bufPipe.CloseWithError(serr) 3115 return nil 3116 } 3117 3118 // Ping sends a PING frame to the server and waits for the ack. 3119 func (cc *ClientConn) Ping(ctx context.Context) error { 3120 c := make(chan struct{}) 3121 // Generate a random payload 3122 var p [8]byte 3123 for { 3124 if _, err := rand.Read(p[:]); err != nil { 3125 return err 3126 } 3127 cc.mu.Lock() 3128 // check for dup before insert 3129 if _, found := cc.pings[p]; !found { 3130 cc.pings[p] = c 3131 cc.mu.Unlock() 3132 break 3133 } 3134 cc.mu.Unlock() 3135 } 3136 var pingError error 3137 errc := make(chan struct{}) 3138 cc.goRun(func() { 3139 cc.wmu.Lock() 3140 defer cc.wmu.Unlock() 3141 if pingError = cc.fr.WritePing(false, p); pingError != nil { 3142 close(errc) 3143 return 3144 } 3145 if pingError = cc.bw.Flush(); pingError != nil { 3146 close(errc) 3147 return 3148 } 3149 }) 3150 if cc.syncHooks != nil { 3151 cc.syncHooks.blockUntil(func() bool { 3152 select { 3153 case <-c: 3154 case <-errc: 3155 case <-ctx.Done(): 3156 case <-cc.readerDone: 3157 default: 3158 return false 3159 } 3160 return true 3161 }) 3162 } 3163 select { 3164 case <-c: 3165 return nil 3166 case <-errc: 3167 return pingError 3168 case <-ctx.Done(): 3169 return ctx.Err() 3170 case <-cc.readerDone: 3171 // connection closed 3172 return cc.readerErr 3173 } 3174 } 3175 3176 func (rl *clientConnReadLoop) processPing(f *PingFrame) error { 3177 if f.IsAck() { 3178 cc := rl.cc 3179 cc.mu.Lock() 3180 defer cc.mu.Unlock() 3181 // If ack, notify listener if any 3182 if c, ok := cc.pings[f.Data]; ok { 3183 close(c) 3184 delete(cc.pings, f.Data) 3185 } 3186 return nil 3187 } 3188 cc := rl.cc 3189 cc.wmu.Lock() 3190 defer cc.wmu.Unlock() 3191 if err := cc.fr.WritePing(true, f.Data); err != nil { 3192 return err 3193 } 3194 return cc.bw.Flush() 3195 } 3196 3197 func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { 3198 // We told the peer we don't want them. 3199 // Spec says: 3200 // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH 3201 // setting of the peer endpoint is set to 0. An endpoint that 3202 // has set this setting and has received acknowledgement MUST 3203 // treat the receipt of a PUSH_PROMISE frame as a connection 3204 // error (Section 5.4.1) of type PROTOCOL_ERROR." 3205 return ConnectionError(ErrCodeProtocol) 3206 } 3207 3208 func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { 3209 // TODO: map err to more interesting error codes, once the 3210 // HTTP community comes up with some. But currently for 3211 // RST_STREAM there's no equivalent to GOAWAY frame's debug 3212 // data, and the error codes are all pretty vague ("cancel"). 3213 cc.wmu.Lock() 3214 cc.fr.WriteRSTStream(streamID, code) 3215 cc.bw.Flush() 3216 cc.wmu.Unlock() 3217 } 3218 3219 var ( 3220 errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") 3221 errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") 3222 ) 3223 3224 func (cc *ClientConn) logf(format string, args ...interface{}) { 3225 cc.t.logf(format, args...) 3226 } 3227 3228 func (cc *ClientConn) vlogf(format string, args ...interface{}) { 3229 cc.t.vlogf(format, args...) 3230 } 3231 3232 func (t *Transport) vlogf(format string, args ...interface{}) { 3233 if VerboseLogs { 3234 t.logf(format, args...) 3235 } 3236 } 3237 3238 func (t *Transport) logf(format string, args ...interface{}) { 3239 log.Printf(format, args...) 3240 } 3241 3242 var noBody io.ReadCloser = noBodyReader{} 3243 3244 type noBodyReader struct{} 3245 3246 func (noBodyReader) Close() error { return nil } 3247 func (noBodyReader) Read([]byte) (int, error) { return 0, io.EOF } 3248 3249 type missingBody struct{} 3250 3251 func (missingBody) Close() error { return nil } 3252 func (missingBody) Read([]byte) (int, error) { return 0, io.ErrUnexpectedEOF } 3253 3254 func strSliceContains(ss []string, s string) bool { 3255 for _, v := range ss { 3256 if v == s { 3257 return true 3258 } 3259 } 3260 return false 3261 } 3262 3263 type erringRoundTripper struct{ err error } 3264 3265 func (rt erringRoundTripper) RoundTripErr() error { return rt.err } 3266 func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } 3267 3268 // gzipReader wraps a response body so it can lazily 3269 // call gzip.NewReader on the first call to Read 3270 type gzipReader struct { 3271 _ incomparable 3272 body io.ReadCloser // underlying Response.Body 3273 zr *gzip.Reader // lazily-initialized gzip reader 3274 zerr error // sticky error 3275 } 3276 3277 func (gz *gzipReader) Read(p []byte) (n int, err error) { 3278 if gz.zerr != nil { 3279 return 0, gz.zerr 3280 } 3281 if gz.zr == nil { 3282 gz.zr, err = gzip.NewReader(gz.body) 3283 if err != nil { 3284 gz.zerr = err 3285 return 0, err 3286 } 3287 } 3288 return gz.zr.Read(p) 3289 } 3290 3291 func (gz *gzipReader) Close() error { 3292 if err := gz.body.Close(); err != nil { 3293 return err 3294 } 3295 gz.zerr = fs.ErrClosed 3296 return nil 3297 } 3298 3299 type errorReader struct{ err error } 3300 3301 func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } 3302 3303 // isConnectionCloseRequest reports whether req should use its own 3304 // connection for a single request and then close the connection. 3305 func isConnectionCloseRequest(req *http.Request) bool { 3306 return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close") 3307 } 3308 3309 // registerHTTPSProtocol calls Transport.RegisterProtocol but 3310 // converting panics into errors. 3311 func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) { 3312 defer func() { 3313 if e := recover(); e != nil { 3314 err = fmt.Errorf("%v", e) 3315 } 3316 }() 3317 t.RegisterProtocol("https", rt) 3318 return nil 3319 } 3320 3321 // noDialH2RoundTripper is a RoundTripper which only tries to complete the request 3322 // if there's already has a cached connection to the host. 3323 // (The field is exported so it can be accessed via reflect from net/http; tested 3324 // by TestNoDialH2RoundTripperType) 3325 type noDialH2RoundTripper struct{ *Transport } 3326 3327 func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { 3328 res, err := rt.Transport.RoundTrip(req) 3329 if isNoCachedConnError(err) { 3330 return nil, http.ErrSkipAltProtocol 3331 } 3332 return res, err 3333 } 3334 3335 func (t *Transport) idleConnTimeout() time.Duration { 3336 // to keep things backwards compatible, we use non-zero values of 3337 // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying 3338 // http1 transport, followed by 0 3339 if t.IdleConnTimeout != 0 { 3340 return t.IdleConnTimeout 3341 } 3342 3343 if t.t1 != nil { 3344 return t.t1.IdleConnTimeout 3345 } 3346 3347 return 0 3348 } 3349 3350 func traceGetConn(req *http.Request, hostPort string) { 3351 trace := httptrace.ContextClientTrace(req.Context()) 3352 if trace == nil || trace.GetConn == nil { 3353 return 3354 } 3355 trace.GetConn(hostPort) 3356 } 3357 3358 func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { 3359 trace := httptrace.ContextClientTrace(req.Context()) 3360 if trace == nil || trace.GotConn == nil { 3361 return 3362 } 3363 ci := httptrace.GotConnInfo{Conn: cc.tconn} 3364 ci.Reused = reused 3365 cc.mu.Lock() 3366 ci.WasIdle = len(cc.streams) == 0 && reused 3367 if ci.WasIdle && !cc.lastActive.IsZero() { 3368 ci.IdleTime = time.Since(cc.lastActive) 3369 } 3370 cc.mu.Unlock() 3371 3372 trace.GotConn(ci) 3373 } 3374 3375 func traceWroteHeaders(trace *httptrace.ClientTrace) { 3376 if trace != nil && trace.WroteHeaders != nil { 3377 trace.WroteHeaders() 3378 } 3379 } 3380 3381 func traceGot100Continue(trace *httptrace.ClientTrace) { 3382 if trace != nil && trace.Got100Continue != nil { 3383 trace.Got100Continue() 3384 } 3385 } 3386 3387 func traceWait100Continue(trace *httptrace.ClientTrace) { 3388 if trace != nil && trace.Wait100Continue != nil { 3389 trace.Wait100Continue() 3390 } 3391 } 3392 3393 func traceWroteRequest(trace *httptrace.ClientTrace, err error) { 3394 if trace != nil && trace.WroteRequest != nil { 3395 trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) 3396 } 3397 } 3398 3399 func traceFirstResponseByte(trace *httptrace.ClientTrace) { 3400 if trace != nil && trace.GotFirstResponseByte != nil { 3401 trace.GotFirstResponseByte() 3402 } 3403 } 3404 3405 func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { 3406 return trace != nil && trace.WroteHeaderField != nil 3407 } 3408 3409 func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { 3410 if trace != nil && trace.WroteHeaderField != nil { 3411 trace.WroteHeaderField(k, []string{v}) 3412 } 3413 } 3414 3415 func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { 3416 if trace != nil { 3417 return trace.Got1xxResponse 3418 } 3419 return nil 3420 } 3421 3422 // dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS 3423 // connection. 3424 func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { 3425 dialer := &tls.Dialer{ 3426 Config: cfg, 3427 } 3428 cn, err := dialer.DialContext(ctx, network, addr) 3429 if err != nil { 3430 return nil, err 3431 } 3432 tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed 3433 return tlsCn, nil 3434 }