github.com/Kolosok86/http@v0.1.2/http2/transport.go (about) 1 // Copyright 2015 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // Transport code. 6 7 package http2 8 9 import ( 10 "bufio" 11 "bytes" 12 "compress/gzip" 13 "context" 14 "crypto/rand" 15 "errors" 16 "fmt" 17 "io" 18 "io/fs" 19 "log" 20 "math" 21 mathrand "math/rand" 22 "net" 23 "net/http/httptrace" 24 "net/textproto" 25 "os" 26 "sort" 27 "strconv" 28 "strings" 29 "sync" 30 "sync/atomic" 31 "time" 32 33 "github.com/Kolosok86/http" 34 tls "github.com/refraction-networking/utls" 35 "golang.org/x/net/http/httpguts" 36 "golang.org/x/net/http2/hpack" 37 "golang.org/x/net/idna" 38 ) 39 40 const ( 41 // transportDefaultConnFlow is how many connection-level flow control 42 // tokens we give the server at start-up, past the default 64k. 43 transportDefaultConnFlow = 0xEF0001 44 45 // transportDefaultStreamFlow is how many stream-level flow 46 // control tokens we announce to the peer, and how many bytes 47 // we buffer per stream. 48 transportDefaultStreamFlow = 4 << 20 49 50 defaultUserAgent = "Go-http-client/2.0" 51 52 // initialMaxConcurrentStreams is a connections maxConcurrentStreams until 53 // it's received servers initial SETTINGS frame, which corresponds with the 54 // spec's minimum recommended value. 55 initialMaxConcurrentStreams = 100 56 57 // defaultMaxConcurrentStreams is a connections default maxConcurrentStreams 58 // if the server doesn't include one in its initial SETTINGS frame. 59 defaultMaxConcurrentStreams = 1000 60 ) 61 62 // Transport is an HTTP/2 Transport. 63 // 64 // A Transport internally caches connections to servers. It is safe 65 // for concurrent use by multiple goroutines. 66 type Transport struct { 67 // DialTLSContext specifies an optional dial function with context for 68 // creating TLS connections for requests. 69 // 70 // If DialTLSContext and DialTLS is nil, tls.Dial is used. 71 // 72 // If the returned net.Conn has a ConnectionState method like tls.Conn, 73 // it will be used to set http.Response.TLS. 74 DialTLSContext func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error) 75 76 // DialTLS specifies an optional dial function for creating 77 // TLS connections for requests. 78 // 79 // If DialTLSContext and DialTLS is nil, tls.Dial is used. 80 // 81 // Deprecated: Use DialTLSContext instead, which allows the transport 82 // to cancel dials as soon as they are no longer needed. 83 // If both are set, DialTLSContext takes priority. 84 DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) 85 86 // TLSClientConfig specifies the TLS configuration to use with 87 // tls.Client. If nil, the default configuration is used. 88 TLSClientConfig *tls.Config 89 90 // ConnPool optionally specifies an alternate connection pool to use. 91 // If nil, the default is used. 92 ConnPool ClientConnPool 93 94 // DisableCompression, if true, prevents the Transport from 95 // requesting compression with an "Accept-Encoding: gzip" 96 // request header when the Request contains no existing 97 // Accept-Encoding value. If the Transport requests gzip on 98 // its own and gets a gzipped response, it's transparently 99 // decoded in the Response.Body. However, if the user 100 // explicitly requested gzip it is not automatically 101 // uncompressed. 102 DisableCompression bool 103 104 // AllowHTTP, if true, permits HTTP/2 requests using the insecure, 105 // plain-text "http" scheme. 
Note that this does not enable h2c support. 106 AllowHTTP bool 107 108 // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to 109 // send in the initial settings frame. It is how many bytes 110 // of response headers are allowed. Unlike the http2 spec, zero here 111 // means to use a default limit (currently 10MB). If you actually 112 // want to advertise an unlimited value to the peer, Transport 113 // interprets the highest possible value here (0xffffffff or 1<<32-1) 114 // to mean no limit. 115 MaxHeaderListSize uint32 116 117 // MaxReadFrameSize is the http2 SETTINGS_MAX_FRAME_SIZE to send in the 118 // initial settings frame. It is the size in bytes of the largest frame 119 // payload that the sender is willing to receive. If 0, no setting is 120 // sent, and the value is provided by the peer, which should be 16384 121 // according to the spec: 122 // https://datatracker.ietf.org/doc/html/rfc7540#section-6.5.2. 123 // Values are bounded in the range 16k to 16M. 124 MaxReadFrameSize uint32 125 126 // MaxDecoderHeaderTableSize optionally specifies the http2 127 // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It 128 // informs the remote endpoint of the maximum size of the header compression 129 // table used to decode header blocks, in octets. If zero, the default value 130 // of 4096 is used. 131 MaxDecoderHeaderTableSize uint32 132 133 // MaxEncoderHeaderTableSize optionally specifies an upper limit for the 134 // header compression table used for encoding request headers. Received 135 // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero, 136 // the default value of 4096 is used. 137 MaxEncoderHeaderTableSize uint32 138 139 // StrictMaxConcurrentStreams controls whether the server's 140 // SETTINGS_MAX_CONCURRENT_STREAMS should be respected 141 // globally. If false, new TCP connections are created to the 142 // server as needed to keep each under the per-connection 143 // SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the 144 // server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as 145 // a global limit and callers of RoundTrip block when needed, 146 // waiting for their turn. 147 StrictMaxConcurrentStreams bool 148 149 // ReadIdleTimeout is the timeout after which a health check using a ping 150 // frame will be carried out if no frame is received on the connection. 151 // Note that a ping response will be considered a received frame, so if 152 // there is no other traffic on the connection, the health check will 153 // be performed every ReadIdleTimeout interval. 154 // If zero, no health check is performed. 155 ReadIdleTimeout time.Duration 156 157 // PingTimeout is the timeout after which the connection will be closed 158 // if a response to Ping is not received. 159 // Defaults to 15s. 160 PingTimeout time.Duration 161 162 // WriteByteTimeout is the timeout after which the connection will be 163 // closed if no data can be written to it. The timeout begins when data is 164 // available to write, and is extended whenever any bytes are written. 165 WriteByteTimeout time.Duration 166 167 // CountError, if non-nil, is called on HTTP/2 transport errors. 168 // It's intended to increment a metric for monitoring, such 169 // as an expvar or Prometheus metric. 170 // The errType consists of only ASCII word characters. 171 CountError func(errType string) 172 173 // t1, if non-nil, is the standard library Transport using 174 // this transport. Its settings are used (but not its 175 // RoundTrip method, etc).
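//
// Example (an illustrative sketch, not part of this file): configuring this
// package's Transport directly, from a caller that imports it as http2. The
// fields used are the ones declared in this struct; http.Client and its Get
// method are assumed to mirror net/http in this fork, and tls is the
// refraction-networking/utls package imported above.
//
//	t := &http2.Transport{
//		TLSClientConfig: &tls.Config{},
//		ReadIdleTimeout: 30 * time.Second, // ping the server if the conn is silent this long
//		PingTimeout:     10 * time.Second, // close the conn if the ping gets no reply
//	}
//	client := &http.Client{Transport: t}
//	res, err := client.Get("https://example.com")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer res.Body.Close()
//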
176 t1 *http.Transport 177 178 connPoolOnce sync.Once 179 connPoolOrDef ClientConnPool // non-nil version of ConnPool 180 181 // Settings 182 InitialWindowSize uint32 // if nil, will use global initialWindowSize 183 HeaderTableSize uint32 // if nil, will use global initialHeaderTableSize 184 InitMaxReadFrameSize uint32 // if nil, will use global defaultMaxReadFrameSize 185 } 186 187 func (t *Transport) maxHeaderListSize() uint32 { 188 if t.MaxHeaderListSize == 0 { 189 return 10 << 20 190 } 191 if t.MaxHeaderListSize == 0xffffffff { 192 return 0 193 } 194 return t.MaxHeaderListSize 195 } 196 197 func (t *Transport) maxFrameReadSize() uint32 { 198 if t.MaxReadFrameSize == 0 { 199 return 0 // use the default provided by the peer 200 } 201 if t.MaxReadFrameSize < minMaxFrameSize { 202 return minMaxFrameSize 203 } 204 if t.MaxReadFrameSize > maxFrameSize { 205 return maxFrameSize 206 } 207 return t.MaxReadFrameSize 208 } 209 210 func (t *Transport) disableCompression() bool { 211 return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) 212 } 213 214 func (t *Transport) pingTimeout() time.Duration { 215 if t.PingTimeout == 0 { 216 return 15 * time.Second 217 } 218 return t.PingTimeout 219 220 } 221 222 // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. 223 // It returns an error if t1 has already been HTTP/2-enabled. 224 // 225 // Use ConfigureTransports instead to configure the HTTP/2 Transport. 226 func ConfigureTransport(t1 *http.Transport) error { 227 _, err := ConfigureTransports(t1) 228 return err 229 } 230 231 // ConfigureTransports configures a net/http HTTP/1 Transport to use HTTP/2. 232 // It returns a new HTTP/2 Transport for further configuration. 233 // It returns an error if t1 has already been HTTP/2-enabled. 234 func ConfigureTransports(t1 *http.Transport) (*Transport, error) { 235 return configureTransports(t1) 236 } 237 238 func configureTransports(t1 *http.Transport) (*Transport, error) { 239 connPool := new(clientConnPool) 240 t2 := &Transport{ 241 ConnPool: noDialClientConnPool{connPool}, 242 t1: t1, 243 } 244 connPool.t = t2 245 if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { 246 return nil, err 247 } 248 if t1.TLSClientConfig == nil { 249 t1.TLSClientConfig = new(tls.Config) 250 } 251 if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { 252 t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) 253 } 254 if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { 255 t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") 256 } 257 upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { 258 addr := authorityAddr("https", authority) 259 if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { 260 go c.Close() 261 return erringRoundTripper{err} 262 } else if !used { 263 // Turns out we don't need this c. 264 // For example, two goroutines made requests to the same host 265 // at the same time, both kicking off TCP dials. 
(since protocol 266 // was unknown) 267 go c.Close() 268 } 269 return t2 270 } 271 if m := t1.TLSNextProto; len(m) == 0 { 272 t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ 273 "h2": upgradeFn, 274 } 275 } else { 276 m["h2"] = upgradeFn 277 } 278 return t2, nil 279 } 280 281 func (t *Transport) connPool() ClientConnPool { 282 t.connPoolOnce.Do(t.initConnPool) 283 return t.connPoolOrDef 284 } 285 286 func (t *Transport) initConnPool() { 287 if t.ConnPool != nil { 288 t.connPoolOrDef = t.ConnPool 289 } else { 290 t.connPoolOrDef = &clientConnPool{t: t} 291 } 292 } 293 294 // ClientConn is the state of a single HTTP/2 client connection to an 295 // HTTP/2 server. 296 type ClientConn struct { 297 t *Transport 298 tconn net.Conn // usually *tls.Conn, except specialized impls 299 tconnClosed bool 300 tlsState *tls.ConnectionState // nil only for specialized impls 301 reused uint32 // whether conn is being reused; atomic 302 singleUse bool // whether being used for a single http.Request 303 getConnCalled bool // used by clientConnPool 304 305 // readLoop goroutine fields: 306 readerDone chan struct{} // closed on error 307 readerErr error // set before readerDone is closed 308 309 idleTimeout time.Duration // or 0 for never 310 idleTimer *time.Timer 311 312 mu sync.Mutex // guards following 313 cond *sync.Cond // hold mu; broadcast on flow/closed changes 314 flow outflow // our conn-level flow control quota (cs.outflow is per stream) 315 inflow inflow // peer's conn-level flow control 316 doNotReuse bool // whether conn is marked to not be reused for any future requests 317 closing bool 318 closed bool 319 seenSettings bool // true if we've seen a settings frame, false otherwise 320 wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back 321 goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received 322 goAwayDebug string // goAway frame's debug data, retained as a string 323 streams map[uint32]*clientStream // client-initiated 324 streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip 325 nextStreamID uint32 326 pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams 327 pings map[[8]byte]chan struct{} // in flight ping data to notification channel 328 br *bufio.Reader 329 lastActive time.Time 330 lastIdle time.Time // time last idle 331 // Settings from peer: (also guarded by wmu) 332 maxFrameSize uint32 333 maxConcurrentStreams uint32 334 peerMaxHeaderListSize uint64 335 peerMaxHeaderTableSize uint32 336 initialWindowSize uint32 337 338 // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. 339 // Write to reqHeaderMu to lock it, read from it to unlock. 340 // Lock reqmu BEFORE mu or wmu. 341 reqHeaderMu chan struct{} 342 343 // wmu is held while writing. 344 // Acquire BEFORE mu when holding both, to avoid blocking mu on network writes. 345 // Only acquire both at the same time when changing peer settings. 346 wmu sync.Mutex 347 bw *bufio.Writer 348 fr *Framer 349 werr error // first write error that has occurred 350 hbuf bytes.Buffer // HPACK encoder writes into this 351 henc *hpack.Encoder 352 } 353 354 // clientStream is the state for a single HTTP/2 stream. One of these 355 // is created for each Transport.RoundTrip call. 356 type clientStream struct { 357 cc *ClientConn 358 359 // Fields of Request that we may access even after the response body is closed. 
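//
// Example (sketch): upgrading an existing HTTP/1 transport with
// ConfigureTransports (defined above) so that "https" requests negotiate
// HTTP/2 over ALPN when the server offers it. http.Transport and http.Client
// here are this fork's net/http types.
//
//	t1 := &http.Transport{TLSClientConfig: &tls.Config{}}
//	t2, err := http2.ConfigureTransports(t1)
//	if err != nil {
//		log.Fatal(err)
//	}
//	t2.ReadIdleTimeout = 30 * time.Second // optional HTTP/2-level tuning
//	client := &http.Client{Transport: t1} // t1 now upgrades to h2 when negotiated
//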
360 ctx context.Context 361 reqCancel <-chan struct{} 362 363 trace *httptrace.ClientTrace // or nil 364 ID uint32 365 bufPipe pipe // buffered pipe with the flow-controlled response payload 366 requestedGzip bool 367 isHead bool 368 369 abortOnce sync.Once 370 abort chan struct{} // closed to signal stream should end immediately 371 abortErr error // set if abort is closed 372 373 peerClosed chan struct{} // closed when the peer sends an END_STREAM flag 374 donec chan struct{} // closed after the stream is in the closed state 375 on100 chan struct{} // buffered; written to if a 100 is received 376 377 respHeaderRecv chan struct{} // closed when headers are received 378 res *http.Response // set if respHeaderRecv is closed 379 380 flow outflow // guarded by cc.mu 381 inflow inflow // guarded by cc.mu 382 bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read 383 readErr error // sticky read error; owned by transportResponseBody.Read 384 385 reqBody io.ReadCloser 386 reqBodyContentLength int64 // -1 means unknown 387 reqBodyClosed chan struct{} // guarded by cc.mu; non-nil on Close, closed when done 388 389 // owned by writeRequest: 390 sentEndStream bool // sent an END_STREAM flag to the peer 391 sentHeaders bool 392 393 // owned by clientConnReadLoop: 394 firstByte bool // got the first response byte 395 pastHeaders bool // got first MetaHeadersFrame (actual headers) 396 pastTrailers bool // got optional second MetaHeadersFrame (trailers) 397 num1xx uint8 // number of 1xx responses seen 398 readClosed bool // peer sent an END_STREAM flag 399 readAborted bool // read loop reset the stream 400 401 trailer http.Header // accumulated trailers 402 resTrailer *http.Header // client's Response.Trailer 403 } 404 405 var got1xxFuncForTests func(int, textproto.MIMEHeader) error 406 407 // get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func, 408 // if any. It returns nil if not set or if the Go version is too old. 409 func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error { 410 if fn := got1xxFuncForTests; fn != nil { 411 return fn 412 } 413 return traceGot1xxResponseFunc(cs.trace) 414 } 415 416 func (cs *clientStream) abortStream(err error) { 417 cs.cc.mu.Lock() 418 defer cs.cc.mu.Unlock() 419 cs.abortStreamLocked(err) 420 } 421 422 func (cs *clientStream) abortStreamLocked(err error) { 423 cs.abortOnce.Do(func() { 424 cs.abortErr = err 425 close(cs.abort) 426 }) 427 if cs.reqBody != nil { 428 cs.closeReqBodyLocked() 429 } 430 // TODO(dneil): Clean up tests where cs.cc.cond is nil. 431 if cs.cc.cond != nil { 432 // Wake up writeRequestBody if it is waiting on flow control. 
433 cs.cc.cond.Broadcast() 434 } 435 } 436 437 func (cs *clientStream) abortRequestBodyWrite() { 438 cc := cs.cc 439 cc.mu.Lock() 440 defer cc.mu.Unlock() 441 if cs.reqBody != nil && cs.reqBodyClosed == nil { 442 cs.closeReqBodyLocked() 443 cc.cond.Broadcast() 444 } 445 } 446 447 func (cs *clientStream) closeReqBodyLocked() { 448 if cs.reqBodyClosed != nil { 449 return 450 } 451 cs.reqBodyClosed = make(chan struct{}) 452 reqBodyClosed := cs.reqBodyClosed 453 go func() { 454 cs.reqBody.Close() 455 close(reqBodyClosed) 456 }() 457 } 458 459 type stickyErrWriter struct { 460 conn net.Conn 461 timeout time.Duration 462 err *error 463 } 464 465 func (sew stickyErrWriter) Write(p []byte) (n int, err error) { 466 if *sew.err != nil { 467 return 0, *sew.err 468 } 469 for { 470 if sew.timeout != 0 { 471 sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) 472 } 473 nn, err := sew.conn.Write(p[n:]) 474 n += nn 475 if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { 476 // Keep extending the deadline so long as we're making progress. 477 continue 478 } 479 if sew.timeout != 0 { 480 sew.conn.SetWriteDeadline(time.Time{}) 481 } 482 *sew.err = err 483 return n, err 484 } 485 } 486 487 // noCachedConnError is the concrete type of ErrNoCachedConn, which 488 // needs to be detected by net/http regardless of whether it's its 489 // bundled version (in h2_bundle.go with a rewritten type name) or 490 // from a user's x/net/http2. As such, as it has a unique method name 491 // (IsHTTP2NoCachedConnError) that net/http sniffs for via func 492 // isNoCachedConnError. 493 type noCachedConnError struct{} 494 495 func (noCachedConnError) IsHTTP2NoCachedConnError() {} 496 func (noCachedConnError) Error() string { return "http2: no cached connection was available" } 497 498 // isNoCachedConnError reports whether err is of type noCachedConnError 499 // or its equivalent renamed type in net/http2's h2_bundle.go. Both types 500 // may coexist in the same running program. 501 func isNoCachedConnError(err error) bool { 502 _, ok := err.(interface{ IsHTTP2NoCachedConnError() }) 503 return ok 504 } 505 506 var ErrNoCachedConn error = noCachedConnError{} 507 508 // RoundTripOpt are options for the Transport.RoundTripOpt method. 509 type RoundTripOpt struct { 510 // OnlyCachedConn controls whether RoundTripOpt may 511 // create a new TCP connection. If set true and 512 // no cached connection is available, RoundTripOpt 513 // will return ErrNoCachedConn. 514 OnlyCachedConn bool 515 } 516 517 func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { 518 return t.RoundTripOpt(req, RoundTripOpt{}) 519 } 520 521 // authorityAddr returns a given authority (a host/IP, or host:port / ip:port) 522 // and returns a host:port. The port 443 is added if needed. 
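// For example (illustrative values, per the logic below):
//
//	authorityAddr("https", "example.com")      == "example.com:443"
//	authorityAddr("http", "example.com")       == "example.com:80"
//	authorityAddr("https", "example.com:8443") == "example.com:8443"
//	authorityAddr("https", "[::1]")            == "[::1]:443"
//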
523 func authorityAddr(scheme string, authority string) (addr string) { 524 host, port, err := net.SplitHostPort(authority) 525 if err != nil { // authority didn't have a port 526 port = "443" 527 if scheme == "http" { 528 port = "80" 529 } 530 host = authority 531 } 532 if a, err := idna.ToASCII(host); err == nil { 533 host = a 534 } 535 // IPv6 address literal, without a port: 536 if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { 537 return host + ":" + port 538 } 539 return net.JoinHostPort(host, port) 540 } 541 542 var retryBackoffHook func(time.Duration) *time.Timer 543 544 func backoffNewTimer(d time.Duration) *time.Timer { 545 if retryBackoffHook != nil { 546 return retryBackoffHook(d) 547 } 548 return time.NewTimer(d) 549 } 550 551 // RoundTripOpt is like RoundTrip, but takes options. 552 func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { 553 if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { 554 return nil, errors.New("http2: unsupported scheme") 555 } 556 557 addr := authorityAddr(req.URL.Scheme, req.URL.Host) 558 for retry := 0; ; retry++ { 559 cc, err := t.connPool().GetClientConn(req, addr) 560 if err != nil { 561 t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) 562 return nil, err 563 } 564 reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) 565 traceGotConn(req, cc, reused) 566 res, err := cc.RoundTrip(req) 567 if err != nil && retry <= 6 { 568 if req, err = shouldRetryRequest(req, err); err == nil { 569 // After the first retry, do exponential backoff with 10% jitter. 570 if retry == 0 { 571 t.vlogf("RoundTrip retrying after failure: %v", err) 572 continue 573 } 574 backoff := float64(uint(1) << (uint(retry) - 1)) 575 backoff += backoff * (0.1 * mathrand.Float64()) 576 d := time.Second * time.Duration(backoff) 577 timer := backoffNewTimer(d) 578 select { 579 case <-timer.C: 580 t.vlogf("RoundTrip retrying after failure: %v", err) 581 continue 582 case <-req.Context().Done(): 583 timer.Stop() 584 err = req.Context().Err() 585 } 586 } 587 } 588 if err != nil { 589 t.vlogf("RoundTrip failure: %v", err) 590 return nil, err 591 } 592 return res, nil 593 } 594 } 595 596 // CloseIdleConnections closes any connections which were previously 597 // connected from previous requests but are now sitting idle. 598 // It does not interrupt any connections currently in use. 599 func (t *Transport) CloseIdleConnections() { 600 if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { 601 cp.closeIdleConnections() 602 } 603 } 604 605 var ( 606 errClientConnClosed = errors.New("http2: client conn is closed") 607 errClientConnUnusable = errors.New("http2: client conn not usable") 608 errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") 609 ) 610 611 // shouldRetryRequest is called by RoundTrip when a request fails to get 612 // response headers. It is always called with a non-nil error. 613 // It returns either a request to retry (either the same request, or a 614 // modified clone), or an error if the request can't be replayed. 615 func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) { 616 if !canRetryError(err) { 617 return nil, err 618 } 619 // If the Body is nil (or http.NoBody), it's safe to reuse 620 // this request and its Body. 
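//
// Example (sketch): making a request body replayable so the retry paths in
// this function can rebuild it instead of failing. In net/http, NewRequest
// already sets GetBody for *bytes.Reader and similar types; for other body
// readers it can be supplied by hand, as shown here (url is a placeholder):
//
//	payload := []byte(`{"hello":"world"}`)
//	req, _ := http.NewRequest("POST", url, bytes.NewReader(payload))
//	req.GetBody = func() (io.ReadCloser, error) { // lets the transport replay the body on retry
//		return io.NopCloser(bytes.NewReader(payload)), nil
//	}
//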
621 if req.Body == nil || req.Body == http.NoBody { 622 return req, nil 623 } 624 625 // If the request body can be reset back to its original 626 // state via the optional req.GetBody, do that. 627 if req.GetBody != nil { 628 body, err := req.GetBody() 629 if err != nil { 630 return nil, err 631 } 632 newReq := *req 633 newReq.Body = body 634 return &newReq, nil 635 } 636 637 // The Request.Body can't reset back to the beginning, but we 638 // don't seem to have started to read from it yet, so reuse 639 // the request directly. 640 if err == errClientConnUnusable { 641 return req, nil 642 } 643 644 return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) 645 } 646 647 func canRetryError(err error) bool { 648 if err == errClientConnUnusable || err == errClientConnGotGoAway { 649 return true 650 } 651 if se, ok := err.(StreamError); ok { 652 if se.Code == ErrCodeProtocol && se.Cause == errFromPeer { 653 // See golang/go#47635, golang/go#42777 654 return true 655 } 656 return se.Code == ErrCodeRefusedStream 657 } 658 return false 659 } 660 661 func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { 662 host, _, err := net.SplitHostPort(addr) 663 if err != nil { 664 return nil, err 665 } 666 tconn, err := t.dialTLS(ctx, "tcp", addr, t.newTLSConfig(host)) 667 if err != nil { 668 return nil, err 669 } 670 return t.newClientConn(tconn, singleUse) 671 } 672 673 func (t *Transport) newTLSConfig(host string) *tls.Config { 674 cfg := new(tls.Config) 675 if t.TLSClientConfig != nil { 676 *cfg = *t.TLSClientConfig.Clone() 677 } 678 if !strSliceContains(cfg.NextProtos, NextProtoTLS) { 679 cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) 680 } 681 if cfg.ServerName == "" { 682 cfg.ServerName = host 683 } 684 return cfg 685 } 686 687 func (t *Transport) dialTLS(ctx context.Context, network, addr string, tlsCfg *tls.Config) (net.Conn, error) { 688 if t.DialTLSContext != nil { 689 return t.DialTLSContext(ctx, network, addr, tlsCfg) 690 } else if t.DialTLS != nil { 691 return t.DialTLS(network, addr, tlsCfg) 692 } 693 694 tlsCn, err := t.dialTLSWithContext(ctx, network, addr, tlsCfg) 695 if err != nil { 696 return nil, err 697 } 698 state := tlsCn.ConnectionState() 699 if p := state.NegotiatedProtocol; p != NextProtoTLS { 700 return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) 701 } 702 if !state.NegotiatedProtocolIsMutual { 703 return nil, errors.New("http2: could not negotiate protocol mutually") 704 } 705 return tlsCn, nil 706 } 707 708 // disableKeepAlives reports whether connections should be closed as 709 // soon as possible after handling the first request. 
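//
// Example (sketch): a custom DialTLSContext that performs the handshake with
// the utls package imported above, so the ClientHello can mimic a browser.
// UClient and HelloChrome_Auto are utls names; the config passed in already
// carries ServerName and "h2" in NextProtos (see newTLSConfig above), and the
// negotiated ALPN protocol must still end up as "h2" for the connection to work.
//
//	t := &http2.Transport{
//		DialTLSContext: func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error) {
//			raw, err := (&net.Dialer{}).DialContext(ctx, network, addr)
//			if err != nil {
//				return nil, err
//			}
//			uconn := tls.UClient(raw, cfg, tls.HelloChrome_Auto)
//			if err := uconn.Handshake(); err != nil {
//				raw.Close()
//				return nil, err
//			}
//			return uconn, nil // *tls.UConn has ConnectionState, so Response.TLS gets populated
//		},
//	}
//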
710 func (t *Transport) disableKeepAlives() bool { 711 return t.t1 != nil && t.t1.DisableKeepAlives 712 } 713 714 func (t *Transport) expectContinueTimeout() time.Duration { 715 if t.t1 == nil { 716 return 0 717 } 718 return t.t1.ExpectContinueTimeout 719 } 720 721 func (t *Transport) maxDecoderHeaderTableSize() uint32 { 722 if v := t.MaxDecoderHeaderTableSize; v > 0 { 723 return v 724 } 725 726 if t.HeaderTableSize != 0 { 727 return t.HeaderTableSize 728 } 729 730 return initialHeaderTableSize 731 } 732 733 func (t *Transport) maxEncoderHeaderTableSize() uint32 { 734 if v := t.MaxEncoderHeaderTableSize; v > 0 { 735 return v 736 } 737 738 if t.HeaderTableSize != 0 { 739 return t.HeaderTableSize 740 } 741 742 return initialHeaderTableSize 743 } 744 745 func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { 746 return t.newClientConn(c, t.disableKeepAlives()) 747 } 748 749 func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { 750 cc := &ClientConn{ 751 t: t, 752 tconn: c, 753 readerDone: make(chan struct{}), 754 nextStreamID: 1, 755 maxFrameSize: 16 << 10, // spec default 756 initialWindowSize: 65535, // spec default 757 maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. 758 peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 759 streams: make(map[uint32]*clientStream), 760 singleUse: singleUse, 761 wantSettingsAck: true, 762 pings: make(map[[8]byte]chan struct{}), 763 reqHeaderMu: make(chan struct{}, 1), 764 } 765 if d := t.idleConnTimeout(); d != 0 { 766 cc.idleTimeout = d 767 cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) 768 } 769 if VerboseLogs { 770 t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) 771 } 772 773 cc.cond = sync.NewCond(&cc.mu) 774 cc.flow.add(int32(initialWindowSize)) 775 776 // TODO: adjust this writer size to account for frame size + 777 // MTU + crypto/tls record padding. 
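//
// The fork-specific Transport fields InitialWindowSize, HeaderTableSize and
// InitMaxReadFrameSize shape the initial SETTINGS frame assembled below:
// HeaderTableSize feeds SETTINGS_HEADER_TABLE_SIZE, InitialWindowSize feeds
// SETTINGS_INITIAL_WINDOW_SIZE, and InitMaxReadFrameSize (despite its name)
// feeds SETTINGS_MAX_HEADER_LIST_SIZE. A sketch with illustrative,
// browser-like values:
//
//	t := &http2.Transport{
//		HeaderTableSize:      65536,
//		InitialWindowSize:    6291456, // 6 MiB stream window
//		InitMaxReadFrameSize: 262144,
//	}
//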
778 cc.bw = bufio.NewWriter(stickyErrWriter{ 779 conn: c, 780 timeout: t.WriteByteTimeout, 781 err: &cc.werr, 782 }) 783 cc.br = bufio.NewReader(c) 784 cc.fr = NewFramer(cc.bw, cc.br) 785 if t.maxFrameReadSize() != 0 { 786 cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) 787 } 788 if t.CountError != nil { 789 cc.fr.countError = t.CountError 790 } 791 maxHeaderTableSize := t.maxDecoderHeaderTableSize() 792 cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) 793 cc.fr.MaxHeaderListSize = t.maxHeaderListSize() 794 795 cc.henc = hpack.NewEncoder(&cc.hbuf) 796 cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) 797 cc.peerMaxHeaderTableSize = maxHeaderTableSize 798 799 if t.AllowHTTP { 800 cc.nextStreamID = 3 801 } 802 803 if cs, ok := c.(connectionStater); ok { 804 state := cs.ConnectionState() 805 cc.tlsState = &state 806 } 807 808 var initialSettings []Setting 809 810 if t.HeaderTableSize != 0 { 811 initialSettings = append(initialSettings, Setting{ID: SettingHeaderTableSize, Val: t.HeaderTableSize}) 812 } else { 813 initialSettings = append(initialSettings, Setting{ID: SettingHeaderTableSize, Val: initialHeaderTableSize}) 814 } 815 816 initialSettings = append(initialSettings, 817 Setting{ID: SettingEnablePush, Val: 0}, 818 Setting{ID: SettingMaxConcurrentStreams, Val: defaultMaxConcurrentStreams}, 819 ) 820 821 if t.InitialWindowSize != 0 { 822 initialSettings = append(initialSettings, Setting{ID: SettingInitialWindowSize, Val: t.InitialWindowSize}) 823 } else { 824 initialSettings = append(initialSettings, Setting{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}) 825 } 826 827 if t.InitMaxReadFrameSize != 0 { 828 initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: t.InitMaxReadFrameSize}) 829 } else { 830 initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: defaultMaxReadFrameSize}) 831 } 832 833 if max := t.maxFrameReadSize(); max != 0 { 834 initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) 835 } 836 837 cc.bw.Write(clientPreface) 838 cc.fr.WriteSettings(initialSettings...) 839 cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) 840 cc.inflow.init(transportDefaultConnFlow + initialWindowSize) 841 cc.bw.Flush() 842 if cc.werr != nil { 843 cc.Close() 844 return nil, cc.werr 845 } 846 847 go cc.readLoop() 848 return cc, nil 849 } 850 851 func (cc *ClientConn) healthCheck() { 852 pingTimeout := cc.t.pingTimeout() 853 // We don't need to periodically ping in the health check, because the readLoop of ClientConn will 854 // trigger the healthCheck again if there is no frame received. 855 ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) 856 defer cancel() 857 cc.vlogf("http2: Transport sending health check") 858 err := cc.Ping(ctx) 859 if err != nil { 860 cc.vlogf("http2: Transport health check failure: %v", err) 861 cc.closeForLostPing() 862 } else { 863 cc.vlogf("http2: Transport health check success") 864 } 865 } 866 867 // SetDoNotReuse marks cc as not reusable for future HTTP requests. 868 func (cc *ClientConn) SetDoNotReuse() { 869 cc.mu.Lock() 870 defer cc.mu.Unlock() 871 cc.doNotReuse = true 872 } 873 874 func (cc *ClientConn) setGoAway(f *GoAwayFrame) { 875 cc.mu.Lock() 876 defer cc.mu.Unlock() 877 878 old := cc.goAway 879 cc.goAway = f 880 881 // Merge the previous and current GoAway error frames. 
882 if cc.goAwayDebug == "" { 883 cc.goAwayDebug = string(f.DebugData()) 884 } 885 if old != nil && old.ErrCode != ErrCodeNo { 886 cc.goAway.ErrCode = old.ErrCode 887 } 888 last := f.LastStreamID 889 for streamID, cs := range cc.streams { 890 if streamID > last { 891 cs.abortStreamLocked(errClientConnGotGoAway) 892 } 893 } 894 } 895 896 // CanTakeNewRequest reports whether the connection can take a new request, 897 // meaning it has not been closed or received or sent a GOAWAY. 898 // 899 // If the caller is going to immediately make a new request on this 900 // connection, use ReserveNewRequest instead. 901 func (cc *ClientConn) CanTakeNewRequest() bool { 902 cc.mu.Lock() 903 defer cc.mu.Unlock() 904 return cc.canTakeNewRequestLocked() 905 } 906 907 // ReserveNewRequest is like CanTakeNewRequest but also reserves a 908 // concurrent stream in cc. The reservation is decremented on the 909 // next call to RoundTrip. 910 func (cc *ClientConn) ReserveNewRequest() bool { 911 cc.mu.Lock() 912 defer cc.mu.Unlock() 913 if st := cc.idleStateLocked(); !st.canTakeNewRequest { 914 return false 915 } 916 cc.streamsReserved++ 917 return true 918 } 919 920 // ClientConnState describes the state of a ClientConn. 921 type ClientConnState struct { 922 // Closed is whether the connection is closed. 923 Closed bool 924 925 // Closing is whether the connection is in the process of 926 // closing. It may be closing due to shutdown, being a 927 // single-use connection, being marked as DoNotReuse, or 928 // having received a GOAWAY frame. 929 Closing bool 930 931 // StreamsActive is how many streams are active. 932 StreamsActive int 933 934 // StreamsReserved is how many streams have been reserved via 935 // ClientConn.ReserveNewRequest. 936 StreamsReserved int 937 938 // StreamsPending is how many requests have been sent in excess 939 // of the peer's advertised MaxConcurrentStreams setting and 940 // are waiting for other streams to complete. 941 StreamsPending int 942 943 // MaxConcurrentStreams is how many concurrent streams the 944 // peer advertised as acceptable. Zero means no SETTINGS 945 // frame has been received yet. 946 MaxConcurrentStreams uint32 947 948 // LastIdle, if non-zero, is when the connection last 949 // transitioned to idle state. 950 LastIdle time.Time 951 } 952 953 // State returns a snapshot of cc's state. 954 func (cc *ClientConn) State() ClientConnState { 955 cc.wmu.Lock() 956 maxConcurrent := cc.maxConcurrentStreams 957 if !cc.seenSettings { 958 maxConcurrent = 0 959 } 960 cc.wmu.Unlock() 961 962 cc.mu.Lock() 963 defer cc.mu.Unlock() 964 return ClientConnState{ 965 Closed: cc.closed, 966 Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil, 967 StreamsActive: len(cc.streams), 968 StreamsReserved: cc.streamsReserved, 969 StreamsPending: cc.pendingRequests, 970 LastIdle: cc.lastIdle, 971 MaxConcurrentStreams: maxConcurrent, 972 } 973 } 974 975 // clientConnIdleState describes the suitability of a client 976 // connection to initiate a new RoundTrip request. 
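//
// Example (sketch): driving a ClientConn directly with the methods above,
// given tlsConn, an already-dialed connection that negotiated "h2", and req,
// an *http.Request for the same host. ClientConn.RoundTrip is defined later
// in this file.
//
//	cc, err := t.NewClientConn(tlsConn)
//	if err != nil {
//		log.Fatal(err)
//	}
//	if cc.ReserveNewRequest() {
//		res, err := cc.RoundTrip(req)
//		if err != nil {
//			log.Fatal(err)
//		}
//		res.Body.Close()
//	}
//	st := cc.State()
//	log.Printf("active=%d reserved=%d max=%d", st.StreamsActive, st.StreamsReserved, st.MaxConcurrentStreams)
//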
977 type clientConnIdleState struct { 978 canTakeNewRequest bool 979 } 980 981 func (cc *ClientConn) idleState() clientConnIdleState { 982 cc.mu.Lock() 983 defer cc.mu.Unlock() 984 return cc.idleStateLocked() 985 } 986 987 func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { 988 if cc.singleUse && cc.nextStreamID > 1 { 989 return 990 } 991 var maxConcurrentOkay bool 992 if cc.t.StrictMaxConcurrentStreams { 993 // We'll tell the caller we can take a new request to 994 // prevent the caller from dialing a new TCP 995 // connection, but then we'll block later before 996 // writing it. 997 maxConcurrentOkay = true 998 } else { 999 maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams) 1000 } 1001 1002 st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && 1003 !cc.doNotReuse && 1004 int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && 1005 !cc.tooIdleLocked() 1006 return 1007 } 1008 1009 func (cc *ClientConn) canTakeNewRequestLocked() bool { 1010 st := cc.idleStateLocked() 1011 return st.canTakeNewRequest 1012 } 1013 1014 // tooIdleLocked reports whether this connection has been been sitting idle 1015 // for too much wall time. 1016 func (cc *ClientConn) tooIdleLocked() bool { 1017 // The Round(0) strips the monontonic clock reading so the 1018 // times are compared based on their wall time. We don't want 1019 // to reuse a connection that's been sitting idle during 1020 // VM/laptop suspend if monotonic time was also frozen. 1021 return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout 1022 } 1023 1024 // onIdleTimeout is called from a time.AfterFunc goroutine. It will 1025 // only be called when we're idle, but because we're coming from a new 1026 // goroutine, there could be a new request coming in at the same time, 1027 // so this simply calls the synchronized closeIfIdle to shut down this 1028 // connection. The timer could just call closeIfIdle, but this is more 1029 // clear. 1030 func (cc *ClientConn) onIdleTimeout() { 1031 cc.closeIfIdle() 1032 } 1033 1034 func (cc *ClientConn) closeConn() { 1035 t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn) 1036 defer t.Stop() 1037 cc.tconn.Close() 1038 } 1039 1040 // A tls.Conn.Close can hang for a long time if the peer is unresponsive. 1041 // Try to shut it down more aggressively. 1042 func (cc *ClientConn) forceCloseConn() { 1043 tc, ok := cc.tconn.(*tls.Conn) 1044 if !ok { 1045 return 1046 } 1047 if nc := tlsUnderlyingConn(tc); nc != nil { 1048 nc.Close() 1049 } 1050 } 1051 1052 func (cc *ClientConn) closeIfIdle() { 1053 cc.mu.Lock() 1054 if len(cc.streams) > 0 || cc.streamsReserved > 0 { 1055 cc.mu.Unlock() 1056 return 1057 } 1058 cc.closed = true 1059 nextID := cc.nextStreamID 1060 // TODO: do clients send GOAWAY too? maybe? Just Close: 1061 cc.mu.Unlock() 1062 1063 if VerboseLogs { 1064 cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) 1065 } 1066 cc.closeConn() 1067 } 1068 1069 func (cc *ClientConn) isDoNotReuseAndIdle() bool { 1070 cc.mu.Lock() 1071 defer cc.mu.Unlock() 1072 return cc.doNotReuse && len(cc.streams) == 0 1073 } 1074 1075 var shutdownEnterWaitStateHook = func() {} 1076 1077 // Shutdown gracefully closes the client connection, waiting for running streams to complete. 
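// For example (sketch): allow in-flight streams up to five seconds to finish,
// then fall back to an immediate Close.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	if err := cc.Shutdown(ctx); err != nil {
//		cc.Close() // timed out or failed; interrupt the remaining streams
//	}
//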
1078 func (cc *ClientConn) Shutdown(ctx context.Context) error { 1079 if err := cc.sendGoAway(); err != nil { 1080 return err 1081 } 1082 // Wait for all in-flight streams to complete or connection to close 1083 done := make(chan struct{}) 1084 cancelled := false // guarded by cc.mu 1085 go func() { 1086 cc.mu.Lock() 1087 defer cc.mu.Unlock() 1088 for { 1089 if len(cc.streams) == 0 || cc.closed { 1090 cc.closed = true 1091 close(done) 1092 break 1093 } 1094 if cancelled { 1095 break 1096 } 1097 cc.cond.Wait() 1098 } 1099 }() 1100 shutdownEnterWaitStateHook() 1101 select { 1102 case <-done: 1103 cc.closeConn() 1104 return nil 1105 case <-ctx.Done(): 1106 cc.mu.Lock() 1107 // Free the goroutine above 1108 cancelled = true 1109 cc.cond.Broadcast() 1110 cc.mu.Unlock() 1111 return ctx.Err() 1112 } 1113 } 1114 1115 func (cc *ClientConn) sendGoAway() error { 1116 cc.mu.Lock() 1117 closing := cc.closing 1118 cc.closing = true 1119 maxStreamID := cc.nextStreamID 1120 cc.mu.Unlock() 1121 if closing { 1122 // GOAWAY sent already 1123 return nil 1124 } 1125 1126 cc.wmu.Lock() 1127 defer cc.wmu.Unlock() 1128 // Send a graceful shutdown frame to server 1129 if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil { 1130 return err 1131 } 1132 if err := cc.bw.Flush(); err != nil { 1133 return err 1134 } 1135 // Prevent new requests 1136 return nil 1137 } 1138 1139 // closes the client connection immediately. In-flight requests are interrupted. 1140 // err is sent to streams. 1141 func (cc *ClientConn) closeForError(err error) { 1142 cc.mu.Lock() 1143 cc.closed = true 1144 for _, cs := range cc.streams { 1145 cs.abortStreamLocked(err) 1146 } 1147 cc.cond.Broadcast() 1148 cc.mu.Unlock() 1149 cc.closeConn() 1150 } 1151 1152 // Close closes the client connection immediately. 1153 // 1154 // In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. 1155 func (cc *ClientConn) Close() error { 1156 err := errors.New("http2: client connection force closed via ClientConn.Close") 1157 cc.closeForError(err) 1158 return nil 1159 } 1160 1161 // closes the client connection immediately. In-flight requests are interrupted. 1162 func (cc *ClientConn) closeForLostPing() { 1163 err := errors.New("http2: client connection lost") 1164 if f := cc.t.CountError; f != nil { 1165 f("conn_close_lost_ping") 1166 } 1167 cc.closeForError(err) 1168 } 1169 1170 // errRequestCanceled is a copy of net/http's errRequestCanceled because it's not 1171 // exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. 1172 var errRequestCanceled = errors.New("net/http: request canceled") 1173 1174 func commaSeparatedTrailers(req *http.Request) (string, error) { 1175 keys := make([]string, 0, len(req.Trailer)) 1176 for k := range req.Trailer { 1177 k = canonicalHeader(k) 1178 switch k { 1179 case "Transfer-Encoding", "Trailer", "Content-Length": 1180 return "", fmt.Errorf("invalid Trailer key %q", k) 1181 } 1182 keys = append(keys, k) 1183 } 1184 if len(keys) > 0 { 1185 sort.Strings(keys) 1186 return strings.Join(keys, ","), nil 1187 } 1188 return "", nil 1189 } 1190 1191 func (cc *ClientConn) responseHeaderTimeout() time.Duration { 1192 if cc.t.t1 != nil { 1193 return cc.t.t1.ResponseHeaderTimeout 1194 } 1195 // No way to do this (yet?) with just an http2.Transport. Probably 1196 // no need. Request.Cancel this is the new way. We only need to support 1197 // this for compatibility with the old http.Transport fields when 1198 // we're doing transparent http2. 
1199 return 0 1200 } 1201 1202 // checkConnHeaders checks whether req has any invalid connection-level headers. 1203 // per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. 1204 // Certain headers are special-cased as okay but not transmitted later. 1205 func checkConnHeaders(req *http.Request) error { 1206 if v := req.Header.Get("Upgrade"); v != "" { 1207 return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) 1208 } 1209 if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { 1210 return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) 1211 } 1212 if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { 1213 return fmt.Errorf("http2: invalid Connection request header: %q", vv) 1214 } 1215 return nil 1216 } 1217 1218 // actualContentLength returns a sanitized version of 1219 // req.ContentLength, where 0 actually means zero (not unknown) and -1 1220 // means unknown. 1221 func actualContentLength(req *http.Request) int64 { 1222 if req.Body == nil || req.Body == http.NoBody { 1223 return 0 1224 } 1225 if req.ContentLength != 0 { 1226 return req.ContentLength 1227 } 1228 return -1 1229 } 1230 1231 func (cc *ClientConn) decrStreamReservations() { 1232 cc.mu.Lock() 1233 defer cc.mu.Unlock() 1234 cc.decrStreamReservationsLocked() 1235 } 1236 1237 func (cc *ClientConn) decrStreamReservationsLocked() { 1238 if cc.streamsReserved > 0 { 1239 cc.streamsReserved-- 1240 } 1241 } 1242 1243 func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { 1244 ctx := req.Context() 1245 cs := &clientStream{ 1246 cc: cc, 1247 ctx: ctx, 1248 reqCancel: req.Cancel, 1249 isHead: req.Method == "HEAD", 1250 reqBody: req.Body, 1251 reqBodyContentLength: actualContentLength(req), 1252 trace: httptrace.ContextClientTrace(ctx), 1253 peerClosed: make(chan struct{}), 1254 abort: make(chan struct{}), 1255 respHeaderRecv: make(chan struct{}), 1256 donec: make(chan struct{}), 1257 } 1258 go cs.doRequest(req) 1259 1260 waitDone := func() error { 1261 select { 1262 case <-cs.donec: 1263 return nil 1264 case <-ctx.Done(): 1265 return ctx.Err() 1266 case <-cs.reqCancel: 1267 return errRequestCanceled 1268 } 1269 } 1270 1271 handleResponseHeaders := func() (*http.Response, error) { 1272 res := cs.res 1273 if res.StatusCode > 299 { 1274 // On error or status code 3xx, 4xx, 5xx, etc abort any 1275 // ongoing write, assuming that the server doesn't care 1276 // about our request body. If the server replied with 1xx or 1277 // 2xx, however, then assume the server DOES potentially 1278 // want our body (e.g. full-duplex streaming: 1279 // golang.org/issue/13444). If it turns out the server 1280 // doesn't, they'll RST_STREAM us soon enough. This is a 1281 // heuristic to avoid adding knobs to Transport. Hopefully 1282 // we can keep it. 1283 cs.abortRequestBodyWrite() 1284 } 1285 res.Request = req 1286 res.TLS = cc.tlsState 1287 if res.Body == noBody && actualContentLength(req) == 0 { 1288 // If there isn't a request or response body still being 1289 // written, then wait for the stream to be closed before 1290 // RoundTrip returns. 
1291 if err := waitDone(); err != nil { 1292 return nil, err 1293 } 1294 } 1295 return res, nil 1296 } 1297 1298 for { 1299 select { 1300 case <-cs.respHeaderRecv: 1301 return handleResponseHeaders() 1302 case <-cs.abort: 1303 select { 1304 case <-cs.respHeaderRecv: 1305 // If both cs.respHeaderRecv and cs.abort are signaling, 1306 // pick respHeaderRecv. The server probably wrote the 1307 // response and immediately reset the stream. 1308 // golang.org/issue/49645 1309 return handleResponseHeaders() 1310 default: 1311 waitDone() 1312 return nil, cs.abortErr 1313 } 1314 case <-ctx.Done(): 1315 err := ctx.Err() 1316 cs.abortStream(err) 1317 return nil, err 1318 case <-cs.reqCancel: 1319 cs.abortStream(errRequestCanceled) 1320 return nil, errRequestCanceled 1321 } 1322 } 1323 } 1324 1325 // doRequest runs for the duration of the request lifetime. 1326 // 1327 // It sends the request and performs post-request cleanup (closing Request.Body, etc.). 1328 func (cs *clientStream) doRequest(req *http.Request) { 1329 err := cs.writeRequest(req) 1330 cs.cleanupWriteRequest(err) 1331 } 1332 1333 // writeRequest sends a request. 1334 // 1335 // It returns nil after the request is written, the response read, 1336 // and the request stream is half-closed by the peer. 1337 // 1338 // It returns non-nil if the request ends otherwise. 1339 // If the returned error is StreamError, the error Code may be used in resetting the stream. 1340 func (cs *clientStream) writeRequest(req *http.Request) (err error) { 1341 cc := cs.cc 1342 ctx := cs.ctx 1343 1344 if err := checkConnHeaders(req); err != nil { 1345 return err 1346 } 1347 1348 // Acquire the new-request lock by writing to reqHeaderMu. 1349 // This lock guards the critical section covering allocating a new stream ID 1350 // (requires mu) and creating the stream (requires wmu). 1351 if cc.reqHeaderMu == nil { 1352 panic("RoundTrip on uninitialized ClientConn") // for tests 1353 } 1354 select { 1355 case cc.reqHeaderMu <- struct{}{}: 1356 case <-cs.reqCancel: 1357 return errRequestCanceled 1358 case <-ctx.Done(): 1359 return ctx.Err() 1360 } 1361 1362 cc.mu.Lock() 1363 if cc.idleTimer != nil { 1364 cc.idleTimer.Stop() 1365 } 1366 cc.decrStreamReservationsLocked() 1367 if err := cc.awaitOpenSlotForStreamLocked(cs); err != nil { 1368 cc.mu.Unlock() 1369 <-cc.reqHeaderMu 1370 return err 1371 } 1372 cc.addStreamLocked(cs) // assigns stream ID 1373 if isConnectionCloseRequest(req) { 1374 cc.doNotReuse = true 1375 } 1376 cc.mu.Unlock() 1377 1378 // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? 1379 if !cc.t.disableCompression() && 1380 req.Header.Get("Accept-Encoding") == "" && 1381 req.Header.Get("Range") == "" && 1382 !cs.isHead { 1383 // Request gzip only, not deflate. Deflate is ambiguous and 1384 // not as universally supported anyway. 1385 // See: https://zlib.net/zlib_faq.html#faq39 1386 // 1387 // Note that we don't request this for HEAD requests, 1388 // due to a bug in nginx: 1389 // http://trac.nginx.org/nginx/ticket/358 1390 // https://golang.org/issue/5522 1391 // 1392 // We don't request gzip if the request is for a range, since 1393 // auto-decoding a portion of a gzipped document will just fail 1394 // anyway. 
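//
// Example (sketch): when the caller sets Accept-Encoding itself, the Transport
// neither adds gzip here nor transparently decodes the response body; the raw
// (possibly compressed) bytes are returned as-is and the caller decodes them.
// client and req are assumed to be an *http.Client and *http.Request as above.
//
//	req.Header.Set("Accept-Encoding", "gzip")
//	res, err := client.Do(req)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer res.Body.Close()
//	gz, err := gzip.NewReader(res.Body) // explicit decode; not automatic here
//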
See https://golang.org/issue/8923 1395 cs.requestedGzip = true 1396 } 1397 1398 continueTimeout := cc.t.expectContinueTimeout() 1399 if continueTimeout != 0 { 1400 if !httpguts.HeaderValuesContainsToken(req.Header["Expect"], "100-continue") { 1401 continueTimeout = 0 1402 } else { 1403 cs.on100 = make(chan struct{}, 1) 1404 } 1405 } 1406 1407 // Past this point (where we send request headers), it is possible for 1408 // RoundTrip to return successfully. Since the RoundTrip contract permits 1409 // the caller to "mutate or reuse" the Request after closing the Response's Body, 1410 // we must take care when referencing the Request from here on. 1411 err = cs.encodeAndWriteHeaders(req) 1412 <-cc.reqHeaderMu 1413 if err != nil { 1414 return err 1415 } 1416 1417 hasBody := cs.reqBodyContentLength != 0 1418 if !hasBody { 1419 cs.sentEndStream = true 1420 } else { 1421 if continueTimeout != 0 { 1422 traceWait100Continue(cs.trace) 1423 timer := time.NewTimer(continueTimeout) 1424 select { 1425 case <-timer.C: 1426 err = nil 1427 case <-cs.on100: 1428 err = nil 1429 case <-cs.abort: 1430 err = cs.abortErr 1431 case <-ctx.Done(): 1432 err = ctx.Err() 1433 case <-cs.reqCancel: 1434 err = errRequestCanceled 1435 } 1436 timer.Stop() 1437 if err != nil { 1438 traceWroteRequest(cs.trace, err) 1439 return err 1440 } 1441 } 1442 1443 if err = cs.writeRequestBody(req); err != nil { 1444 if err != errStopReqBodyWrite { 1445 traceWroteRequest(cs.trace, err) 1446 return err 1447 } 1448 } else { 1449 cs.sentEndStream = true 1450 } 1451 } 1452 1453 traceWroteRequest(cs.trace, err) 1454 1455 var respHeaderTimer <-chan time.Time 1456 var respHeaderRecv chan struct{} 1457 if d := cc.responseHeaderTimeout(); d != 0 { 1458 timer := time.NewTimer(d) 1459 defer timer.Stop() 1460 respHeaderTimer = timer.C 1461 respHeaderRecv = cs.respHeaderRecv 1462 } 1463 // Wait until the peer half-closes its end of the stream, 1464 // or until the request is aborted (via context, error, or otherwise), 1465 // whichever comes first. 1466 for { 1467 select { 1468 case <-cs.peerClosed: 1469 return nil 1470 case <-respHeaderTimer: 1471 return errTimeout 1472 case <-respHeaderRecv: 1473 respHeaderRecv = nil 1474 respHeaderTimer = nil // keep waiting for END_STREAM 1475 case <-cs.abort: 1476 return cs.abortErr 1477 case <-ctx.Done(): 1478 return ctx.Err() 1479 case <-cs.reqCancel: 1480 return errRequestCanceled 1481 } 1482 } 1483 } 1484 1485 func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error { 1486 cc := cs.cc 1487 ctx := cs.ctx 1488 1489 cc.wmu.Lock() 1490 defer cc.wmu.Unlock() 1491 1492 // If the request was canceled while waiting for cc.mu, just quit. 1493 select { 1494 case <-cs.abort: 1495 return cs.abortErr 1496 case <-ctx.Done(): 1497 return ctx.Err() 1498 case <-cs.reqCancel: 1499 return errRequestCanceled 1500 default: 1501 } 1502 1503 // Encode headers. 1504 // 1505 // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is 1506 // sent by writeRequestBody below, along with any Trailers, 1507 // again in form HEADERS{1}, CONTINUATION{0,}) 1508 trailers, err := commaSeparatedTrailers(req) 1509 if err != nil { 1510 return err 1511 } 1512 hasTrailers := trailers != "" 1513 contentLen := actualContentLength(req) 1514 hasBody := contentLen != 0 1515 hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen) 1516 if err != nil { 1517 return err 1518 } 1519 1520 // Write the request. 
1521 endStream := !hasBody && !hasTrailers 1522 cs.sentHeaders = true 1523 err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) 1524 traceWroteHeaders(cs.trace) 1525 return err 1526 } 1527 1528 // cleanupWriteRequest performs post-request tasks. 1529 // 1530 // If err (the result of writeRequest) is non-nil and the stream is not closed, 1531 // cleanupWriteRequest will send a reset to the peer. 1532 func (cs *clientStream) cleanupWriteRequest(err error) { 1533 cc := cs.cc 1534 1535 if cs.ID == 0 { 1536 // We were canceled before creating the stream, so return our reservation. 1537 cc.decrStreamReservations() 1538 } 1539 1540 // TODO: write h12Compare test showing whether 1541 // Request.Body is closed by the Transport, 1542 // and in multiple cases: server replies <=299 and >299 1543 // while still writing request body 1544 cc.mu.Lock() 1545 mustCloseBody := false 1546 if cs.reqBody != nil && cs.reqBodyClosed == nil { 1547 mustCloseBody = true 1548 cs.reqBodyClosed = make(chan struct{}) 1549 } 1550 bodyClosed := cs.reqBodyClosed 1551 cc.mu.Unlock() 1552 if mustCloseBody { 1553 cs.reqBody.Close() 1554 close(bodyClosed) 1555 } 1556 if bodyClosed != nil { 1557 <-bodyClosed 1558 } 1559 1560 if err != nil && cs.sentEndStream { 1561 // If the connection is closed immediately after the response is read, 1562 // we may be aborted before finishing up here. If the stream was closed 1563 // cleanly on both sides, there is no error. 1564 select { 1565 case <-cs.peerClosed: 1566 err = nil 1567 default: 1568 } 1569 } 1570 if err != nil { 1571 cs.abortStream(err) // possibly redundant, but harmless 1572 if cs.sentHeaders { 1573 if se, ok := err.(StreamError); ok { 1574 if se.Cause != errFromPeer { 1575 cc.writeStreamReset(cs.ID, se.Code, err) 1576 } 1577 } else { 1578 cc.writeStreamReset(cs.ID, ErrCodeCancel, err) 1579 } 1580 } 1581 cs.bufPipe.CloseWithError(err) // no-op if already closed 1582 } else { 1583 if cs.sentHeaders && !cs.sentEndStream { 1584 cc.writeStreamReset(cs.ID, ErrCodeNo, nil) 1585 } 1586 cs.bufPipe.CloseWithError(errRequestCanceled) 1587 } 1588 if cs.ID != 0 { 1589 cc.forgetStreamID(cs.ID) 1590 } 1591 1592 cc.wmu.Lock() 1593 werr := cc.werr 1594 cc.wmu.Unlock() 1595 if werr != nil { 1596 cc.Close() 1597 } 1598 1599 close(cs.donec) 1600 } 1601 1602 // awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams. 1603 // Must hold cc.mu. 
1604 func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { 1605 for { 1606 cc.lastActive = time.Now() 1607 if cc.closed || !cc.canTakeNewRequestLocked() { 1608 return errClientConnUnusable 1609 } 1610 cc.lastIdle = time.Time{} 1611 if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) { 1612 return nil 1613 } 1614 cc.pendingRequests++ 1615 cc.cond.Wait() 1616 cc.pendingRequests-- 1617 select { 1618 case <-cs.abort: 1619 return cs.abortErr 1620 default: 1621 } 1622 } 1623 } 1624 1625 // requires cc.wmu be held 1626 func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error { 1627 first := true // first frame written (HEADERS is first, then CONTINUATION) 1628 for len(hdrs) > 0 && cc.werr == nil { 1629 chunk := hdrs 1630 if len(chunk) > maxFrameSize { 1631 chunk = chunk[:maxFrameSize] 1632 } 1633 hdrs = hdrs[len(chunk):] 1634 endHeaders := len(hdrs) == 0 1635 if first { 1636 cc.fr.WriteHeaders(HeadersFrameParam{ 1637 StreamID: streamID, 1638 BlockFragment: chunk, 1639 EndStream: endStream, 1640 EndHeaders: endHeaders, 1641 }) 1642 first = false 1643 } else { 1644 cc.fr.WriteContinuation(streamID, endHeaders, chunk) 1645 } 1646 } 1647 cc.bw.Flush() 1648 return cc.werr 1649 } 1650 1651 // internal error values; they don't escape to callers 1652 var ( 1653 // abort request body write; don't send cancel 1654 errStopReqBodyWrite = errors.New("http2: aborting request body write") 1655 1656 // abort request body write, but send stream reset of cancel. 1657 errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") 1658 1659 errReqBodyTooLong = errors.New("http2: request body larger than specified content length") 1660 ) 1661 1662 // frameScratchBufferLen returns the length of a buffer to use for 1663 // outgoing request bodies to read/write to/from. 1664 // 1665 // It returns max(1, min(peer's advertised max frame size, 1666 // Request.ContentLength+1, 512KB)). 1667 func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int { 1668 const max = 512 << 10 1669 n := int64(maxFrameSize) 1670 if n > max { 1671 n = max 1672 } 1673 if cl := cs.reqBodyContentLength; cl != -1 && cl+1 < n { 1674 // Add an extra byte past the declared content-length to 1675 // give the caller's Request.Body io.Reader a chance to 1676 // give us more bytes than they declared, so we can catch it 1677 // early. 1678 n = cl + 1 1679 } 1680 if n < 1 { 1681 return 1 1682 } 1683 return int(n) // doesn't truncate; max is 512K 1684 } 1685 1686 var bufPool sync.Pool // of *[]byte 1687 1688 func (cs *clientStream) writeRequestBody(req *http.Request) (err error) { 1689 cc := cs.cc 1690 body := cs.reqBody 1691 sentEnd := false // whether we sent the final DATA frame w/ END_STREAM 1692 1693 hasTrailers := req.Trailer != nil 1694 remainLen := cs.reqBodyContentLength 1695 hasContentLen := remainLen != -1 1696 1697 cc.mu.Lock() 1698 maxFrameSize := int(cc.maxFrameSize) 1699 cc.mu.Unlock() 1700 1701 // Scratch buffer for reading into & writing from. 
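//
// Illustrative values for frameScratchBufferLen above, following its
// max(1, min(peer max frame size, ContentLength+1, 512 KiB)) rule:
//
//	frameScratchBufferLen(16384) with reqBodyContentLength 10  == 11     // extra byte catches over-long bodies
//	frameScratchBufferLen(16384) with reqBodyContentLength -1  == 16384  // unknown length: use the frame size
//	frameScratchBufferLen(1<<24) with reqBodyContentLength -1  == 524288 // capped at 512 KiB
//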
1702 scratchLen := cs.frameScratchBufferLen(maxFrameSize) 1703 var buf []byte 1704 if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen { 1705 defer bufPool.Put(bp) 1706 buf = *bp 1707 } else { 1708 buf = make([]byte, scratchLen) 1709 defer bufPool.Put(&buf) 1710 } 1711 1712 var sawEOF bool 1713 for !sawEOF { 1714 n, err := body.Read(buf) 1715 if hasContentLen { 1716 remainLen -= int64(n) 1717 if remainLen == 0 && err == nil { 1718 // The request body's Content-Length was predeclared and 1719 // we just finished reading it all, but the underlying io.Reader 1720 // returned the final chunk with a nil error (which is one of 1721 // the two valid things a Reader can do at EOF). Because we'd prefer 1722 // to send the END_STREAM bit early, double-check that we're actually 1723 // at EOF. Subsequent reads should return (0, EOF) at this point. 1724 // If either value is different, we return an error in one of two ways below. 1725 var scratch [1]byte 1726 var n1 int 1727 n1, err = body.Read(scratch[:]) 1728 remainLen -= int64(n1) 1729 } 1730 if remainLen < 0 { 1731 err = errReqBodyTooLong 1732 return err 1733 } 1734 } 1735 if err != nil { 1736 cc.mu.Lock() 1737 bodyClosed := cs.reqBodyClosed != nil 1738 cc.mu.Unlock() 1739 switch { 1740 case bodyClosed: 1741 return errStopReqBodyWrite 1742 case err == io.EOF: 1743 sawEOF = true 1744 err = nil 1745 default: 1746 return err 1747 } 1748 } 1749 1750 remain := buf[:n] 1751 for len(remain) > 0 && err == nil { 1752 var allowed int32 1753 allowed, err = cs.awaitFlowControl(len(remain)) 1754 if err != nil { 1755 return err 1756 } 1757 cc.wmu.Lock() 1758 data := remain[:allowed] 1759 remain = remain[allowed:] 1760 sentEnd = sawEOF && len(remain) == 0 && !hasTrailers 1761 err = cc.fr.WriteData(cs.ID, sentEnd, data) 1762 if err == nil { 1763 // TODO(bradfitz): this flush is for latency, not bandwidth. 1764 // Most requests won't need this. Make this opt-in or 1765 // opt-out? Use some heuristic on the body type? Nagel-like 1766 // timers? Based on 'n'? Only last chunk of this for loop, 1767 // unless flow control tokens are low? For now, always. 1768 // If we change this, see comment below. 1769 err = cc.bw.Flush() 1770 } 1771 cc.wmu.Unlock() 1772 } 1773 if err != nil { 1774 return err 1775 } 1776 } 1777 1778 if sentEnd { 1779 // Already sent END_STREAM (which implies we have no 1780 // trailers) and flushed, because currently all 1781 // WriteData frames above get a flush. So we're done. 1782 return nil 1783 } 1784 1785 // Since the RoundTrip contract permits the caller to "mutate or reuse" 1786 // a request after the Response's Body is closed, verify that this hasn't 1787 // happened before accessing the trailers. 1788 cc.mu.Lock() 1789 trailer := req.Trailer 1790 err = cs.abortErr 1791 cc.mu.Unlock() 1792 if err != nil { 1793 return err 1794 } 1795 1796 cc.wmu.Lock() 1797 defer cc.wmu.Unlock() 1798 var trls []byte 1799 if len(trailer) > 0 { 1800 trls, err = cc.encodeTrailers(trailer) 1801 if err != nil { 1802 return err 1803 } 1804 } 1805 1806 // Two ways to send END_STREAM: either with trailers, or 1807 // with an empty DATA frame. 1808 if len(trls) > 0 { 1809 err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls) 1810 } else { 1811 err = cc.fr.WriteData(cs.ID, true, nil) 1812 } 1813 if ferr := cc.bw.Flush(); ferr != nil && err == nil { 1814 err = ferr 1815 } 1816 return err 1817 } 1818 1819 // awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow 1820 // control tokens from the server. 
1821 // It returns either the non-zero number of tokens taken or an error 1822 // if the stream is dead. 1823 func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { 1824 cc := cs.cc 1825 ctx := cs.ctx 1826 cc.mu.Lock() 1827 defer cc.mu.Unlock() 1828 for { 1829 if cc.closed { 1830 return 0, errClientConnClosed 1831 } 1832 if cs.reqBodyClosed != nil { 1833 return 0, errStopReqBodyWrite 1834 } 1835 select { 1836 case <-cs.abort: 1837 return 0, cs.abortErr 1838 case <-ctx.Done(): 1839 return 0, ctx.Err() 1840 case <-cs.reqCancel: 1841 return 0, errRequestCanceled 1842 default: 1843 } 1844 if a := cs.flow.available(); a > 0 { 1845 take := a 1846 if int(take) > maxBytes { 1847 1848 take = int32(maxBytes) // can't truncate int; take is int32 1849 } 1850 if take > int32(cc.maxFrameSize) { 1851 take = int32(cc.maxFrameSize) 1852 } 1853 cs.flow.take(take) 1854 return take, nil 1855 } 1856 cc.cond.Wait() 1857 } 1858 } 1859 1860 var errNilRequestURL = errors.New("http2: Request.URI is nil") 1861 1862 // requires cc.wmu be held. 1863 func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { 1864 cc.hbuf.Reset() 1865 if req.URL == nil { 1866 return nil, errNilRequestURL 1867 } 1868 1869 host := req.Host 1870 if host == "" { 1871 host = req.URL.Host 1872 } 1873 host, err := httpguts.PunycodeHostPort(host) 1874 if err != nil { 1875 return nil, err 1876 } 1877 1878 var path string 1879 if req.Method != "CONNECT" { 1880 path = req.URL.RequestURI() 1881 if !validPseudoPath(path) { 1882 orig := path 1883 path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) 1884 if !validPseudoPath(path) { 1885 if req.URL.Opaque != "" { 1886 return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) 1887 } else { 1888 return nil, fmt.Errorf("invalid request :path %q", orig) 1889 } 1890 } 1891 } 1892 } 1893 1894 // Check for any invalid headers and return an error before we 1895 // potentially pollute our hpack state. (We want to be able to 1896 // continue to reuse the hpack encoder for future requests) 1897 for k, vv := range req.Header { 1898 if !httpguts.ValidHeaderFieldName(k) { 1899 return nil, fmt.Errorf("invalid HTTP header name %q", k) 1900 } 1901 for _, v := range vv { 1902 if !httpguts.ValidHeaderFieldValue(v) { 1903 // Don't include the value in the error, because it may be sensitive. 1904 return nil, fmt.Errorf("invalid HTTP header value for header %q", k) 1905 } 1906 } 1907 } 1908 1909 enumerateHeaders := func(f func(name, value string)) { 1910 // 8.1.2.3 Request Pseudo-Header Fields 1911 // The :path pseudo-header field includes the path and query parts of the 1912 // target URI (the path-absolute production and optionally a '?' character 1913 // followed by the query production (see Sections 3.3 and 3.4 of 1914 // [RFC3986]). 
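// Added note (illustrative, not part of the upstream source): this fork lets
// the caller control the order in which the pseudo-header fields below are
// emitted via req.PseudoOrder.Order. A hypothetical setup, assuming the field
// is addressable on the request, might look like:
//
//	req.PseudoOrder.Order = []string{":method", ":authority", ":scheme", ":path"}
//
// When no order is supplied, the default emission order further below is used.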
1915 m := req.Method 1916 if m == "" { 1917 m = http.MethodGet 1918 } 1919 1920 // follow based on pseudo header order 1921 for _, p := range req.PseudoOrder.Order { 1922 switch p { 1923 case ":authority": 1924 f(":authority", host) 1925 case ":method": 1926 f(":method", req.Method) 1927 case ":path": 1928 if req.Method != "CONNECT" { 1929 f(":path", path) 1930 } 1931 case ":scheme": 1932 if req.Method != "CONNECT" { 1933 f(":scheme", req.URL.Scheme) 1934 } 1935 1936 default: 1937 continue 1938 } 1939 } 1940 1941 if len(req.PseudoOrder.Order) == 0 { 1942 f(":method", m) 1943 f(":authority", host) 1944 1945 if req.Method != "CONNECT" { 1946 f(":scheme", req.URL.Scheme) 1947 f(":path", path) 1948 } 1949 } 1950 1951 if trailers != "" { 1952 f("trailer", trailers) 1953 } 1954 1955 kvs, _ := req.Header.SortedKeyValues(make(map[string]bool), req.HeaderOrder) 1956 1957 var didUA bool 1958 for _, kv := range kvs { 1959 if asciiEqualFold(kv.Key, "host") || asciiEqualFold(kv.Key, "content-length") { 1960 // Host is :authority, already sent. 1961 // Content-Length is automatic, set below. 1962 continue 1963 } else if asciiEqualFold(kv.Key, "connection") || 1964 asciiEqualFold(kv.Key, "proxy-connection") || 1965 asciiEqualFold(kv.Key, "transfer-encoding") || 1966 asciiEqualFold(kv.Key, "upgrade") || 1967 asciiEqualFold(kv.Key, "keep-alive") { 1968 // Per 8.1.2.2 Connection-Specific Header 1969 // Fields, don't send connection-specific 1970 // fields. We have already checked if any 1971 // are error-worthy so just ignore the rest. 1972 continue 1973 } else if asciiEqualFold(kv.Key, "user-agent") { 1974 // Match Go's http1 behavior: at most one 1975 // User-Agent. If set to nil or empty string, 1976 // then omit it. Otherwise if not mentioned, 1977 // include the default (below). 1978 didUA = true 1979 if len(kv.Values) < 1 { 1980 continue 1981 } 1982 1983 kv.Values = kv.Values[:1] 1984 if kv.Values[0] == "" { 1985 continue 1986 } 1987 } else if asciiEqualFold(kv.Key, "cookie") { 1988 // Per 8.1.2.5 To allow for better compression efficiency, the 1989 // Cookie header field MAY be split into separate header fields, 1990 // each with one or more cookie-pairs. 1991 for _, v := range kv.Values { 1992 for { 1993 p := strings.IndexByte(v, ';') 1994 if p < 0 { 1995 break 1996 } 1997 f("cookie", v[:p]) 1998 p++ 1999 // strip space after semicolon if any. 2000 for p+1 <= len(v) && v[p] == ' ' { 2001 p++ 2002 } 2003 v = v[p:] 2004 } 2005 if len(v) > 0 { 2006 f("cookie", v) 2007 } 2008 } 2009 continue 2010 } 2011 2012 for _, v := range kv.Values { 2013 f(kv.Key, v) 2014 } 2015 } 2016 2017 if !didUA { 2018 f("user-agent", defaultUserAgent) 2019 } 2020 } 2021 2022 // Do a first pass over the headers counting bytes to ensure 2023 // we don't exceed cc.peerMaxHeaderListSize. This is done as a 2024 // separate pass before encoding the headers to prevent 2025 // modifying the hpack state. 2026 hlSize := uint64(0) 2027 enumerateHeaders(func(name, value string) { 2028 hf := hpack.HeaderField{Name: name, Value: value} 2029 hlSize += uint64(hf.Size()) 2030 }) 2031 2032 if hlSize > cc.peerMaxHeaderListSize { 2033 return nil, errRequestHeaderListSize 2034 } 2035 2036 trace := httptrace.ContextClientTrace(req.Context()) 2037 traceHeaders := traceHasWroteHeaderField(trace) 2038 2039 // Header list size is ok. Write the headers. 2040 enumerateHeaders(func(name, value string) { 2041 name, ascii := lowerHeader(name) 2042 if !ascii { 2043 // Skip writing invalid headers. 
Per RFC 7540, Section 8.1.2, header 2044 // field names have to be ASCII characters (just as in HTTP/1.x). 2045 return 2046 } 2047 cc.writeHeader(name, value) 2048 if traceHeaders { 2049 traceWroteHeaderField(trace, name, value) 2050 } 2051 }) 2052 2053 return cc.hbuf.Bytes(), nil 2054 } 2055 2056 // requires cc.wmu be held. 2057 func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { 2058 cc.hbuf.Reset() 2059 2060 hlSize := uint64(0) 2061 for k, vv := range trailer { 2062 for _, v := range vv { 2063 hf := hpack.HeaderField{Name: k, Value: v} 2064 hlSize += uint64(hf.Size()) 2065 } 2066 } 2067 if hlSize > cc.peerMaxHeaderListSize { 2068 return nil, errRequestHeaderListSize 2069 } 2070 2071 for k, vv := range trailer { 2072 lowKey, ascii := lowerHeader(k) 2073 if !ascii { 2074 // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header 2075 // field names have to be ASCII characters (just as in HTTP/1.x). 2076 continue 2077 } 2078 // Transfer-Encoding, etc.. have already been filtered at the 2079 // start of RoundTrip 2080 for _, v := range vv { 2081 cc.writeHeader(lowKey, v) 2082 } 2083 } 2084 return cc.hbuf.Bytes(), nil 2085 } 2086 2087 func (cc *ClientConn) writeHeader(name, value string) { 2088 if VerboseLogs { 2089 log.Printf("http2: Transport encoding header %q = %q", name, value) 2090 } 2091 cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) 2092 } 2093 2094 type resAndError struct { 2095 _ incomparable 2096 res *http.Response 2097 err error 2098 } 2099 2100 // requires cc.mu be held. 2101 func (cc *ClientConn) addStreamLocked(cs *clientStream) { 2102 cs.flow.add(int32(cc.initialWindowSize)) 2103 cs.flow.setConnFlow(&cc.flow) 2104 cs.inflow.init(transportDefaultStreamFlow) 2105 cs.ID = cc.nextStreamID 2106 cc.nextStreamID += 2 2107 cc.streams[cs.ID] = cs 2108 if cs.ID == 0 { 2109 panic("assigned stream ID 0") 2110 } 2111 } 2112 2113 func (cc *ClientConn) forgetStreamID(id uint32) { 2114 cc.mu.Lock() 2115 slen := len(cc.streams) 2116 delete(cc.streams, id) 2117 if len(cc.streams) != slen-1 { 2118 panic("forgetting unknown stream id") 2119 } 2120 cc.lastActive = time.Now() 2121 if len(cc.streams) == 0 && cc.idleTimer != nil { 2122 cc.idleTimer.Reset(cc.idleTimeout) 2123 cc.lastIdle = time.Now() 2124 } 2125 // Wake up writeRequestBody via clientStream.awaitFlowControl and 2126 // wake up RoundTrip if there is a pending request. 2127 cc.cond.Broadcast() 2128 2129 closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil 2130 if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { 2131 if VerboseLogs { 2132 cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2) 2133 } 2134 cc.closed = true 2135 defer cc.closeConn() 2136 } 2137 2138 cc.mu.Unlock() 2139 } 2140 2141 // clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. 2142 type clientConnReadLoop struct { 2143 _ incomparable 2144 cc *ClientConn 2145 } 2146 2147 // readLoop runs in its own goroutine and reads and dispatches frames. 2148 func (cc *ClientConn) readLoop() { 2149 rl := &clientConnReadLoop{cc: cc} 2150 defer rl.cleanup() 2151 cc.readerErr = rl.run() 2152 if ce, ok := cc.readerErr.(ConnectionError); ok { 2153 cc.wmu.Lock() 2154 cc.fr.WriteGoAway(0, ErrCode(ce), nil) 2155 cc.wmu.Unlock() 2156 } 2157 } 2158 2159 // GoAwayError is returned by the Transport when the server closes the 2160 // TCP connection after sending a GOAWAY frame. 
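// As a hedged sketch of caller-side handling (added for illustration, not part
// of the upstream source), the error can be detected with errors.As, assuming
// it reaches the caller either unwrapped or wrapped:
//
//	var ga GoAwayError
//	if errors.As(err, &ga) {
//		log.Printf("server sent GOAWAY: last stream %d, code %v", ga.LastStreamID, ga.ErrCode)
//	}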
2161 type GoAwayError struct { 2162 LastStreamID uint32 2163 ErrCode ErrCode 2164 DebugData string 2165 } 2166 2167 func (e GoAwayError) Error() string { 2168 return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", 2169 e.LastStreamID, e.ErrCode, e.DebugData) 2170 } 2171 2172 func isEOFOrNetReadError(err error) bool { 2173 if err == io.EOF { 2174 return true 2175 } 2176 ne, ok := err.(*net.OpError) 2177 return ok && ne.Op == "read" 2178 } 2179 2180 func (rl *clientConnReadLoop) cleanup() { 2181 cc := rl.cc 2182 cc.t.connPool().MarkDead(cc) 2183 defer cc.closeConn() 2184 defer close(cc.readerDone) 2185 2186 if cc.idleTimer != nil { 2187 cc.idleTimer.Stop() 2188 } 2189 2190 // Close any response bodies if the server closes prematurely. 2191 // TODO: also do this if we've written the headers but not 2192 // gotten a response yet. 2193 err := cc.readerErr 2194 cc.mu.Lock() 2195 if cc.goAway != nil && isEOFOrNetReadError(err) { 2196 err = GoAwayError{ 2197 LastStreamID: cc.goAway.LastStreamID, 2198 ErrCode: cc.goAway.ErrCode, 2199 DebugData: cc.goAwayDebug, 2200 } 2201 } else if err == io.EOF { 2202 err = io.ErrUnexpectedEOF 2203 } 2204 cc.closed = true 2205 2206 for _, cs := range cc.streams { 2207 select { 2208 case <-cs.peerClosed: 2209 // The server closed the stream before closing the conn, 2210 // so no need to interrupt it. 2211 default: 2212 cs.abortStreamLocked(err) 2213 } 2214 } 2215 cc.cond.Broadcast() 2216 cc.mu.Unlock() 2217 } 2218 2219 // countReadFrameError calls Transport.CountError with a string 2220 // representing err. 2221 func (cc *ClientConn) countReadFrameError(err error) { 2222 f := cc.t.CountError 2223 if f == nil || err == nil { 2224 return 2225 } 2226 if ce, ok := err.(ConnectionError); ok { 2227 errCode := ErrCode(ce) 2228 f(fmt.Sprintf("read_frame_conn_error_%s", errCode.stringToken())) 2229 return 2230 } 2231 if errors.Is(err, io.EOF) { 2232 f("read_frame_eof") 2233 return 2234 } 2235 if errors.Is(err, io.ErrUnexpectedEOF) { 2236 f("read_frame_unexpected_eof") 2237 return 2238 } 2239 if errors.Is(err, ErrFrameTooLarge) { 2240 f("read_frame_too_large") 2241 return 2242 } 2243 f("read_frame_other") 2244 } 2245 2246 func (rl *clientConnReadLoop) run() error { 2247 cc := rl.cc 2248 gotSettings := false 2249 readIdleTimeout := cc.t.ReadIdleTimeout 2250 var t *time.Timer 2251 if readIdleTimeout != 0 { 2252 t = time.AfterFunc(readIdleTimeout, cc.healthCheck) 2253 defer t.Stop() 2254 } 2255 for { 2256 f, err := cc.fr.ReadFrame() 2257 if t != nil { 2258 t.Reset(readIdleTimeout) 2259 } 2260 if err != nil { 2261 cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) 2262 } 2263 if se, ok := err.(StreamError); ok { 2264 if cs := rl.streamByID(se.StreamID); cs != nil { 2265 if se.Cause == nil { 2266 se.Cause = cc.fr.errDetail 2267 } 2268 rl.endStreamError(cs, se) 2269 } 2270 continue 2271 } else if err != nil { 2272 cc.countReadFrameError(err) 2273 return err 2274 } 2275 if VerboseLogs { 2276 cc.vlogf("http2: Transport received %s", summarizeFrame(f)) 2277 } 2278 if !gotSettings { 2279 if _, ok := f.(*SettingsFrame); !ok { 2280 cc.logf("protocol error: received %T before a SETTINGS frame", f) 2281 return ConnectionError(ErrCodeProtocol) 2282 } 2283 gotSettings = true 2284 } 2285 2286 switch f := f.(type) { 2287 case *MetaHeadersFrame: 2288 err = rl.processHeaders(f) 2289 case *DataFrame: 2290 err = rl.processData(f) 2291 case *GoAwayFrame: 2292 err = rl.processGoAway(f) 2293 case *RSTStreamFrame: 
2294 err = rl.processResetStream(f) 2295 case *SettingsFrame: 2296 err = rl.processSettings(f) 2297 case *PushPromiseFrame: 2298 err = rl.processPushPromise(f) 2299 case *WindowUpdateFrame: 2300 err = rl.processWindowUpdate(f) 2301 case *PingFrame: 2302 err = rl.processPing(f) 2303 default: 2304 cc.logf("Transport: unhandled response frame type %T", f) 2305 } 2306 if err != nil { 2307 if VerboseLogs { 2308 cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) 2309 } 2310 return err 2311 } 2312 } 2313 } 2314 2315 func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { 2316 cs := rl.streamByID(f.StreamID) 2317 if cs == nil { 2318 // We'd get here if we canceled a request while the 2319 // server had its response still in flight. So if this 2320 // was just something we canceled, ignore it. 2321 return nil 2322 } 2323 if cs.readClosed { 2324 rl.endStreamError(cs, StreamError{ 2325 StreamID: f.StreamID, 2326 Code: ErrCodeProtocol, 2327 Cause: errors.New("protocol error: headers after END_STREAM"), 2328 }) 2329 return nil 2330 } 2331 if !cs.firstByte { 2332 if cs.trace != nil { 2333 // TODO(bradfitz): move first response byte earlier, 2334 // when we first read the 9 byte header, not waiting 2335 // until all the HEADERS+CONTINUATION frames have been 2336 // merged. This works for now. 2337 traceFirstResponseByte(cs.trace) 2338 } 2339 cs.firstByte = true 2340 } 2341 if !cs.pastHeaders { 2342 cs.pastHeaders = true 2343 } else { 2344 return rl.processTrailers(cs, f) 2345 } 2346 2347 res, err := rl.handleResponse(cs, f) 2348 if err != nil { 2349 if _, ok := err.(ConnectionError); ok { 2350 return err 2351 } 2352 // Any other error type is a stream error. 2353 rl.endStreamError(cs, StreamError{ 2354 StreamID: f.StreamID, 2355 Code: ErrCodeProtocol, 2356 Cause: err, 2357 }) 2358 return nil // return nil from process* funcs to keep conn alive 2359 } 2360 if res == nil { 2361 // (nil, nil) special case. See handleResponse docs. 2362 return nil 2363 } 2364 cs.resTrailer = &res.Trailer 2365 cs.res = res 2366 close(cs.respHeaderRecv) 2367 if f.StreamEnded() { 2368 rl.endStream(cs) 2369 } 2370 return nil 2371 } 2372 2373 // may return error types nil, or ConnectionError. Any other error value 2374 // is a StreamError of type ErrCodeProtocol. The returned error in that case 2375 // is the detail. 2376 // 2377 // As a special case, handleResponse may return (nil, nil) to skip the 2378 // frame (currently only used for 1xx responses). 
2379 func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { 2380 if f.Truncated { 2381 return nil, errResponseHeaderListSize 2382 } 2383 2384 status := f.PseudoValue("status") 2385 if status == "" { 2386 return nil, errors.New("malformed response from server: missing status pseudo header") 2387 } 2388 statusCode, err := strconv.Atoi(status) 2389 if err != nil { 2390 return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") 2391 } 2392 2393 regularFields := f.RegularFields() 2394 strs := make([]string, len(regularFields)) 2395 header := make(http.Header, len(regularFields)) 2396 res := &http.Response{ 2397 Proto: "HTTP/2.0", 2398 ProtoMajor: 2, 2399 Header: header, 2400 StatusCode: statusCode, 2401 Status: status + " " + http.StatusText(statusCode), 2402 } 2403 for _, hf := range regularFields { 2404 key := canonicalHeader(hf.Name) 2405 if key == "Trailer" { 2406 t := res.Trailer 2407 if t == nil { 2408 t = make(http.Header) 2409 res.Trailer = t 2410 } 2411 foreachHeaderElement(hf.Value, func(v string) { 2412 t[canonicalHeader(v)] = nil 2413 }) 2414 } else { 2415 vv := header[key] 2416 if vv == nil && len(strs) > 0 { 2417 // More than likely this will be a single-element key. 2418 // Most headers aren't multi-valued. 2419 // Set the capacity on strs[0] to 1, so any future append 2420 // won't extend the slice into the other strings. 2421 vv, strs = strs[:1:1], strs[1:] 2422 vv[0] = hf.Value 2423 header[key] = vv 2424 } else { 2425 header[key] = append(vv, hf.Value) 2426 } 2427 } 2428 } 2429 2430 if statusCode >= 100 && statusCode <= 199 { 2431 if f.StreamEnded() { 2432 return nil, errors.New("1xx informational response with END_STREAM flag") 2433 } 2434 cs.num1xx++ 2435 const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http 2436 if cs.num1xx > max1xxResponses { 2437 return nil, errors.New("http2: too many 1xx informational responses") 2438 } 2439 if fn := cs.get1xxTraceFunc(); fn != nil { 2440 if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { 2441 return nil, err 2442 } 2443 } 2444 if statusCode == 100 { 2445 traceGot100Continue(cs.trace) 2446 select { 2447 case cs.on100 <- struct{}{}: 2448 default: 2449 } 2450 } 2451 cs.pastHeaders = false // do it all again 2452 return nil, nil 2453 } 2454 2455 res.ContentLength = -1 2456 if clens := res.Header["Content-Length"]; len(clens) == 1 { 2457 if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil { 2458 res.ContentLength = int64(cl) 2459 } else { 2460 // TODO: care? unlike http/1, it won't mess up our framing, so it's 2461 // more safe smuggling-wise to ignore. 2462 } 2463 } else if len(clens) > 1 { 2464 // TODO: care? unlike http/1, it won't mess up our framing, so it's 2465 // more safe smuggling-wise to ignore. 
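// Added note (illustration only): when the Content-Length value is repeated or
// unparsable, res.ContentLength simply stays -1; it becomes 0 below only for a
// non-HEAD response whose stream has already ended. strconv.ParseUint with
// bitSize 63 ensures an accepted value also fits in a non-negative int64, so
// the conversion above cannot overflow.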
2466 } else if f.StreamEnded() && !cs.isHead { 2467 res.ContentLength = 0 2468 } 2469 2470 if cs.isHead { 2471 res.Body = noBody 2472 return res, nil 2473 } 2474 2475 if f.StreamEnded() { 2476 if res.ContentLength > 0 { 2477 res.Body = missingBody{} 2478 } else { 2479 res.Body = noBody 2480 } 2481 return res, nil 2482 } 2483 2484 cs.bufPipe.setBuffer(&dataBuffer{expected: res.ContentLength}) 2485 cs.bytesRemain = res.ContentLength 2486 res.Body = transportResponseBody{cs} 2487 2488 if cs.requestedGzip && asciiEqualFold(res.Header.Get("Content-Encoding"), "gzip") { 2489 res.Header.Del("Content-Encoding") 2490 res.Header.Del("Content-Length") 2491 res.ContentLength = -1 2492 res.Body = &gzipReader{body: res.Body} 2493 res.Uncompressed = true 2494 } 2495 return res, nil 2496 } 2497 2498 func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { 2499 if cs.pastTrailers { 2500 // Too many HEADERS frames for this stream. 2501 return ConnectionError(ErrCodeProtocol) 2502 } 2503 cs.pastTrailers = true 2504 if !f.StreamEnded() { 2505 // We expect that any headers for trailers also 2506 // has END_STREAM. 2507 return ConnectionError(ErrCodeProtocol) 2508 } 2509 if len(f.PseudoFields()) > 0 { 2510 // No pseudo header fields are defined for trailers. 2511 // TODO: ConnectionError might be overly harsh? Check. 2512 return ConnectionError(ErrCodeProtocol) 2513 } 2514 2515 trailer := make(http.Header) 2516 for _, hf := range f.RegularFields() { 2517 key := canonicalHeader(hf.Name) 2518 trailer[key] = append(trailer[key], hf.Value) 2519 } 2520 cs.trailer = trailer 2521 2522 rl.endStream(cs) 2523 return nil 2524 } 2525 2526 // transportResponseBody is the concrete type of Transport.RoundTrip's 2527 // Response.Body. It is an io.ReadCloser. 2528 type transportResponseBody struct { 2529 cs *clientStream 2530 } 2531 2532 func (b transportResponseBody) Read(p []byte) (n int, err error) { 2533 cs := b.cs 2534 cc := cs.cc 2535 2536 if cs.readErr != nil { 2537 return 0, cs.readErr 2538 } 2539 n, err = b.cs.bufPipe.Read(p) 2540 if cs.bytesRemain != -1 { 2541 if int64(n) > cs.bytesRemain { 2542 n = int(cs.bytesRemain) 2543 if err == nil { 2544 err = errors.New("net/http: server replied with more than declared Content-Length; truncated") 2545 cs.abortStream(err) 2546 } 2547 cs.readErr = err 2548 return int(cs.bytesRemain), err 2549 } 2550 cs.bytesRemain -= int64(n) 2551 if err == io.EOF && cs.bytesRemain > 0 { 2552 err = io.ErrUnexpectedEOF 2553 cs.readErr = err 2554 return n, err 2555 } 2556 } 2557 if n == 0 { 2558 // No flow control tokens to send back. 2559 return 2560 } 2561 2562 cc.mu.Lock() 2563 connAdd := cc.inflow.add(n) 2564 var streamAdd int32 2565 if err == nil { // No need to refresh if the stream is over or failed. 2566 streamAdd = cs.inflow.add(n) 2567 } 2568 cc.mu.Unlock() 2569 2570 if connAdd != 0 || streamAdd != 0 { 2571 cc.wmu.Lock() 2572 defer cc.wmu.Unlock() 2573 if connAdd != 0 { 2574 cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) 2575 } 2576 if streamAdd != 0 { 2577 cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) 2578 } 2579 cc.bw.Flush() 2580 } 2581 return 2582 } 2583 2584 var errClosedResponseBody = errors.New("http2: response body closed") 2585 2586 func (b transportResponseBody) Close() error { 2587 cs := b.cs 2588 cc := cs.cc 2589 2590 unread := cs.bufPipe.Len() 2591 if unread > 0 { 2592 cc.mu.Lock() 2593 // Return connection-level flow control. 
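// Added note (illustration only): only the connection-level window is refunded
// for bytes that were buffered but never read by the caller; the stream-level
// window is left alone because the stream is torn down immediately below, so a
// stream WINDOW_UPDATE would be pointless.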
2594 connAdd := cc.inflow.add(unread) 2595 cc.mu.Unlock() 2596 2597 // TODO(dneil): Acquiring this mutex can block indefinitely. 2598 // Move flow control return to a goroutine? 2599 cc.wmu.Lock() 2600 // Return connection-level flow control. 2601 if connAdd > 0 { 2602 cc.fr.WriteWindowUpdate(0, uint32(connAdd)) 2603 } 2604 cc.bw.Flush() 2605 cc.wmu.Unlock() 2606 } 2607 2608 cs.bufPipe.BreakWithError(errClosedResponseBody) 2609 cs.abortStream(errClosedResponseBody) 2610 2611 select { 2612 case <-cs.donec: 2613 case <-cs.ctx.Done(): 2614 // See golang/go#49366: The net/http package can cancel the 2615 // request context after the response body is fully read. 2616 // Don't treat this as an error. 2617 return nil 2618 case <-cs.reqCancel: 2619 return errRequestCanceled 2620 } 2621 return nil 2622 } 2623 2624 func (rl *clientConnReadLoop) processData(f *DataFrame) error { 2625 cc := rl.cc 2626 cs := rl.streamByID(f.StreamID) 2627 data := f.Data() 2628 if cs == nil { 2629 cc.mu.Lock() 2630 neverSent := cc.nextStreamID 2631 cc.mu.Unlock() 2632 if f.StreamID >= neverSent { 2633 // We never asked for this. 2634 cc.logf("http2: Transport received unsolicited DATA frame; closing connection") 2635 return ConnectionError(ErrCodeProtocol) 2636 } 2637 // We probably did ask for this, but canceled. Just ignore it. 2638 // TODO: be stricter here? only silently ignore things which 2639 // we canceled, but not things which were closed normally 2640 // by the peer? Tough without accumulating too much state. 2641 2642 // But at least return their flow control: 2643 if f.Length > 0 { 2644 cc.mu.Lock() 2645 ok := cc.inflow.take(f.Length) 2646 connAdd := cc.inflow.add(int(f.Length)) 2647 cc.mu.Unlock() 2648 if !ok { 2649 return ConnectionError(ErrCodeFlowControl) 2650 } 2651 if connAdd > 0 { 2652 cc.wmu.Lock() 2653 cc.fr.WriteWindowUpdate(0, uint32(connAdd)) 2654 cc.bw.Flush() 2655 cc.wmu.Unlock() 2656 } 2657 } 2658 return nil 2659 } 2660 if cs.readClosed { 2661 cc.logf("protocol error: received DATA after END_STREAM") 2662 rl.endStreamError(cs, StreamError{ 2663 StreamID: f.StreamID, 2664 Code: ErrCodeProtocol, 2665 }) 2666 return nil 2667 } 2668 if !cs.firstByte { 2669 cc.logf("protocol error: received DATA before a HEADERS frame") 2670 rl.endStreamError(cs, StreamError{ 2671 StreamID: f.StreamID, 2672 Code: ErrCodeProtocol, 2673 }) 2674 return nil 2675 } 2676 if f.Length > 0 { 2677 if cs.isHead && len(data) > 0 { 2678 cc.logf("protocol error: received DATA on a HEAD request") 2679 rl.endStreamError(cs, StreamError{ 2680 StreamID: f.StreamID, 2681 Code: ErrCodeProtocol, 2682 }) 2683 return nil 2684 } 2685 // Check connection-level flow control. 2686 cc.mu.Lock() 2687 if !takeInflows(&cc.inflow, &cs.inflow, f.Length) { 2688 cc.mu.Unlock() 2689 return ConnectionError(ErrCodeFlowControl) 2690 } 2691 // Return any padded flow control now, since we won't 2692 // refund it later on body reads. 2693 var refund int 2694 if pad := int(f.Length) - len(data); pad > 0 { 2695 refund += pad 2696 } 2697 2698 didReset := false 2699 var err error 2700 if len(data) > 0 { 2701 if _, err = cs.bufPipe.Write(data); err != nil { 2702 // Return len(data) now if the stream is already closed, 2703 // since data will never be read. 
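// Added worked example (illustration only, not upstream commentary): a DATA
// frame with f.Length == 1000 carrying 900 bytes of payload and 100 bytes of
// padding refunds 100 bytes of window immediately. If the pipe write above
// fails because the stream was already closed, the 900 payload bytes are added
// to the refund as well, so the peer gets the full 1000 bytes back on the
// connection-level window.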
2704 didReset = true 2705 refund += len(data) 2706 } 2707 } 2708 2709 sendConn := cc.inflow.add(refund) 2710 var sendStream int32 2711 if !didReset { 2712 sendStream = cs.inflow.add(refund) 2713 } 2714 cc.mu.Unlock() 2715 2716 if sendConn > 0 || sendStream > 0 { 2717 cc.wmu.Lock() 2718 if sendConn > 0 { 2719 cc.fr.WriteWindowUpdate(0, uint32(sendConn)) 2720 } 2721 if sendStream > 0 { 2722 cc.fr.WriteWindowUpdate(cs.ID, uint32(sendStream)) 2723 } 2724 cc.bw.Flush() 2725 cc.wmu.Unlock() 2726 } 2727 2728 if err != nil { 2729 rl.endStreamError(cs, err) 2730 return nil 2731 } 2732 } 2733 2734 if f.StreamEnded() { 2735 rl.endStream(cs) 2736 } 2737 return nil 2738 } 2739 2740 func (rl *clientConnReadLoop) endStream(cs *clientStream) { 2741 // TODO: check that any declared content-length matches, like 2742 // server.go's (*stream).endStream method. 2743 if !cs.readClosed { 2744 cs.readClosed = true 2745 // Close cs.bufPipe and cs.peerClosed with cc.mu held to avoid a 2746 // race condition: The caller can read io.EOF from Response.Body 2747 // and close the body before we close cs.peerClosed, causing 2748 // cleanupWriteRequest to send a RST_STREAM. 2749 rl.cc.mu.Lock() 2750 defer rl.cc.mu.Unlock() 2751 cs.bufPipe.closeWithErrorAndCode(io.EOF, cs.copyTrailers) 2752 close(cs.peerClosed) 2753 } 2754 } 2755 2756 func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { 2757 cs.readAborted = true 2758 cs.abortStream(err) 2759 } 2760 2761 func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream { 2762 rl.cc.mu.Lock() 2763 defer rl.cc.mu.Unlock() 2764 cs := rl.cc.streams[id] 2765 if cs != nil && !cs.readAborted { 2766 return cs 2767 } 2768 return nil 2769 } 2770 2771 func (cs *clientStream) copyTrailers() { 2772 for k, vv := range cs.trailer { 2773 t := cs.resTrailer 2774 if *t == nil { 2775 *t = make(http.Header) 2776 } 2777 (*t)[k] = vv 2778 } 2779 } 2780 2781 func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { 2782 cc := rl.cc 2783 cc.t.connPool().MarkDead(cc) 2784 if f.ErrCode != 0 { 2785 // TODO: deal with GOAWAY more. particularly the error code 2786 cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) 2787 if fn := cc.t.CountError; fn != nil { 2788 fn("recv_goaway_" + f.ErrCode.stringToken()) 2789 } 2790 } 2791 cc.setGoAway(f) 2792 return nil 2793 } 2794 2795 func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { 2796 cc := rl.cc 2797 // Locking both mu and wmu here allows frame encoding to read settings with only wmu held. 2798 // Acquiring wmu when f.IsAck() is unnecessary, but convenient and mostly harmless. 
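// Added note (illustration only): the SETTINGS_INITIAL_WINDOW_SIZE handling in
// processSettingsNoWrite below applies the change as a delta. For example, if
// the server raises the initial window from 65535 to 131070, every currently
// open stream's send window grows by 65535; lowering it can likewise drive an
// open stream's window temporarily negative, as RFC 7540 Section 6.9.2 allows.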
2799 cc.wmu.Lock() 2800 defer cc.wmu.Unlock() 2801 2802 if err := rl.processSettingsNoWrite(f); err != nil { 2803 return err 2804 } 2805 if !f.IsAck() { 2806 cc.fr.WriteSettingsAck() 2807 cc.bw.Flush() 2808 } 2809 return nil 2810 } 2811 2812 func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { 2813 cc := rl.cc 2814 cc.mu.Lock() 2815 defer cc.mu.Unlock() 2816 2817 if f.IsAck() { 2818 if cc.wantSettingsAck { 2819 cc.wantSettingsAck = false 2820 return nil 2821 } 2822 return ConnectionError(ErrCodeProtocol) 2823 } 2824 2825 var seenMaxConcurrentStreams bool 2826 err := f.ForeachSetting(func(s Setting) error { 2827 switch s.ID { 2828 case SettingMaxFrameSize: 2829 cc.maxFrameSize = s.Val 2830 case SettingMaxConcurrentStreams: 2831 cc.maxConcurrentStreams = s.Val 2832 seenMaxConcurrentStreams = true 2833 case SettingMaxHeaderListSize: 2834 cc.peerMaxHeaderListSize = uint64(s.Val) 2835 case SettingInitialWindowSize: 2836 // Values above the maximum flow-control 2837 // window size of 2^31-1 MUST be treated as a 2838 // connection error (Section 5.4.1) of type 2839 // FLOW_CONTROL_ERROR. 2840 if s.Val > math.MaxInt32 { 2841 return ConnectionError(ErrCodeFlowControl) 2842 } 2843 2844 // Adjust flow control of currently-open 2845 // frames by the difference of the old initial 2846 // window size and this one. 2847 delta := int32(s.Val) - int32(cc.initialWindowSize) 2848 for _, cs := range cc.streams { 2849 cs.flow.add(delta) 2850 } 2851 cc.cond.Broadcast() 2852 2853 cc.initialWindowSize = s.Val 2854 case SettingHeaderTableSize: 2855 cc.henc.SetMaxDynamicTableSize(s.Val) 2856 cc.peerMaxHeaderTableSize = s.Val 2857 default: 2858 cc.vlogf("Unhandled Setting: %v", s) 2859 } 2860 return nil 2861 }) 2862 if err != nil { 2863 return err 2864 } 2865 2866 if !cc.seenSettings { 2867 if !seenMaxConcurrentStreams { 2868 // This was the servers initial SETTINGS frame and it 2869 // didn't contain a MAX_CONCURRENT_STREAMS field so 2870 // increase the number of concurrent streams this 2871 // connection can establish to our default. 2872 cc.maxConcurrentStreams = defaultMaxConcurrentStreams 2873 } 2874 cc.seenSettings = true 2875 } 2876 2877 return nil 2878 } 2879 2880 func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { 2881 cc := rl.cc 2882 cs := rl.streamByID(f.StreamID) 2883 if f.StreamID != 0 && cs == nil { 2884 return nil 2885 } 2886 2887 cc.mu.Lock() 2888 defer cc.mu.Unlock() 2889 2890 fl := &cc.flow 2891 if cs != nil { 2892 fl = &cs.flow 2893 } 2894 if !fl.add(int32(f.Increment)) { 2895 return ConnectionError(ErrCodeFlowControl) 2896 } 2897 cc.cond.Broadcast() 2898 return nil 2899 } 2900 2901 func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { 2902 cs := rl.streamByID(f.StreamID) 2903 if cs == nil { 2904 // TODO: return error if server tries to RST_STREAM an idle stream 2905 return nil 2906 } 2907 serr := streamError(cs.ID, f.ErrCode) 2908 serr.Cause = errFromPeer 2909 if f.ErrCode == ErrCodeProtocol { 2910 rl.cc.SetDoNotReuse() 2911 } 2912 if fn := cs.cc.t.CountError; fn != nil { 2913 fn("recv_rststream_" + f.ErrCode.stringToken()) 2914 } 2915 cs.abortStream(serr) 2916 2917 cs.bufPipe.CloseWithError(serr) 2918 return nil 2919 } 2920 2921 // Ping sends a PING frame to the server and waits for the ack. 
2922 func (cc *ClientConn) Ping(ctx context.Context) error { 2923 c := make(chan struct{}) 2924 // Generate a random payload 2925 var p [8]byte 2926 for { 2927 if _, err := rand.Read(p[:]); err != nil { 2928 return err 2929 } 2930 cc.mu.Lock() 2931 // check for dup before insert 2932 if _, found := cc.pings[p]; !found { 2933 cc.pings[p] = c 2934 cc.mu.Unlock() 2935 break 2936 } 2937 cc.mu.Unlock() 2938 } 2939 errc := make(chan error, 1) 2940 go func() { 2941 cc.wmu.Lock() 2942 defer cc.wmu.Unlock() 2943 if err := cc.fr.WritePing(false, p); err != nil { 2944 errc <- err 2945 return 2946 } 2947 if err := cc.bw.Flush(); err != nil { 2948 errc <- err 2949 return 2950 } 2951 }() 2952 select { 2953 case <-c: 2954 return nil 2955 case err := <-errc: 2956 return err 2957 case <-ctx.Done(): 2958 return ctx.Err() 2959 case <-cc.readerDone: 2960 // connection closed 2961 return cc.readerErr 2962 } 2963 } 2964 2965 func (rl *clientConnReadLoop) processPing(f *PingFrame) error { 2966 if f.IsAck() { 2967 cc := rl.cc 2968 cc.mu.Lock() 2969 defer cc.mu.Unlock() 2970 // If ack, notify listener if any 2971 if c, ok := cc.pings[f.Data]; ok { 2972 close(c) 2973 delete(cc.pings, f.Data) 2974 } 2975 return nil 2976 } 2977 cc := rl.cc 2978 cc.wmu.Lock() 2979 defer cc.wmu.Unlock() 2980 if err := cc.fr.WritePing(true, f.Data); err != nil { 2981 return err 2982 } 2983 return cc.bw.Flush() 2984 } 2985 2986 func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { 2987 // We told the peer we don't want them. 2988 // Spec says: 2989 // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH 2990 // setting of the peer endpoint is set to 0. An endpoint that 2991 // has set this setting and has received acknowledgement MUST 2992 // treat the receipt of a PUSH_PROMISE frame as a connection 2993 // error (Section 5.4.1) of type PROTOCOL_ERROR." 2994 return ConnectionError(ErrCodeProtocol) 2995 } 2996 2997 func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { 2998 // TODO: map err to more interesting error codes, once the 2999 // HTTP community comes up with some. But currently for 3000 // RST_STREAM there's no equivalent to GOAWAY frame's debug 3001 // data, and the error codes are all pretty vague ("cancel"). 3002 cc.wmu.Lock() 3003 cc.fr.WriteRSTStream(streamID, code) 3004 cc.bw.Flush() 3005 cc.wmu.Unlock() 3006 } 3007 3008 var ( 3009 errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") 3010 errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") 3011 ) 3012 3013 func (cc *ClientConn) logf(format string, args ...interface{}) { 3014 cc.t.logf(format, args...) 3015 } 3016 3017 func (cc *ClientConn) vlogf(format string, args ...interface{}) { 3018 cc.t.vlogf(format, args...) 3019 } 3020 3021 func (t *Transport) vlogf(format string, args ...interface{}) { 3022 if VerboseLogs { 3023 t.logf(format, args...) 3024 } 3025 } 3026 3027 func (t *Transport) logf(format string, args ...interface{}) { 3028 log.Printf(format, args...) 
3029 } 3030 3031 var noBody io.ReadCloser = noBodyReader{} 3032 3033 type noBodyReader struct{} 3034 3035 func (noBodyReader) Close() error { return nil } 3036 func (noBodyReader) Read([]byte) (int, error) { return 0, io.EOF } 3037 3038 type missingBody struct{} 3039 3040 func (missingBody) Close() error { return nil } 3041 func (missingBody) Read([]byte) (int, error) { return 0, io.ErrUnexpectedEOF } 3042 3043 func strSliceContains(ss []string, s string) bool { 3044 for _, v := range ss { 3045 if v == s { 3046 return true 3047 } 3048 } 3049 return false 3050 } 3051 3052 type erringRoundTripper struct{ err error } 3053 3054 func (rt erringRoundTripper) RoundTripErr() error { return rt.err } 3055 func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } 3056 3057 // gzipReader wraps a response body so it can lazily 3058 // call gzip.NewReader on the first call to Read 3059 type gzipReader struct { 3060 _ incomparable 3061 body io.ReadCloser // underlying Response.Body 3062 zr *gzip.Reader // lazily-initialized gzip reader 3063 zerr error // sticky error 3064 } 3065 3066 func (gz *gzipReader) Read(p []byte) (n int, err error) { 3067 if gz.zerr != nil { 3068 return 0, gz.zerr 3069 } 3070 if gz.zr == nil { 3071 gz.zr, err = gzip.NewReader(gz.body) 3072 if err != nil { 3073 gz.zerr = err 3074 return 0, err 3075 } 3076 } 3077 return gz.zr.Read(p) 3078 } 3079 3080 func (gz *gzipReader) Close() error { 3081 if err := gz.body.Close(); err != nil { 3082 return err 3083 } 3084 gz.zerr = fs.ErrClosed 3085 return nil 3086 } 3087 3088 type errorReader struct{ err error } 3089 3090 func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } 3091 3092 // isConnectionCloseRequest reports whether req should use its own 3093 // connection for a single request and then close the connection. 3094 func isConnectionCloseRequest(req *http.Request) bool { 3095 return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close") 3096 } 3097 3098 // registerHTTPSProtocol calls Transport.RegisterProtocol but 3099 // converting panics into errors. 3100 func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) { 3101 defer func() { 3102 if e := recover(); e != nil { 3103 err = fmt.Errorf("%v", e) 3104 } 3105 }() 3106 t.RegisterProtocol("https", rt) 3107 return nil 3108 } 3109 3110 // noDialH2RoundTripper is a RoundTripper which only tries to complete the request 3111 // if there's already has a cached connection to the host. 
3112 // (The field is exported so it can be accessed via reflect from net/http; tested 3113 // by TestNoDialH2RoundTripperType) 3114 type noDialH2RoundTripper struct{ *Transport } 3115 3116 func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { 3117 res, err := rt.Transport.RoundTrip(req) 3118 if isNoCachedConnError(err) { 3119 return nil, http.ErrSkipAltProtocol 3120 } 3121 return res, err 3122 } 3123 3124 func (t *Transport) idleConnTimeout() time.Duration { 3125 if t.t1 != nil { 3126 return t.t1.IdleConnTimeout 3127 } 3128 return 0 3129 } 3130 3131 func traceGetConn(req *http.Request, hostPort string) { 3132 trace := httptrace.ContextClientTrace(req.Context()) 3133 if trace == nil || trace.GetConn == nil { 3134 return 3135 } 3136 trace.GetConn(hostPort) 3137 } 3138 3139 func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { 3140 trace := httptrace.ContextClientTrace(req.Context()) 3141 if trace == nil || trace.GotConn == nil { 3142 return 3143 } 3144 ci := httptrace.GotConnInfo{Conn: cc.tconn} 3145 ci.Reused = reused 3146 cc.mu.Lock() 3147 ci.WasIdle = len(cc.streams) == 0 && reused 3148 if ci.WasIdle && !cc.lastActive.IsZero() { 3149 ci.IdleTime = time.Since(cc.lastActive) 3150 } 3151 cc.mu.Unlock() 3152 3153 trace.GotConn(ci) 3154 } 3155 3156 func traceWroteHeaders(trace *httptrace.ClientTrace) { 3157 if trace != nil && trace.WroteHeaders != nil { 3158 trace.WroteHeaders() 3159 } 3160 } 3161 3162 func traceGot100Continue(trace *httptrace.ClientTrace) { 3163 if trace != nil && trace.Got100Continue != nil { 3164 trace.Got100Continue() 3165 } 3166 } 3167 3168 func traceWait100Continue(trace *httptrace.ClientTrace) { 3169 if trace != nil && trace.Wait100Continue != nil { 3170 trace.Wait100Continue() 3171 } 3172 } 3173 3174 func traceWroteRequest(trace *httptrace.ClientTrace, err error) { 3175 if trace != nil && trace.WroteRequest != nil { 3176 trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) 3177 } 3178 } 3179 3180 func traceFirstResponseByte(trace *httptrace.ClientTrace) { 3181 if trace != nil && trace.GotFirstResponseByte != nil { 3182 trace.GotFirstResponseByte() 3183 } 3184 }
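// The function below is an illustrative sketch appended as documentation; it is
// not part of the upstream file. It assumes the bundled http fork
// (github.com/Kolosok86/http) mirrors net/http's request/response API
// (http.NewRequest, Response.Body), and it uses a placeholder URL. It shows the
// minimal way to drive this Transport directly through RoundTrip with a utls
// client configuration.
func exampleRoundTrip() error {
	t := &Transport{
		// utls config; InsecureSkipVerify only keeps the sketch short.
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}

	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		return err
	}

	res, err := t.RoundTrip(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()

	// Drain the body so the connection can be reused.
	_, err = io.Copy(io.Discard, res.Body)
	return err
}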