github.com/hxx258456/ccgo@v0.0.5-0.20230213014102-48b35f46f66f/grpc/internal/transport/http2_client.go

/*
 *
 * Copyright 2014 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package transport

import (
	"context"
	"fmt"
	"io"
	"math"
	"net"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	http "github.com/hxx258456/ccgo/gmhttp"

	"github.com/hxx258456/ccgo/grpc/codes"
	"github.com/hxx258456/ccgo/grpc/credentials"
	"github.com/hxx258456/ccgo/grpc/internal/channelz"
	icredentials "github.com/hxx258456/ccgo/grpc/internal/credentials"
	"github.com/hxx258456/ccgo/grpc/internal/grpcutil"
	imetadata "github.com/hxx258456/ccgo/grpc/internal/metadata"
	"github.com/hxx258456/ccgo/grpc/internal/syscall"
	"github.com/hxx258456/ccgo/grpc/internal/transport/networktype"
	"github.com/hxx258456/ccgo/grpc/keepalive"
	"github.com/hxx258456/ccgo/grpc/metadata"
	"github.com/hxx258456/ccgo/grpc/peer"
	"github.com/hxx258456/ccgo/grpc/resolver"
	"github.com/hxx258456/ccgo/grpc/stats"
	"github.com/hxx258456/ccgo/grpc/status"
	"github.com/hxx258456/ccgo/net/http2"
	"github.com/hxx258456/ccgo/net/http2/hpack"
)

// clientConnectionCounter counts the number of connections a client has
// initiated (equal to the number of http2Clients created). Must be accessed
// atomically.
var clientConnectionCounter uint64

// http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct {
	lastRead   int64 // Keep this field 64-bit aligned. Accessed atomically.
	ctx        context.Context
	cancel     context.CancelFunc
	ctxDone    <-chan struct{} // Cache the ctx.Done() chan.
	userAgent  string
	md         metadata.MD
	conn       net.Conn // underlying communication channel
	loopy      *loopyWriter
	remoteAddr net.Addr
	localAddr  net.Addr
	authInfo   credentials.AuthInfo // auth info about the connection

	readerDone chan struct{} // sync point to enable testing.
	writerDone chan struct{} // sync point to enable testing.
	// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
	// that the server sent GoAway on this transport.
	goAway chan struct{}

	framer *framer
	// controlBuf delivers all the control related tasks (e.g., window
	// updates, reset streams, and various settings) to the controller.
	controlBuf *controlBuffer
	fc         *trInFlow
	// The scheme used: https if TLS is on, http otherwise.
	scheme string

	isSecure bool

	perRPCCreds []credentials.PerRPCCredentials

	kp               keepalive.ClientParameters
	keepaliveEnabled bool

	statsHandler stats.Handler

	initialWindowSize int32

	// configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
	maxSendHeaderListSize *uint32

	bdpEst *bdpEstimator
	// onPrefaceReceipt is a callback that the client transport calls upon
	// receiving the server preface to signal that a successful HTTP2
	// connection was established.
	onPrefaceReceipt func()

	maxConcurrentStreams  uint32
	streamQuota           int64
	streamsQuotaAvailable chan struct{}
	waitingStreams        uint32
	nextID                uint32

	mu            sync.Mutex // guard the following variables
	state         transportState
	activeStreams map[uint32]*Stream
	// prevGoAwayID records the Last-Stream-ID in the previous GoAway frame.
	prevGoAwayID uint32
	// goAwayReason records the http2.ErrCode and debug data received with the
	// GoAway frame.
	goAwayReason GoAwayReason
	// goAwayDebugMessage contains a detailed human readable string about a
	// GoAway frame, useful for error messages.
	goAwayDebugMessage string
	// A condition variable used to signal when the keepalive goroutine should
	// go dormant. The condition for dormancy is based on the number of active
	// streams and the `PermitWithoutStream` keepalive client parameter. And
	// since the number of active streams is guarded by the above mutex, we use
	// the same for this condition variable as well.
	kpDormancyCond *sync.Cond
	// A boolean to track whether the keepalive goroutine is dormant or not.
	// This is checked before attempting to signal the above condition
	// variable.
	kpDormant bool

	// Fields below are for channelz metric collection.
	channelzID int64 // channelz unique identification number
	czData     *channelzData

	onGoAway func(GoAwayReason)
	onClose  func()

	bufferPool *bufferPool

	connectionID uint64
}

func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
	address := addr.Addr
	networkType, ok := networktype.Get(addr)
	if fn != nil {
		// Special handling for unix scheme with custom dialer. Back in the day,
		// we did not have a unix resolver and therefore targets with a unix
		// scheme would end up using the passthrough resolver. So, users used a
		// custom dialer in this case and expected the original dial target to
		// be passed to the custom dialer. Now, we have a unix resolver. But if
		// a custom dialer is specified, we want to retain the old behavior in
		// terms of the address being passed to the custom dialer.
		if networkType == "unix" && !strings.HasPrefix(address, "\x00") {
			// Supported unix targets are either "unix://absolute-path" or
			// "unix:relative-path".
			if filepath.IsAbs(address) {
				return fn(ctx, "unix://"+address)
			}
			return fn(ctx, "unix:"+address)
		}
		return fn(ctx, address)
	}
	if !ok {
		networkType, address = parseDialTarget(address)
	}
	if networkType == "tcp" && useProxy {
		return proxyDial(ctx, address, grpcUA)
	}
	return (&net.Dialer{}).DialContext(ctx, networkType, address)
}

func isTemporary(err error) bool {
	switch err := err.(type) {
	case interface {
		Temporary() bool
	}:
		return err.Temporary()
	case interface {
		Timeout() bool
	}:
		// Timeouts may be resolved upon retry, and are thus treated as
		// temporary.
		return err.Timeout()
	}
	return true
}
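// To make the special-casing in dial above concrete, here is a rough
// illustration of what a custom dialer ends up receiving (the paths are only
// examples): a resolved unix address "/tmp/grpc.sock" is passed as
// "unix:///tmp/grpc.sock", a relative target "grpc.sock" is passed as
// "unix:grpc.sock", an abstract socket address (leading "\x00") is passed
// through unchanged, and for non-unix networks the dialer simply receives
// addr.Addr as-is.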
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. A non-nil error is returned if
// construction fails.
func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
	scheme := "http"
	ctx, cancel := context.WithCancel(ctx)
	defer func() {
		if err != nil {
			cancel()
		}
	}()

	// gRPC, resolver, balancer etc. can specify arbitrary data in the
	// Attributes field of resolver.Address, which is shoved into connectCtx
	// and passed to the dialer and credential handshaker. This makes it
	// possible for address-specific arbitrary data to reach custom dialers
	// and credential handshakers.
	connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes})

	conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent)
	if err != nil {
		if opts.FailOnNonTempDialError {
			return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
		}
		return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
	}
	// Any further errors will close the underlying connection.
	defer func(conn net.Conn) {
		if err != nil {
			conn.Close()
		}
	}(conn)
	kp := opts.KeepaliveParams
	// Validate keepalive parameters.
	if kp.Time == 0 {
		kp.Time = defaultClientKeepaliveTime
	}
	if kp.Timeout == 0 {
		kp.Timeout = defaultClientKeepaliveTimeout
	}
	keepaliveEnabled := false
	if kp.Time != infinity {
		if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
			return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
		}
		keepaliveEnabled = true
	}
	var (
		isSecure bool
		authInfo credentials.AuthInfo
	)
	transportCreds := opts.TransportCredentials
	perRPCCreds := opts.PerRPCCredentials

	if b := opts.CredsBundle; b != nil {
		if t := b.TransportCredentials(); t != nil {
			transportCreds = t
		}
		if t := b.PerRPCCredentials(); t != nil {
			perRPCCreds = append(perRPCCreds, t)
		}
	}
	if transportCreds != nil {
		rawConn := conn
		// Pull the deadline from the connectCtx, which will be used for
		// timeouts in the authentication protocol handshake. Can ignore the
		// boolean as the deadline will return the zero value, which will make
		// the conn not timeout on I/O operations.
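		// For illustration: if connectCtx carries no deadline, Deadline()
		// returns the zero time.Time, and per net.Conn semantics a zero
		// deadline means "no I/O timeout", so the handshake is then bounded
		// only by connectCtx itself. The deadline is cleared again right
		// after the handshake via SetDeadline(time.Time{}) below.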
		deadline, _ := connectCtx.Deadline()
		rawConn.SetDeadline(deadline)
		conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn)
		rawConn.SetDeadline(time.Time{})
		if err != nil {
			return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
		}
		for _, cd := range perRPCCreds {
			if cd.RequireTransportSecurity() {
				if ci, ok := authInfo.(interface {
					GetCommonAuthInfo() credentials.CommonAuthInfo
				}); ok {
					secLevel := ci.GetCommonAuthInfo().SecurityLevel
					if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity {
						return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection")
					}
				}
			}
		}
		isSecure = true
		if transportCreds.Info().SecurityProtocol == "tls" {
			scheme = "https"
		}
	}
	dynamicWindow := true
	icwz := int32(initialWindowSize)
	if opts.InitialConnWindowSize >= defaultWindowSize {
		icwz = opts.InitialConnWindowSize
		dynamicWindow = false
	}
	writeBufSize := opts.WriteBufferSize
	readBufSize := opts.ReadBufferSize
	maxHeaderListSize := defaultClientMaxHeaderListSize
	if opts.MaxHeaderListSize != nil {
		maxHeaderListSize = *opts.MaxHeaderListSize
	}
	t := &http2Client{
		ctx:                   ctx,
		ctxDone:               ctx.Done(), // Cache Done chan.
		cancel:                cancel,
		userAgent:             opts.UserAgent,
		conn:                  conn,
		remoteAddr:            conn.RemoteAddr(),
		localAddr:             conn.LocalAddr(),
		authInfo:              authInfo,
		readerDone:            make(chan struct{}),
		writerDone:            make(chan struct{}),
		goAway:                make(chan struct{}),
		framer:                newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
		fc:                    &trInFlow{limit: uint32(icwz)},
		scheme:                scheme,
		activeStreams:         make(map[uint32]*Stream),
		isSecure:              isSecure,
		perRPCCreds:           perRPCCreds,
		kp:                    kp,
		statsHandler:          opts.StatsHandler,
		initialWindowSize:     initialWindowSize,
		onPrefaceReceipt:      onPrefaceReceipt,
		nextID:                1,
		maxConcurrentStreams:  defaultMaxStreamsClient,
		streamQuota:           defaultMaxStreamsClient,
		streamsQuotaAvailable: make(chan struct{}, 1),
		czData:                new(channelzData),
		onGoAway:              onGoAway,
		onClose:               onClose,
		keepaliveEnabled:      keepaliveEnabled,
		bufferPool:            newBufferPool(),
	}

	if md, ok := addr.Metadata.(*metadata.MD); ok {
		t.md = *md
	} else if md := imetadata.Get(addr); md != nil {
		t.md = md
	}
	t.controlBuf = newControlBuffer(t.ctxDone)
	if opts.InitialWindowSize >= defaultWindowSize {
		t.initialWindowSize = opts.InitialWindowSize
		dynamicWindow = false
	}
	if dynamicWindow {
		t.bdpEst = &bdpEstimator{
			bdp:               initialWindowSize,
			updateFlowControl: t.updateFlowControl,
		}
	}
	if t.statsHandler != nil {
		t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
			RemoteAddr: t.remoteAddr,
			LocalAddr:  t.localAddr,
		})
		connBegin := &stats.ConnBegin{
			Client: true,
		}
		t.statsHandler.HandleConn(t.ctx, connBegin)
	}
	if channelz.IsOn() {
		t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
	}
	if t.keepaliveEnabled {
		t.kpDormancyCond = sync.NewCond(&t.mu)
		go t.keepalive()
	}
	// Start the reader goroutine for incoming messages. Each transport has a
	// dedicated goroutine which reads HTTP2 frames from the network. Then it
	// dispatches the frame to the corresponding stream entity.
	go t.reader()

	// Send connection preface to server.
	n, err := t.conn.Write(clientPreface)
	if err != nil {
		err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
		t.Close(err)
		return nil, err
	}
	if n != len(clientPreface) {
		err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
		t.Close(err)
		return nil, err
	}
	var ss []http2.Setting

	if t.initialWindowSize != defaultWindowSize {
		ss = append(ss, http2.Setting{
			ID:  http2.SettingInitialWindowSize,
			Val: uint32(t.initialWindowSize),
		})
	}
	if opts.MaxHeaderListSize != nil {
		ss = append(ss, http2.Setting{
			ID:  http2.SettingMaxHeaderListSize,
			Val: *opts.MaxHeaderListSize,
		})
	}
	err = t.framer.fr.WriteSettings(ss...)
	if err != nil {
		err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
		t.Close(err)
		return nil, err
	}
	// Adjust the connection flow control window if needed.
	if delta := uint32(icwz - defaultWindowSize); delta > 0 {
		if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
			err = connectionErrorf(true, err, "transport: failed to write window update: %v", err)
			t.Close(err)
			return nil, err
		}
	}

	t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1)

	if err := t.framer.writer.Flush(); err != nil {
		return nil, err
	}
	go func() {
		t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
		err := t.loopy.run()
		if err != nil {
			if logger.V(logLevel) {
				logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
			}
		}
		// Do not close the transport. Let reader goroutine handle it since
		// there might be data in the buffers.
		t.conn.Close()
		t.controlBuf.finish()
		close(t.writerDone)
	}()
	return t, nil
}

func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
	s := &Stream{
		ct:             t,
		done:           make(chan struct{}),
		method:         callHdr.Method,
		sendCompress:   callHdr.SendCompress,
		buf:            newRecvBuffer(),
		headerChan:     make(chan struct{}),
		contentSubtype: callHdr.ContentSubtype,
		doneFunc:       callHdr.DoneFunc,
	}
	s.wq = newWriteQuota(defaultWriteQuota, s.done)
	s.requestRead = func(n int) {
		t.adjustWindow(s, uint32(n))
	}
	// The client side stream context should have exactly the same life cycle with the user provided context.
	// That means, s.ctx should be read-only. And s.ctx is done iff ctx is done.
	// So we use the original context here instead of creating a copy.
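	// Rough sketch of the consequence: when the application cancels ctx, the
	// recvBufferReader below observes ctx.Done() while reading, and its
	// closeStream callback invokes t.CloseStream with the context error,
	// which in turn resets the stream with RST_STREAM(CANCEL).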
	s.ctx = ctx
	s.trReader = &transportReader{
		reader: &recvBufferReader{
			ctx:     s.ctx,
			ctxDone: s.ctx.Done(),
			recv:    s.buf,
			closeStream: func(err error) {
				t.CloseStream(s, err)
			},
			freeBuffer: t.bufferPool.put,
		},
		windowHandler: func(n int) {
			t.updateWindow(s, uint32(n))
		},
	}
	return s
}

func (t *http2Client) getPeer() *peer.Peer {
	return &peer.Peer{
		Addr:     t.remoteAddr,
		AuthInfo: t.authInfo,
	}
}

func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
	aud := t.createAudience(callHdr)
	ri := credentials.RequestInfo{
		Method:   callHdr.Method,
		AuthInfo: t.authInfo,
	}
	ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri)
	authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
	if err != nil {
		return nil, err
	}
	callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr)
	if err != nil {
		return nil, err
	}
	// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
	// first and create a slice of that exact size.
	// Make the slice of certain predictable size to reduce allocations made by append.
	hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
	hfLen += len(authData) + len(callAuthData)
	headerFields := make([]hpack.HeaderField, 0, hfLen)
	headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
	headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
	headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
	headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)})
	headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
	headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
	if callHdr.PreviousAttempts > 0 {
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
	}

	if callHdr.SendCompress != "" {
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress})
	}
	if dl, ok := ctx.Deadline(); ok {
		// Send out the timeout regardless of its value. The server can detect a timeout context by itself.
		// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
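		// For example (assuming grpcutil.EncodeDuration follows the standard
		// gRPC timeout encoding with units n, u, m, S, M, H and an 8-digit
		// limit), a deadline roughly 100ms away is typically sent on the wire
		// as "grpc-timeout: 100000u".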
		timeout := time.Until(dl)
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)})
	}
	for k, v := range authData {
		headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
	}
	for k, v := range callAuthData {
		headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
	}
	if b := stats.OutgoingTags(ctx); b != nil {
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
	}
	if b := stats.OutgoingTrace(ctx); b != nil {
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
	}

	if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
		var k string
		for k, vv := range md {
			// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
			if isReservedHeader(k) {
				continue
			}
			for _, v := range vv {
				headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
			}
		}
		for _, vv := range added {
			for i, v := range vv {
				if i%2 == 0 {
					k = strings.ToLower(v)
					continue
				}
				// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
				if isReservedHeader(k) {
					continue
				}
				headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
			}
		}
	}
	for k, vv := range t.md {
		if isReservedHeader(k) {
			continue
		}
		for _, v := range vv {
			headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
		}
	}
	return headerFields, nil
}

func (t *http2Client) createAudience(callHdr *CallHdr) string {
	// Create an audience string only if needed.
	if len(t.perRPCCreds) == 0 && callHdr.Creds == nil {
		return ""
	}
	// Construct URI required to get auth request metadata.
	// Omit port if it is the default one.
	host := strings.TrimSuffix(callHdr.Host, ":443")
	pos := strings.LastIndex(callHdr.Method, "/")
	if pos == -1 {
		pos = len(callHdr.Method)
	}
	return "https://" + host + callHdr.Method[:pos]
}

func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) {
	if len(t.perRPCCreds) == 0 {
		return nil, nil
	}
	authData := map[string]string{}
	for _, c := range t.perRPCCreds {
		data, err := c.GetRequestMetadata(ctx, audience)
		if err != nil {
			if _, ok := status.FromError(err); ok {
				return nil, err
			}

			return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err)
		}
		for k, v := range data {
			// Capital header names are illegal in HTTP/2.
			k = strings.ToLower(k)
			authData[k] = v
		}
	}
	return authData, nil
}

func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) {
	var callAuthData map[string]string
	// Check if credentials.PerRPCCredentials were provided via call options.
	// Note: if these credentials are provided both via dial options and call
	// options, then both sets of credentials will be applied.
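	// For illustration, the audience computed by createAudience above for a
	// Host of "pubsub.googleapis.com:443" and a Method of
	// "/google.pubsub.v1.Publisher/Publish" would be
	// "https://pubsub.googleapis.com/google.pubsub.v1.Publisher"; the same
	// audience is handed to both dial-time and per-call credentials.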
	if callCreds := callHdr.Creds; callCreds != nil {
		if callCreds.RequireTransportSecurity() {
			ri, _ := credentials.RequestInfoFromContext(ctx)
			if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil {
				return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
			}
		}
		data, err := callCreds.GetRequestMetadata(ctx, audience)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "transport: %v", err)
		}
		callAuthData = make(map[string]string, len(data))
		for k, v := range data {
			// Capital header names are illegal in HTTP/2.
			k = strings.ToLower(k)
			callAuthData[k] = v
		}
	}
	return callAuthData, nil
}

// NewStreamError wraps an error and reports additional information. Typically
// NewStream errors result in transparent retry, as they mean nothing went onto
// the wire. However, there are two notable exceptions:
//
//  1. If the stream headers violate the max header list size allowed by the
//     server. In this case there is no reason to retry at all, as it is
//     assumed the RPC would continue to fail on subsequent attempts.
//  2. If the credentials errored when requesting their headers. In this case,
//     it's possible a retry can fix the problem, but indefinitely transparently
//     retrying is not appropriate as it is likely the credentials, if they can
//     eventually succeed, would need I/O to do so.
type NewStreamError struct {
	Err error

	DoNotRetry            bool
	DoNotTransparentRetry bool
}

func (e NewStreamError) Error() string {
	return e.Err.Error()
}

// NewStream creates a stream and registers it in the transport as an "active"
// stream. All non-nil errors returned will be *NewStreamError.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
	ctx = peer.NewContext(ctx, t.getPeer())
	headerFields, err := t.createHeaderFields(ctx, callHdr)
	if err != nil {
		return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true}
	}
	s := t.newStream(ctx, callHdr)
	cleanup := func(err error) {
		if s.swapState(streamDone) == streamDone {
			// If it was already done, return.
			return
		}
		// The stream was unprocessed by the server.
		atomic.StoreUint32(&s.unprocessed, 1)
		s.write(recvMsg{err: err})
		close(s.done)
		// If headerChan isn't closed, then close it.
		if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
			close(s.headerChan)
		}
	}
	hdr := &headerFrame{
		hf:        headerFields,
		endStream: false,
		initStream: func(id uint32) error {
			t.mu.Lock()
			if state := t.state; state != reachable {
				t.mu.Unlock()
				// Do a quick cleanup.
				err := error(errStreamDrain)
				if state == closing {
					err = ErrConnClosing
				}
				cleanup(err)
				return err
			}
			t.activeStreams[id] = s
			if channelz.IsOn() {
				atomic.AddInt64(&t.czData.streamsStarted, 1)
				atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
			}
			// If the keepalive goroutine has gone dormant, wake it up.
			if t.kpDormant {
				t.kpDormancyCond.Signal()
			}
			t.mu.Unlock()
			return nil
		},
		onOrphaned: cleanup,
		wq:         s.wq,
	}
	firstTry := true
	var ch chan struct{}
	checkForStreamQuota := func(it interface{}) bool {
		if t.streamQuota <= 0 { // Can go negative if server decreases it.
			if firstTry {
				t.waitingStreams++
			}
			ch = t.streamsQuotaAvailable
			return false
		}
		if !firstTry {
			t.waitingStreams--
		}
		t.streamQuota--
		h := it.(*headerFrame)
		h.streamID = t.nextID
		t.nextID += 2
		s.id = h.streamID
		s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
		if t.streamQuota > 0 && t.waitingStreams > 0 {
			select {
			case t.streamsQuotaAvailable <- struct{}{}:
			default:
			}
		}
		return true
	}
	var hdrListSizeErr error
	checkForHeaderListSize := func(it interface{}) bool {
		if t.maxSendHeaderListSize == nil {
			return true
		}
		hdrFrame := it.(*headerFrame)
		var sz int64
		for _, f := range hdrFrame.hf {
			if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
				hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
				return false
			}
		}
		return true
	}
	for {
		success, err := t.controlBuf.executeAndPut(func(it interface{}) bool {
			if !checkForStreamQuota(it) {
				return false
			}
			if !checkForHeaderListSize(it) {
				return false
			}
			return true
		}, hdr)
		if err != nil {
			return nil, &NewStreamError{Err: err}
		}
		if success {
			break
		}
		if hdrListSizeErr != nil {
			return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true}
		}
		firstTry = false
		select {
		case <-ch:
		case <-ctx.Done():
			return nil, &NewStreamError{Err: ContextErr(ctx.Err())}
		case <-t.goAway:
			return nil, &NewStreamError{Err: errStreamDrain}
		case <-t.ctx.Done():
			return nil, &NewStreamError{Err: ErrConnClosing}
		}
	}
	if t.statsHandler != nil {
		header, ok := metadata.FromOutgoingContext(ctx)
		if ok {
			header.Set("user-agent", t.userAgent)
		} else {
			header = metadata.Pairs("user-agent", t.userAgent)
		}
		// Note: The header fields are compressed with hpack after this call returns.
		// No WireLength field is set here.
		outHeader := &stats.OutHeader{
			Client:      true,
			FullMethod:  callHdr.Method,
			RemoteAddr:  t.remoteAddr,
			LocalAddr:   t.localAddr,
			Compression: callHdr.SendCompress,
			Header:      header,
		}
		t.statsHandler.HandleRPC(s.ctx, outHeader)
	}
	return s, nil
}

// CloseStream clears the footprint of a stream when the stream is not needed any more.
// This must not be executed in reader's goroutine.
func (t *http2Client) CloseStream(s *Stream, err error) {
	var (
		rst     bool
		rstCode http2.ErrCode
	)
	if err != nil {
		rst = true
		rstCode = http2.ErrCodeCancel
	}
	t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
}

func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
	// Set stream status to done.
	if s.swapState(streamDone) == streamDone {
		// If it was already done, return. If multiple closeStream calls
		// happen simultaneously, wait for the first to finish.
		<-s.done
		return
	}
	// status and trailers can be updated here without any synchronization because the stream goroutine will
	// only read it after it sees an io.EOF error from read or write and we'll write those errors
	// only after updating this.
	s.status = st
	if len(mdata) > 0 {
		s.trailer = mdata
	}
	if err != nil {
		// This will unblock reads eventually.
		s.write(recvMsg{err: err})
	}
	// If headerChan isn't closed, then close it.
	if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
		s.noHeaders = true
		close(s.headerChan)
	}
	cleanup := &cleanupStream{
		streamID: s.id,
		onWrite: func() {
			t.mu.Lock()
			if t.activeStreams != nil {
				delete(t.activeStreams, s.id)
			}
			t.mu.Unlock()
			if channelz.IsOn() {
				if eosReceived {
					atomic.AddInt64(&t.czData.streamsSucceeded, 1)
				} else {
					atomic.AddInt64(&t.czData.streamsFailed, 1)
				}
			}
		},
		rst:     rst,
		rstCode: rstCode,
	}
	addBackStreamQuota := func(interface{}) bool {
		t.streamQuota++
		if t.streamQuota > 0 && t.waitingStreams > 0 {
			select {
			case t.streamsQuotaAvailable <- struct{}{}:
			default:
			}
		}
		return true
	}
	t.controlBuf.executeAndPut(addBackStreamQuota, cleanup)
	// This will unblock write.
	close(s.done)
	if s.doneFunc != nil {
		s.doneFunc()
	}
}

// Close kicks off the shutdown process of the transport. This should be called
// only once on a transport. Once it is called, the transport should not be
// accessed any more.
//
// This method blocks until the addrConn that initiated this transport is
// re-connected. This happens because t.onClose() begins reconnect logic at the
// addrConn level and blocks until the addrConn is successfully connected.
func (t *http2Client) Close(err error) {
	t.mu.Lock()
	// Make sure we only Close once.
	if t.state == closing {
		t.mu.Unlock()
		return
	}
	// Call t.onClose before setting the state to closing to prevent the client
	// from attempting to create new streams ASAP.
	t.onClose()
	t.state = closing
	streams := t.activeStreams
	t.activeStreams = nil
	if t.kpDormant {
		// If the keepalive goroutine is blocked on this condition variable, we
		// should unblock it so that the goroutine eventually exits.
		t.kpDormancyCond.Signal()
	}
	t.mu.Unlock()
	t.controlBuf.finish()
	t.cancel()
	t.conn.Close()
	if channelz.IsOn() {
		channelz.RemoveEntry(t.channelzID)
	}
	// Append info about previous goaways if there were any, since this may be important
	// for understanding the root cause for this connection to be closed.
	_, goAwayDebugMessage := t.GetGoAwayReason()

	var st *status.Status
	if len(goAwayDebugMessage) > 0 {
		st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage)
		err = st.Err()
	} else {
		st = status.New(codes.Unavailable, err.Error())
	}

	// Notify all active streams.
	for _, s := range streams {
		t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
	}
	if t.statsHandler != nil {
		connEnd := &stats.ConnEnd{
			Client: true,
		}
		t.statsHandler.HandleConn(t.ctx, connEnd)
	}
}

// GracefulClose sets the state to draining, which prevents new streams from
// being created and causes the transport to be closed when the last active
// stream is closed. If there are no active streams, the transport is closed
// immediately. This does nothing if the transport is already draining or
// closing.
func (t *http2Client) GracefulClose() {
	t.mu.Lock()
	// Make sure we move to draining only from active.
	if t.state == draining || t.state == closing {
		t.mu.Unlock()
		return
	}
	t.state = draining
	active := len(t.activeStreams)
	t.mu.Unlock()
	if active == 0 {
		t.Close(ErrConnClosing)
		return
	}
	t.controlBuf.put(&incomingGoAway{})
}

// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
// should proceed only if Write returns nil.
func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
	if opts.Last {
		// If it's the last message, update stream state.
		if !s.compareAndSwapState(streamActive, streamWriteDone) {
			return errStreamDone
		}
	} else if s.getState() != streamActive {
		return errStreamDone
	}
	df := &dataFrame{
		streamID:  s.id,
		endStream: opts.Last,
		h:         hdr,
		d:         data,
	}
	if hdr != nil || data != nil { // If it's not an empty data frame, check quota.
		if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
			return err
		}
	}
	return t.controlBuf.put(df)
}

func (t *http2Client) getStream(f http2.Frame) *Stream {
	t.mu.Lock()
	s := t.activeStreams[f.Header().StreamID]
	t.mu.Unlock()
	return s
}

// adjustWindow sends out an extra window update over the initial window size
// of the stream if the application is requesting data larger in size than
// the window.
func (t *http2Client) adjustWindow(s *Stream, n uint32) {
	if w := s.fc.maybeAdjust(n); w > 0 {
		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
	}
}

// updateWindow adjusts the inbound quota for the stream.
// Window updates will be sent out when the cumulative quota
// exceeds the corresponding threshold.
func (t *http2Client) updateWindow(s *Stream, n uint32) {
	if w := s.fc.onRead(n); w > 0 {
		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
	}
}

// updateFlowControl updates the incoming flow control windows
// for the transport and the stream based on the current bdp
// estimation.
func (t *http2Client) updateFlowControl(n uint32) {
	t.mu.Lock()
	for _, s := range t.activeStreams {
		s.fc.newLimit(n)
	}
	t.mu.Unlock()
	updateIWS := func(interface{}) bool {
		t.initialWindowSize = int32(n)
		return true
	}
	t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)})
	t.controlBuf.put(&outgoingSettings{
		ss: []http2.Setting{
			{
				ID:  http2.SettingInitialWindowSize,
				Val: n,
			},
		},
	})
}

func (t *http2Client) handleData(f *http2.DataFrame) {
	size := f.Header().Length
	var sendBDPPing bool
	if t.bdpEst != nil {
		sendBDPPing = t.bdpEst.add(size)
	}
	// Decouple connection's flow control from application's read.
	// An update on connection's flow control should not depend on
	// whether user application has read the data or not. Such a
	// restriction is already imposed on the stream's flow control,
	// and therefore the sender will be blocked anyways.
	// Decoupling the connection flow control will prevent other
	// active (fast) streams from starving in presence of slow or
	// inactive streams.
	if w := t.fc.onData(size); w > 0 {
		t.controlBuf.put(&outgoingWindowUpdate{
			streamID:  0,
			increment: w,
		})
	}
	if sendBDPPing {
		// Avoid excessive ping detection (e.g. in an L7 proxy)
		// by sending a window update prior to the BDP ping.
		if w := t.fc.reset(); w > 0 {
			t.controlBuf.put(&outgoingWindowUpdate{
				streamID:  0,
				increment: w,
			})
		}

		t.controlBuf.put(bdpPing)
	}
	// Select the right stream to dispatch.
	s := t.getStream(f)
	if s == nil {
		return
	}
	if size > 0 {
		if err := s.fc.onData(size); err != nil {
			t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false)
			return
		}
		if f.Header().Flags.Has(http2.FlagDataPadded) {
			if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
				t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
			}
		}
		// TODO(bradfitz, zhaoq): A copy is required here because there is no
		// guarantee f.Data() is consumed before the arrival of next frame.
		// Can this copy be eliminated?
		if len(f.Data()) > 0 {
			buffer := t.bufferPool.get()
			buffer.Reset()
			buffer.Write(f.Data())
			s.write(recvMsg{buffer: buffer})
		}
	}
	// The server has closed the stream without sending trailers. Record that
	// the read direction is closed, and set the status appropriately.
	if f.StreamEnded() {
		t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true)
	}
}

func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
	s := t.getStream(f)
	if s == nil {
		return
	}
	if f.ErrCode == http2.ErrCodeRefusedStream {
		// The stream was unprocessed by the server.
		atomic.StoreUint32(&s.unprocessed, 1)
	}
	statusCode, ok := http2ErrConvTab[f.ErrCode]
	if !ok {
		if logger.V(logLevel) {
			logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
		}
		statusCode = codes.Unknown
	}
	if statusCode == codes.Canceled {
		if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
			// Our deadline was already exceeded, and that was likely the cause
			// of this cancelation. Alter the status code accordingly.
			statusCode = codes.DeadlineExceeded
		}
	}
	t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
}

func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
	if f.IsAck() {
		return
	}
	var maxStreams *uint32
	var ss []http2.Setting
	var updateFuncs []func()
	f.ForeachSetting(func(s http2.Setting) error {
		switch s.ID {
		case http2.SettingMaxConcurrentStreams:
			maxStreams = new(uint32)
			*maxStreams = s.Val
		case http2.SettingMaxHeaderListSize:
			updateFuncs = append(updateFuncs, func() {
				t.maxSendHeaderListSize = new(uint32)
				*t.maxSendHeaderListSize = s.Val
			})
		default:
			ss = append(ss, s)
		}
		return nil
	})
	if isFirst && maxStreams == nil {
		maxStreams = new(uint32)
		*maxStreams = math.MaxUint32
	}
	sf := &incomingSettings{
		ss: ss,
	}
	if maxStreams != nil {
		updateStreamQuota := func() {
			delta := int64(*maxStreams) - int64(t.maxConcurrentStreams)
			t.maxConcurrentStreams = *maxStreams
			t.streamQuota += delta
			if delta > 0 && t.waitingStreams > 0 {
				close(t.streamsQuotaAvailable) // wake all of them up.
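				// Closing and re-making the channel wakes every NewStream
				// caller blocked in checkForStreamQuota at once. As a rough
				// example of the delta arithmetic: if the server lowers
				// MAX_CONCURRENT_STREAMS from 100 to 20 while 50 streams are
				// active, delta is -80 and streamQuota goes negative, so new
				// streams block in NewStream until enough active streams
				// finish to bring the quota back above zero.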
				t.streamsQuotaAvailable = make(chan struct{}, 1)
			}
		}
		updateFuncs = append(updateFuncs, updateStreamQuota)
	}
	t.controlBuf.executeAndPut(func(interface{}) bool {
		for _, f := range updateFuncs {
			f()
		}
		return true
	}, sf)
}

func (t *http2Client) handlePing(f *http2.PingFrame) {
	if f.IsAck() {
		// Maybe it's a BDP ping.
		if t.bdpEst != nil {
			t.bdpEst.calculate(f.Data)
		}
		return
	}
	pingAck := &ping{ack: true}
	copy(pingAck.data[:], f.Data[:])
	t.controlBuf.put(pingAck)
}

func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
	t.mu.Lock()
	if t.state == closing {
		t.mu.Unlock()
		return
	}
	if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
		if logger.V(logLevel) {
			logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
		}
	}
	id := f.LastStreamID
	if id > 0 && id%2 == 0 {
		t.mu.Unlock()
		t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id))
		return
	}
	// A client can receive multiple GoAways from the server (see
	// https://github.com/grpc/grpc-go/issues/1387). The idea is that the first
	// GoAway will be sent with an ID of MaxInt32 and the second GoAway will be
	// sent after an RTT delay with the ID of the last stream the server will
	// process.
	//
	// Therefore, when we get the first GoAway we don't necessarily close any
	// streams. While in case of second GoAway we close all streams created after
	// the GoAwayId. This way streams that were in-flight while the GoAway from
	// server was being sent don't get killed.
	select {
	case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
		// If there are multiple GoAways the first one should always have an ID greater than the following ones.
		if id > t.prevGoAwayID {
			t.mu.Unlock()
			t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID))
			return
		}
	default:
		t.setGoAwayReason(f)
		close(t.goAway)
		t.controlBuf.put(&incomingGoAway{})
		// Notify the clientconn about the GOAWAY before we set the state to
		// draining, to allow the client to stop attempting to create streams
		// before disallowing new streams on this connection.
		t.onGoAway(t.goAwayReason)
		t.state = draining
	}
	// All streams with IDs greater than the GoAwayId
	// and smaller than the previous GoAway ID should be killed.
	upperLimit := t.prevGoAwayID
	if upperLimit == 0 { // This is the first GoAway Frame.
		upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
	}
	for streamID, stream := range t.activeStreams {
		if streamID > id && streamID <= upperLimit {
			// The stream was unprocessed by the server.
			atomic.StoreUint32(&stream.unprocessed, 1)
			t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
		}
	}
	t.prevGoAwayID = id
	active := len(t.activeStreams)
	t.mu.Unlock()
	if active == 0 {
		t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
	}
}
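// As a concrete example of the double-GOAWAY flow handled above: during a
// graceful shutdown a server typically first sends GOAWAY with
// LastStreamID = 2^31-1, which only moves this transport into draining, and
// then a second GOAWAY carrying the ID of the last stream it actually
// processed, say 7. At that point streams 9, 11, ... (IDs greater than 7 and
// no greater than the previous GOAWAY ID) are closed as unprocessed, so the
// upper layer can safely retry them on another connection.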
// setGoAwayReason sets the value of t.goAwayReason based
// on the GoAway frame received.
// It expects a lock on the transport's mutex to be held by
// the caller.
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
	t.goAwayReason = GoAwayNoReason
	switch f.ErrCode {
	case http2.ErrCodeEnhanceYourCalm:
		if string(f.DebugData()) == "too_many_pings" {
			t.goAwayReason = GoAwayTooManyPings
		}
	}
	if len(f.DebugData()) == 0 {
		t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode)
	} else {
		t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData()))
	}
}

func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.goAwayReason, t.goAwayDebugMessage
}

func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
	t.controlBuf.put(&incomingWindowUpdate{
		streamID:  f.Header().StreamID,
		increment: f.Increment,
	})
}

// operateHeaders takes action on the decoded headers.
func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
	s := t.getStream(frame)
	if s == nil {
		return
	}
	endStream := frame.StreamEnded()
	atomic.StoreUint32(&s.bytesReceived, 1)
	initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0

	if !initialHeader && !endStream {
		// As specified by gRPC over HTTP2, a HEADERS frame (and associated
		// CONTINUATION frames) can only appear at the start or end of a
		// stream. Therefore, a second HEADERS frame must have the EOS bit set.
		st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream")
		t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false)
		return
	}

	// frame.Truncated is set to true when the framer detects that the current
	// header list size hits the MaxHeaderListSize limit.
	if frame.Truncated {
		se := status.New(codes.Internal, "peer header list size exceeded limit")
		t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream)
		return
	}

	var (
		// If a gRPC Response-Headers has already been received, then it means
		// that the peer is speaking gRPC and we are in gRPC mode.
		isGRPC         = !initialHeader
		mdata          = make(map[string][]string)
		contentTypeErr = "malformed header: missing HTTP content-type"
		grpcMessage    string
		statusGen      *status.Status
		recvCompress   string
		httpStatusCode *int
		httpStatusErr  string
		rawStatusCode  = codes.Unknown
		// headerError is set if an error is encountered while parsing the headers
		headerError string
	)

	if initialHeader {
		httpStatusErr = "malformed header: missing HTTP status"
	}

	for _, hf := range frame.Fields {
		switch hf.Name {
		case "content-type":
			if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType {
				contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value)
				break
			}
			contentTypeErr = ""
			mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
			isGRPC = true
		case "grpc-encoding":
			recvCompress = hf.Value
		case "grpc-status":
			code, err := strconv.ParseInt(hf.Value, 10, 32)
			if err != nil {
				se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err))
				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
				return
			}
			rawStatusCode = codes.Code(uint32(code))
		case "grpc-message":
			grpcMessage = decodeGrpcMessage(hf.Value)
		case "grpc-status-details-bin":
			var err error
			statusGen, err = decodeGRPCStatusDetails(hf.Value)
			if err != nil {
				headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err)
			}
		case ":status":
			if hf.Value == "200" {
				httpStatusErr = ""
				statusCode := 200
				httpStatusCode = &statusCode
				break
			}

			c, err := strconv.ParseInt(hf.Value, 10, 32)
			if err != nil {
				se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err))
				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
				return
			}
			statusCode := int(c)
			httpStatusCode = &statusCode

			httpStatusErr = fmt.Sprintf(
				"unexpected HTTP status code received from server: %d (%s)",
				statusCode,
				http.StatusText(statusCode),
			)
		default:
			if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
				break
			}
			v, err := decodeMetadataHeader(hf.Name, hf.Value)
			if err != nil {
				headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err)
				logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
				break
			}
			mdata[hf.Name] = append(mdata[hf.Name], v)
		}
	}

	if !isGRPC || httpStatusErr != "" {
		var code = codes.Internal // when header does not include HTTP status, return INTERNAL

		if httpStatusCode != nil {
			var ok bool
			code, ok = HTTPStatusConvTab[*httpStatusCode]
			if !ok {
				code = codes.Unknown
			}
		}
		var errs []string
		if httpStatusErr != "" {
			errs = append(errs, httpStatusErr)
		}
		if contentTypeErr != "" {
			errs = append(errs, contentTypeErr)
		}
		// Verify the HTTP response is a 200.
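		// For example, a "503 Service Unavailable" reply from an intermediary
		// that lacks a grpc content-type typically maps (via HTTPStatusConvTab)
		// to codes.Unavailable, and the resulting status message joins both
		// the unexpected-HTTP-status error and the content-type error.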
		se := status.New(code, strings.Join(errs, "; "))
		t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
		return
	}

	if headerError != "" {
		se := status.New(codes.Internal, headerError)
		t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
		return
	}

	isHeader := false

	// If headerChan hasn't been closed yet
	if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
		s.headerValid = true
		if !endStream {
			// HEADERS frame block carries a Response-Headers.
			isHeader = true
			// These values can be set without any synchronization because
			// stream goroutine will read it only after seeing a closed
			// headerChan which we'll close after setting this.
			s.recvCompress = recvCompress
			if len(mdata) > 0 {
				s.header = mdata
			}
		} else {
			// HEADERS frame block carries a Trailers-Only.
			s.noHeaders = true
		}
		close(s.headerChan)
	}

	if t.statsHandler != nil {
		if isHeader {
			inHeader := &stats.InHeader{
				Client:      true,
				WireLength:  int(frame.Header().Length),
				Header:      metadata.MD(mdata).Copy(),
				Compression: s.recvCompress,
			}
			t.statsHandler.HandleRPC(s.ctx, inHeader)
		} else {
			inTrailer := &stats.InTrailer{
				Client:     true,
				WireLength: int(frame.Header().Length),
				Trailer:    metadata.MD(mdata).Copy(),
			}
			t.statsHandler.HandleRPC(s.ctx, inTrailer)
		}
	}

	if !endStream {
		return
	}

	if statusGen == nil {
		statusGen = status.New(rawStatusCode, grpcMessage)
	}

	// If the client received END_STREAM from the server while the stream was
	// still active, send RST_STREAM.
	rst := s.getState() == streamActive
	t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true)
}

// reader runs as a separate goroutine in charge of reading data from the
// network connection.
//
// TODO(zhaoq): currently one reader per transport. Investigate whether this is
// optimal.
// TODO(zhaoq): Check the validity of the incoming frame sequence.
func (t *http2Client) reader() {
	defer close(t.readerDone)
	// Check the validity of server preface.
	frame, err := t.framer.fr.ReadFrame()
	if err != nil {
		err = connectionErrorf(true, err, "error reading server preface: %v", err)
		t.Close(err) // this kicks off resetTransport, so must be last before return
		return
	}
	t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
	if t.keepaliveEnabled {
		atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
	}
	sf, ok := frame.(*http2.SettingsFrame)
	if !ok {
		// this kicks off resetTransport, so must be last before return
		t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame))
		return
	}
	t.onPrefaceReceipt()
	t.handleSettings(sf, true)

	// loop to keep reading incoming messages on this transport.
	for {
		t.controlBuf.throttle()
		frame, err := t.framer.fr.ReadFrame()
		if t.keepaliveEnabled {
			atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
		}
		if err != nil {
			// Abort an active stream if the http2.Framer returns a
			// http2.StreamError. This can happen only if the server's response
			// is malformed http2.
			if se, ok := err.(http2.StreamError); ok {
				t.mu.Lock()
				s := t.activeStreams[se.StreamID]
				t.mu.Unlock()
				if s != nil {
					// use error detail to provide better err message
					code := http2ErrConvTab[se.Code]
					errorDetail := t.framer.fr.ErrorDetail()
					var msg string
					if errorDetail != nil {
						msg = errorDetail.Error()
					} else {
						msg = "received invalid frame"
					}
					t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
				}
				continue
			} else {
				// Transport error.
				t.Close(connectionErrorf(true, err, "error reading from server: %v", err))
				return
			}
		}
		switch frame := frame.(type) {
		case *http2.MetaHeadersFrame:
			t.operateHeaders(frame)
		case *http2.DataFrame:
			t.handleData(frame)
		case *http2.RSTStreamFrame:
			t.handleRSTStream(frame)
		case *http2.SettingsFrame:
			t.handleSettings(frame, false)
		case *http2.PingFrame:
			t.handlePing(frame)
		case *http2.GoAwayFrame:
			t.handleGoAway(frame)
		case *http2.WindowUpdateFrame:
			t.handleWindowUpdate(frame)
		default:
			if logger.V(logLevel) {
				logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
			}
		}
	}
}

func minTime(a, b time.Duration) time.Duration {
	if a < b {
		return a
	}
	return b
}

// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
func (t *http2Client) keepalive() {
	p := &ping{data: [8]byte{}}
	// True iff a ping has been sent, and no data has been received since then.
	outstandingPing := false
	// Amount of time remaining before which we should receive an ACK for the
	// last sent ping.
	timeoutLeft := time.Duration(0)
	// Records the last value of t.lastRead before we go block on the timer.
	// This is required to check for read activity since then.
	prevNano := time.Now().UnixNano()
	timer := time.NewTimer(t.kp.Time)
	for {
		select {
		case <-timer.C:
			lastRead := atomic.LoadInt64(&t.lastRead)
			if lastRead > prevNano {
				// There has been read activity since the last time we were here.
				outstandingPing = false
				// Next timer should fire at kp.Time seconds from lastRead time.
				timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
				prevNano = lastRead
				continue
			}
			if outstandingPing && timeoutLeft <= 0 {
				t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout"))
				return
			}
			t.mu.Lock()
			if t.state == closing {
				// If the transport is closing, we should exit from the
				// keepalive goroutine here. If not, we could have a race
				// between the call to Signal() from Close() and the call to
				// Wait() here, whereby the keepalive goroutine ends up
				// blocking on the condition variable which will never be
				// signalled again.
				t.mu.Unlock()
				return
			}
			if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
				// If a ping was sent out previously (because there were active
				// streams at that point) which wasn't acked and its timeout
				// hadn't fired, but we got here and are about to go dormant,
				// we should make sure that we unconditionally send a ping once
				// we awaken.
				outstandingPing = false
				t.kpDormant = true
				t.kpDormancyCond.Wait()
			}
			t.kpDormant = false
			t.mu.Unlock()

			// We get here either because we were dormant and a new stream was
			// created which unblocked the Wait() call, or because the
			// keepalive timer expired. In both cases, we need to send a ping.
			if !outstandingPing {
				if channelz.IsOn() {
					atomic.AddInt64(&t.czData.kpCount, 1)
				}
				t.controlBuf.put(p)
				timeoutLeft = t.kp.Timeout
				outstandingPing = true
			}
			// The amount of time to sleep here is the minimum of kp.Time and
			// timeoutLeft. This will ensure that we wait only for kp.Time
			// before sending out the next ping (for cases where the ping is
			// acked).
			sleepDuration := minTime(t.kp.Time, timeoutLeft)
			timeoutLeft -= sleepDuration
			timer.Reset(sleepDuration)
		case <-t.ctx.Done():
			if !timer.Stop() {
				<-timer.C
			}
			return
		}
	}
}

func (t *http2Client) Error() <-chan struct{} {
	return t.ctx.Done()
}

func (t *http2Client) GoAway() <-chan struct{} {
	return t.goAway
}

func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric {
	s := channelz.SocketInternalMetric{
		StreamsStarted:                  atomic.LoadInt64(&t.czData.streamsStarted),
		StreamsSucceeded:                atomic.LoadInt64(&t.czData.streamsSucceeded),
		StreamsFailed:                   atomic.LoadInt64(&t.czData.streamsFailed),
		MessagesSent:                    atomic.LoadInt64(&t.czData.msgSent),
		MessagesReceived:                atomic.LoadInt64(&t.czData.msgRecv),
		KeepAlivesSent:                  atomic.LoadInt64(&t.czData.kpCount),
		LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
		LastMessageSentTimestamp:        time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
		LastMessageReceivedTimestamp:    time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
		LocalFlowControlWindow:          int64(t.fc.getSize()),
		SocketOptions:                   channelz.GetSocketOption(t.conn),
		LocalAddr:                       t.localAddr,
		RemoteAddr:                      t.remoteAddr,
		// RemoteName :
	}
	if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
		s.Security = au.GetSecurityValue()
	}
	s.RemoteFlowControlWindow = t.getOutFlowWindow()
	return &s
}

func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }

func (t *http2Client) IncrMsgSent() {
	atomic.AddInt64(&t.czData.msgSent, 1)
	atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
}

func (t *http2Client) IncrMsgRecv() {
	atomic.AddInt64(&t.czData.msgRecv, 1)
	atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
}

func (t *http2Client) getOutFlowWindow() int64 {
	resp := make(chan uint32, 1)
	timer := time.NewTimer(time.Second)
	defer timer.Stop()
	t.controlBuf.put(&outFlowControlSizeRequest{resp})
	select {
	case sz := <-resp:
		return int64(sz)
	case <-t.ctxDone:
		return -1
	case <-timer.C:
		return -2
	}
}
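// Rough usage sketch of this transport from an upper layer such as addrConn
// (simplified; the callback bodies, contexts, and CallHdr values below are
// placeholders, not part of this package's actual wiring):
//
//	ct, err := newHTTP2Client(connectCtx, ctx, addr, ConnectOptions{},
//		func() { /* server preface received */ },
//		func(r GoAwayReason) { /* server is going away */ },
//		func() { /* transport closed; kick off reconnect */ })
//	if err != nil { /* handle dial/handshake error */ }
//	s, err := ct.NewStream(rpcCtx, &CallHdr{Host: "example.com", Method: "/pkg.Service/Method"})
//	if err != nil { /* inspect *NewStreamError for retryability */ }
//	_ = ct.Write(s, hdr, msg, &Options{Last: true}) // send the request
//	// ... read the response from s, then:
//	ct.CloseStream(s, nil)
//	ct.GracefulClose() // or ct.Close(err) to tear the connection down immediately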