/*
 *
 * Copyright 2014 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package transport

import (
	"context"
	"fmt"
	"io"
	"math"
	"net"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	http "gitee.com/zhaochuninhefei/gmgo/gmhttp"

	"gitee.com/zhaochuninhefei/gmgo/grpc/codes"
	"gitee.com/zhaochuninhefei/gmgo/grpc/credentials"
	"gitee.com/zhaochuninhefei/gmgo/grpc/internal/channelz"
	icredentials "gitee.com/zhaochuninhefei/gmgo/grpc/internal/credentials"
	"gitee.com/zhaochuninhefei/gmgo/grpc/internal/grpcutil"
	imetadata "gitee.com/zhaochuninhefei/gmgo/grpc/internal/metadata"
	"gitee.com/zhaochuninhefei/gmgo/grpc/internal/syscall"
	"gitee.com/zhaochuninhefei/gmgo/grpc/internal/transport/networktype"
	"gitee.com/zhaochuninhefei/gmgo/grpc/keepalive"
	"gitee.com/zhaochuninhefei/gmgo/grpc/metadata"
	"gitee.com/zhaochuninhefei/gmgo/grpc/peer"
	"gitee.com/zhaochuninhefei/gmgo/grpc/resolver"
	"gitee.com/zhaochuninhefei/gmgo/grpc/stats"
	"gitee.com/zhaochuninhefei/gmgo/grpc/status"
	"gitee.com/zhaochuninhefei/gmgo/net/http2"
	"gitee.com/zhaochuninhefei/gmgo/net/http2/hpack"
)

// clientConnectionCounter counts the number of connections a client has
// initiated (equal to the number of http2Clients created). Must be accessed
// atomically.
var clientConnectionCounter uint64

// http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct {
	lastRead   int64 // Keep this field 64-bit aligned. Accessed atomically.
	ctx        context.Context
	cancel     context.CancelFunc
	ctxDone    <-chan struct{} // Cache the ctx.Done() chan.
	userAgent  string
	md         metadata.MD
	conn       net.Conn // underlying communication channel
	loopy      *loopyWriter
	remoteAddr net.Addr
	localAddr  net.Addr
	authInfo   credentials.AuthInfo // auth info about the connection

	readerDone chan struct{} // sync point to enable testing.
	writerDone chan struct{} // sync point to enable testing.
	// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
	// that the server sent GoAway on this transport.
	goAway chan struct{}

	framer *framer
	// controlBuf delivers all the control related tasks (e.g., window
	// updates, reset streams, and various settings) to the controller.
	controlBuf *controlBuffer
	fc         *trInFlow
	// The scheme used: https if TLS is on, http otherwise.
	scheme string

	isSecure bool

	perRPCCreds []credentials.PerRPCCredentials

	kp               keepalive.ClientParameters
	keepaliveEnabled bool

	statsHandler stats.Handler

	initialWindowSize int32

	// configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
	maxSendHeaderListSize *uint32

	bdpEst *bdpEstimator
	// onPrefaceReceipt is a callback that client transport calls upon
	// receiving server preface to signal that a successful HTTP2
	// connection was established.
	onPrefaceReceipt func()

	maxConcurrentStreams  uint32
	streamQuota           int64
	streamsQuotaAvailable chan struct{}
	waitingStreams        uint32
	nextID                uint32

	mu            sync.Mutex // guard the following variables
	state         transportState
	activeStreams map[uint32]*Stream
	// prevGoAwayID records the Last-Stream-ID in the previous GOAway frame.
	prevGoAwayID uint32
	// goAwayReason records the http2.ErrCode and debug data received with the
	// GoAway frame.
	goAwayReason GoAwayReason
	// goAwayDebugMessage contains a detailed human readable string about a
	// GoAway frame, useful for error messages.
	goAwayDebugMessage string
	// A condition variable used to signal when the keepalive goroutine should
	// go dormant. The condition for dormancy is based on the number of active
	// streams and the `PermitWithoutStream` keepalive client parameter. And
	// since the number of active streams is guarded by the above mutex, we use
	// the same for this condition variable as well.
	kpDormancyCond *sync.Cond
	// A boolean to track whether the keepalive goroutine is dormant or not.
	// This is checked before attempting to signal the above condition
	// variable.
	kpDormant bool

	// Fields below are for channelz metric collection.
	channelzID int64 // channelz unique identification number
	czData     *channelzData

	onGoAway func(GoAwayReason)
	onClose  func()

	bufferPool *bufferPool

	connectionID uint64
}

// dial connects to addr using the custom dialer fn when one is provided,
// otherwise a standard net.Dialer (possibly through a proxy for "tcp"
// targets when useProxy is set).
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
	address := addr.Addr
	networkType, ok := networktype.Get(addr)
	if fn != nil {
		// Special handling for unix scheme with custom dialer. Back in the day,
		// we did not have a unix resolver and therefore targets with a unix
		// scheme would end up using the passthrough resolver. So, users used a
		// custom dialer in this case and expected the original dial target to
		// be passed to the custom dialer. Now, we have a unix resolver. But if
		// a custom dialer is specified, we want to retain the old behavior in
		// terms of the address being passed to the custom dialer.
		if networkType == "unix" && !strings.HasPrefix(address, "\x00") {
			// Supported unix targets are either "unix://absolute-path" or
			// "unix:relative-path".
			if filepath.IsAbs(address) {
				return fn(ctx, "unix://"+address)
			}
			return fn(ctx, "unix:"+address)
		}
		return fn(ctx, address)
	}
	if !ok {
		networkType, address = parseDialTarget(address)
	}
	if networkType == "tcp" && useProxy {
		return proxyDial(ctx, address, grpcUA)
	}
	return (&net.Dialer{}).DialContext(ctx, networkType, address)
}

// isTemporary reports whether err should be treated as a temporary (and thus
// retryable) dial error. Errors that implement neither Temporary() nor
// Timeout() are conservatively treated as temporary.
func isTemporary(err error) bool {
	switch err := err.(type) {
	case interface {
		Temporary() bool
	}:
		return err.Temporary()
	case interface {
		Timeout() bool
	}:
		// Timeouts may be resolved upon retry, and are thus treated as
		// temporary.
		return err.Timeout()
	}
	return true
}

// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction
// fails.
func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
	scheme := "http"
	ctx, cancel := context.WithCancel(ctx)
	defer func() {
		// Cancel the transport context on any construction failure so nothing
		// derived from it leaks.
		if err != nil {
			cancel()
		}
	}()

	// gRPC, resolver, balancer etc. can specify arbitrary data in the
	// Attributes field of resolver.Address, which is shoved into connectCtx
	// and passed to the dialer and credential handshaker. This makes it
	// possible for address specific arbitrary data to reach custom dialers
	// and credential handshakers.
	connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes})

	conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent)
	if err != nil {
		if opts.FailOnNonTempDialError {
			return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
		}
		return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
	}
	// Any further errors will close the underlying connection
	defer func(conn net.Conn) {
		if err != nil {
			_ = conn.Close()
		}
	}(conn)
	kp := opts.KeepaliveParams
	// Validate keepalive parameters.
	if kp.Time == 0 {
		kp.Time = defaultClientKeepaliveTime
	}
	if kp.Timeout == 0 {
		kp.Timeout = defaultClientKeepaliveTimeout
	}
	keepaliveEnabled := false
	if kp.Time != infinity {
		if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
			return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
		}
		keepaliveEnabled = true
	}
	var (
		isSecure bool
		authInfo credentials.AuthInfo
	)
	transportCreds := opts.TransportCredentials
	perRPCCreds := opts.PerRPCCredentials

	// A creds bundle, when present, overrides the individual credentials.
	if b := opts.CredsBundle; b != nil {
		if t := b.TransportCredentials(); t != nil {
			transportCreds = t
		}
		if t := b.PerRPCCredentials(); t != nil {
			perRPCCreds = append(perRPCCreds, t)
		}
	}
	if transportCreds != nil {
		rawConn := conn
		// Pull the deadline from the connectCtx, which will be used for
		// timeouts in the authentication protocol handshake. Can ignore the
		// boolean as the deadline will return the zero value, which will make
		// the conn not timeout on I/O operations.
		deadline, _ := connectCtx.Deadline()
		_ = rawConn.SetDeadline(deadline)
		conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn)
		_ = rawConn.SetDeadline(time.Time{})
		if err != nil {
			return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
		}
		// Refuse to send per-RPC credentials that require transport security
		// over a connection whose security level is below PrivacyAndIntegrity.
		for _, cd := range perRPCCreds {
			if cd.RequireTransportSecurity() {
				if ci, ok := authInfo.(interface {
					GetCommonAuthInfo() credentials.CommonAuthInfo
				}); ok {
					secLevel := ci.GetCommonAuthInfo().SecurityLevel
					if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity {
						return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection")
					}
				}
			}
		}
		isSecure = true
		if transportCreds.Info().SecurityProtocol == "tls" {
			scheme = "https"
		}
	}
	// BDP-based dynamic windows are disabled when the user pins the window
	// sizes explicitly via ConnectOptions.
	dynamicWindow := true
	icwz := int32(initialWindowSize)
	if opts.InitialConnWindowSize >= defaultWindowSize {
		icwz = opts.InitialConnWindowSize
		dynamicWindow = false
	}
	writeBufSize := opts.WriteBufferSize
	readBufSize := opts.ReadBufferSize
	maxHeaderListSize := defaultClientMaxHeaderListSize
	if opts.MaxHeaderListSize != nil {
		maxHeaderListSize = *opts.MaxHeaderListSize
	}
	t := &http2Client{
		ctx:                   ctx,
		ctxDone:               ctx.Done(), // Cache Done chan.
		cancel:                cancel,
		userAgent:             opts.UserAgent,
		conn:                  conn,
		remoteAddr:            conn.RemoteAddr(),
		localAddr:             conn.LocalAddr(),
		authInfo:              authInfo,
		readerDone:            make(chan struct{}),
		writerDone:            make(chan struct{}),
		goAway:                make(chan struct{}),
		framer:                newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
		fc:                    &trInFlow{limit: uint32(icwz)},
		scheme:                scheme,
		activeStreams:         make(map[uint32]*Stream),
		isSecure:              isSecure,
		perRPCCreds:           perRPCCreds,
		kp:                    kp,
		statsHandler:          opts.StatsHandler,
		initialWindowSize:     initialWindowSize,
		onPrefaceReceipt:      onPrefaceReceipt,
		nextID:                1,
		maxConcurrentStreams:  defaultMaxStreamsClient,
		streamQuota:           defaultMaxStreamsClient,
		streamsQuotaAvailable: make(chan struct{}, 1),
		czData:                new(channelzData),
		onGoAway:              onGoAway,
		onClose:               onClose,
		keepaliveEnabled:      keepaliveEnabled,
		bufferPool:            newBufferPool(),
	}

	// Although addr.Metadata is deprecated in favor of Attributes, it cannot
	// be dropped here because the http2Client type still carries a
	// metadata.MD field that is populated from it.
	//goland:noinspection GoDeprecation
	if md, ok := addr.Metadata.(*metadata.MD); ok {
		t.md = *md
	} else if md := imetadata.Get(addr); md != nil {
		t.md = md
	}
	t.controlBuf = newControlBuffer(t.ctxDone)
	if opts.InitialWindowSize >= defaultWindowSize {
		t.initialWindowSize = opts.InitialWindowSize
		dynamicWindow = false
	}
	if dynamicWindow {
		t.bdpEst = &bdpEstimator{
			bdp:               initialWindowSize,
			updateFlowControl: t.updateFlowControl,
		}
	}
	if t.statsHandler != nil {
		t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
			RemoteAddr: t.remoteAddr,
			LocalAddr:  t.localAddr,
		})
		connBegin := &stats.ConnBegin{
			Client: true,
		}
		t.statsHandler.HandleConn(t.ctx, connBegin)
	}
	if channelz.IsOn() {
		t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
	}
	if t.keepaliveEnabled {
		t.kpDormancyCond = sync.NewCond(&t.mu)
		go t.keepalive()
	}
	// Start the reader goroutine for incoming message. Each transport has
	// a dedicated goroutine which reads HTTP2 frame from network. Then it
	// dispatches the frame to the corresponding stream entity.
	go t.reader()

	// Send connection preface to server.
	n, err := t.conn.Write(clientPreface)
	if err != nil {
		err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
		t.Close(err)
		return nil, err
	}
	if n != len(clientPreface) {
		err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
		t.Close(err)
		return nil, err
	}
	var ss []http2.Setting

	if t.initialWindowSize != defaultWindowSize {
		ss = append(ss, http2.Setting{
			ID:  http2.SettingInitialWindowSize,
			Val: uint32(t.initialWindowSize),
		})
	}
	if opts.MaxHeaderListSize != nil {
		ss = append(ss, http2.Setting{
			ID:  http2.SettingMaxHeaderListSize,
			Val: *opts.MaxHeaderListSize,
		})
	}
	err = t.framer.fr.WriteSettings(ss...)
	if err != nil {
		err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
		t.Close(err)
		return nil, err
	}
	// Adjust the connection flow control window if needed.
	if delta := uint32(icwz - defaultWindowSize); delta > 0 {
		if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
			err = connectionErrorf(true, err, "transport: failed to write window update: %v", err)
			t.Close(err)
			return nil, err
		}
	}

	t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1)

	if err := t.framer.writer.Flush(); err != nil {
		return nil, err
	}
	go func() {
		t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
		err := t.loopy.run()
		if err != nil {
			if logger.V(logLevel) {
				logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
			}
		}
		// Do not close the transport. Let reader goroutine handle it since
		// there might be data in the buffers.
		_ = t.conn.Close()
		t.controlBuf.finish()
		close(t.writerDone)
	}()
	return t, nil
}

// newStream builds a client-side Stream for callHdr bound to this transport.
// The stream is not yet registered as active; that happens in NewStream via
// the control buffer.
func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
	s := &Stream{
		ct:             t,
		done:           make(chan struct{}),
		method:         callHdr.Method,
		sendCompress:   callHdr.SendCompress,
		buf:            newRecvBuffer(),
		headerChan:     make(chan struct{}),
		contentSubtype: callHdr.ContentSubtype,
		doneFunc:       callHdr.DoneFunc,
	}
	s.wq = newWriteQuota(defaultWriteQuota, s.done)
	s.requestRead = func(n int) {
		t.adjustWindow(s, uint32(n))
	}
	// The client side stream context should have exactly the same life cycle
	// with the user provided context. That means, s.ctx should be read-only.
	// And s.ctx is done iff ctx is done. So we use the original context here
	// instead of creating a copy.
	s.ctx = ctx
	s.trReader = &transportReader{
		reader: &recvBufferReader{
			ctx:     s.ctx,
			ctxDone: s.ctx.Done(),
			recv:    s.buf,
			closeStream: func(err error) {
				t.CloseStream(s, err)
			},
			freeBuffer: t.bufferPool.put,
		},
		windowHandler: func(n int) {
			t.updateWindow(s, uint32(n))
		},
	}
	return s
}

// getPeer returns peer information (remote address and auth info) for this
// transport's connection.
func (t *http2Client) getPeer() *peer.Peer {
	return &peer.Peer{
		Addr:     t.remoteAddr,
		AuthInfo: t.authInfo,
	}
}

// createHeaderFields assembles the hpack header fields for a new RPC: the
// HTTP/2 pseudo-headers, gRPC headers (content-type, te, timeout, encoding),
// per-RPC auth metadata, stats tags/trace, outgoing context metadata, and
// address metadata, in that order.
func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
	aud := t.createAudience(callHdr)
	ri := credentials.RequestInfo{
		Method:   callHdr.Method,
		AuthInfo: t.authInfo,
	}
	ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri)
	authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
	if err != nil {
		return nil, err
	}
	callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr)
	if err != nil {
		return nil, err
	}
	// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
	// first and create a slice of that exact size.
	// Make the slice of certain predictable size to reduce allocations made by append.
	hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
	hfLen += len(authData) + len(callAuthData)
	headerFields := make([]hpack.HeaderField, 0, hfLen)
	headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
	headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
	headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
	headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)})
	headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
	headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
	if callHdr.PreviousAttempts > 0 {
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
	}

	if callHdr.SendCompress != "" {
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress})
	}
	if dl, ok := ctx.Deadline(); ok {
		// Send out timeout regardless its value. The server can detect timeout context by itself.
		// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
		timeout := time.Until(dl)
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)})
	}
	for k, v := range authData {
		headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
	}
	for k, v := range callAuthData {
		headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
	}
	if b := stats.OutgoingTags(ctx); b != nil {
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
	}
	if b := stats.OutgoingTrace(ctx); b != nil {
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
	}

	if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
		var k string
		for k, vv := range md {
			// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
			if isReservedHeader(k) {
				continue
			}
			for _, v := range vv {
				headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
			}
		}
		// "added" pairs alternate key, value, key, value, ... within each
		// inner slice; even indices are (lowercased) keys.
		for _, vv := range added {
			for i, v := range vv {
				if i%2 == 0 {
					k = strings.ToLower(v)
					continue
				}
				// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
				if isReservedHeader(k) {
					continue
				}
				headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
			}
		}
	}
	for k, vv := range t.md {
		if isReservedHeader(k) {
			continue
		}
		for _, v := range vv {
			headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
		}
	}
	return headerFields, nil
}

// createAudience builds the audience URI handed to per-RPC credentials:
// "https://<host>/<service>" (the method name is stripped). Returns "" when
// no credentials need it.
func (t *http2Client) createAudience(callHdr *CallHdr) string {
	// Create an audience string only if needed.
	if len(t.perRPCCreds) == 0 && callHdr.Creds == nil {
		return ""
	}
	// Construct URI required to get auth request metadata.
	// Omit port if it is the default one.
	host := strings.TrimSuffix(callHdr.Host, ":443")
	pos := strings.LastIndex(callHdr.Method, "/")
	if pos == -1 {
		pos = len(callHdr.Method)
	}
	return "https://" + host + callHdr.Method[:pos]
}

// getTrAuthData collects request metadata from the transport-level (dial
// option / bundle) per-RPC credentials, lowercasing all keys as required by
// HTTP/2. Later credentials overwrite earlier ones on key collision.
func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) {
	if len(t.perRPCCreds) == 0 {
		return nil, nil
	}
	authData := map[string]string{}
	for _, c := range t.perRPCCreds {
		data, err := c.GetRequestMetadata(ctx, audience)
		if err != nil {
			if _, ok := status.FromError(err); ok {
				return nil, err
			}

			return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err)
		}
		for k, v := range data {
			// Capital header names are illegal in HTTP/2.
			k = strings.ToLower(k)
			authData[k] = v
		}
	}
	return authData, nil
}

// getCallAuthData collects request metadata from the per-call credentials
// (CallOption), enforcing the transport-security requirement first. Returns
// nil when the call carries no credentials.
func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) {
	var callAuthData map[string]string
	// Check if credentials.PerRPCCredentials were provided via call options.
	// Note: if these credentials are provided both via dial options and call
	// options, then both sets of credentials will be applied.
	if callCreds := callHdr.Creds; callCreds != nil {
		if callCreds.RequireTransportSecurity() {
			ri, _ := credentials.RequestInfoFromContext(ctx)
			if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil {
				return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
			}
		}
		data, err := callCreds.GetRequestMetadata(ctx, audience)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "transport: %v", err)
		}
		callAuthData = make(map[string]string, len(data))
		for k, v := range data {
			// Capital header names are illegal in HTTP/2
			k = strings.ToLower(k)
			callAuthData[k] = v
		}
	}
	return callAuthData, nil
}

// NewStreamError wraps an error and reports additional information. Typically
// NewStream errors result in transparent retry, as they mean nothing went onto
// the wire. However, there are two notable exceptions:
//
//  1. If the stream headers violate the max header list size allowed by the
//     server. In this case there is no reason to retry at all, as it is
//     assumed the RPC would continue to fail on subsequent attempts.
//  2. If the credentials errored when requesting their headers. In this case,
//     it's possible a retry can fix the problem, but indefinitely transparently
//     retrying is not appropriate as it is likely the credentials, if they can
//     eventually succeed, would need I/O to do so.
type NewStreamError struct {
	Err error

	DoNotRetry            bool
	DoNotTransparentRetry bool
}

// Error implements the error interface by delegating to the wrapped error.
func (e NewStreamError) Error() string {
	return e.Err.Error()
}

// NewStream creates a stream and registers it into the transport as "active"
// streams. All non-nil errors returned will be *NewStreamError.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
	ctx = peer.NewContext(ctx, t.getPeer())
	headerFields, err := t.createHeaderFields(ctx, callHdr)
	if err != nil {
		return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true}
	}
	s := t.newStream(ctx, callHdr)
	// cleanup marks the stream done without it ever having reached the
	// server (e.g. transport draining/closing before the headers were sent).
	cleanup := func(err error) {
		if s.swapState(streamDone) == streamDone {
			// If it was already done, return.
			return
		}
		// The stream was unprocessed by the server.
		atomic.StoreUint32(&s.unprocessed, 1)
		s.write(recvMsg{err: err})
		close(s.done)
		// If headerChan isn't closed, then close it.
		if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
			close(s.headerChan)
		}
	}
	hdr := &headerFrame{
		hf:        headerFields,
		endStream: false,
		initStream: func(id uint32) error {
			t.mu.Lock()
			if state := t.state; state != reachable {
				t.mu.Unlock()
				// Do a quick cleanup.
				err := errStreamDrain
				if state == closing {
					err = ErrConnClosing
				}
				cleanup(err)
				return err
			}
			t.activeStreams[id] = s
			if channelz.IsOn() {
				atomic.AddInt64(&t.czData.streamsStarted, 1)
				atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
			}
			// If the keepalive goroutine has gone dormant, wake it up.
			if t.kpDormant {
				t.kpDormancyCond.Signal()
			}
			t.mu.Unlock()
			return nil
		},
		onOrphaned: cleanup,
		wq:         s.wq,
	}
	firstTry := true
	var ch chan struct{}
	// checkForStreamQuota runs inside the control buffer's executor; it
	// reserves stream quota and assigns the stream ID.
	checkForStreamQuota := func(it interface{}) bool {
		if t.streamQuota <= 0 { // Can go negative if server decreases it.
			if firstTry {
				t.waitingStreams++
			}
			ch = t.streamsQuotaAvailable
			return false
		}
		if !firstTry {
			t.waitingStreams--
		}
		t.streamQuota--
		h := it.(*headerFrame)
		h.streamID = t.nextID
		t.nextID += 2
		s.id = h.streamID
		s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
		if t.streamQuota > 0 && t.waitingStreams > 0 {
			select {
			case t.streamsQuotaAvailable <- struct{}{}:
			default:
			}
		}
		return true
	}
	var hdrListSizeErr error
	// checkForHeaderListSize rejects the stream up-front if the headers
	// exceed the peer's SETTINGS_MAX_HEADER_LIST_SIZE.
	checkForHeaderListSize := func(it interface{}) bool {
		if t.maxSendHeaderListSize == nil {
			return true
		}
		hdrFrame := it.(*headerFrame)
		var sz int64
		for _, f := range hdrFrame.hf {
			if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
				hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
				return false
			}
		}
		return true
	}
	for {
		success, err := t.controlBuf.executeAndPut(func(it interface{}) bool {
			if !checkForStreamQuota(it) {
				return false
			}
			if !checkForHeaderListSize(it) {
				return false
			}
			return true
		}, hdr)
		if err != nil {
			return nil, &NewStreamError{Err: err}
		}
		if success {
			break
		}
		if hdrListSizeErr != nil {
			return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true}
		}
		// Out of stream quota: block until quota is available or the call,
		// transport, or connection terminates.
		firstTry = false
		select {
		case <-ch:
		case <-ctx.Done():
			return nil, &NewStreamError{Err: ContextErr(ctx.Err())}
		case <-t.goAway:
			return nil, &NewStreamError{Err: errStreamDrain}
		case <-t.ctx.Done():
			return nil, &NewStreamError{Err: ErrConnClosing}
		}
	}
	if t.statsHandler != nil {
		header, ok := metadata.FromOutgoingContext(ctx)
		if ok {
			header.Set("user-agent", t.userAgent)
		} else {
			header = metadata.Pairs("user-agent", t.userAgent)
		}
		// Note: The header fields are compressed with hpack after this call
		// returns. No WireLength field is set here.
		outHeader := &stats.OutHeader{
			Client:      true,
			FullMethod:  callHdr.Method,
			RemoteAddr:  t.remoteAddr,
			LocalAddr:   t.localAddr,
			Compression: callHdr.SendCompress,
			Header:      header,
		}
		t.statsHandler.HandleRPC(s.ctx, outHeader)
	}
	return s, nil
}

// CloseStream clears the footprint of a stream when the stream is not needed any more.
// This must not be executed in reader's goroutine.
func (t *http2Client) CloseStream(s *Stream, err error) {
	var (
		rst     bool
		rstCode http2.ErrCode
	)
	if err != nil {
		rst = true
		rstCode = http2.ErrCodeCancel
	}
	t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
}

// closeStream transitions s to streamDone, records its final status and
// trailers, unblocks pending reads, and queues a cleanupStream item (and an
// optional RST_STREAM) on the control buffer. eosReceived indicates whether
// the server finished the stream cleanly (used for channelz accounting).
func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
	// Set stream status to done.
	if s.swapState(streamDone) == streamDone {
		// If it was already done, return. If multiple closeStream calls
		// happen simultaneously, wait for the first to finish.
		<-s.done
		return
	}
	// status and trailers can be updated here without any synchronization because the stream goroutine will
	// only read it after it sees an io.EOF error from read or write and we'll write those errors
	// only after updating this.
	s.status = st
	if len(mdata) > 0 {
		s.trailer = mdata
	}
	if err != nil {
		// This will unblock reads eventually.
		s.write(recvMsg{err: err})
	}
	// If headerChan isn't closed, then close it.
	if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
		s.noHeaders = true
		close(s.headerChan)
	}
	cleanup := &cleanupStream{
		streamID: s.id,
		onWrite: func() {
			t.mu.Lock()
			if t.activeStreams != nil {
				delete(t.activeStreams, s.id)
			}
			t.mu.Unlock()
			if channelz.IsOn() {
				if eosReceived {
					atomic.AddInt64(&t.czData.streamsSucceeded, 1)
				} else {
					atomic.AddInt64(&t.czData.streamsFailed, 1)
				}
			}
		},
		rst:     rst,
		rstCode: rstCode,
	}
	// Return this stream's quota and wake one waiter in NewStream, if any.
	addBackStreamQuota := func(interface{}) bool {
		t.streamQuota++
		if t.streamQuota > 0 && t.waitingStreams > 0 {
			select {
			case t.streamsQuotaAvailable <- struct{}{}:
			default:
			}
		}
		return true
	}
	_, _ = t.controlBuf.executeAndPut(addBackStreamQuota, cleanup)
	// This will unblock write.
	close(s.done)
	if s.doneFunc != nil {
		s.doneFunc()
	}
}

// Close kicks off the shutdown process of the transport. This should be called
// only once on a transport. Once it is called, the transport should not be
// accessed any more.
//
// This method blocks until the addrConn that initiated this transport is
// re-connected. This happens because t.onClose() begins reconnect logic at the
// addrConn level and blocks until the addrConn is successfully connected.
func (t *http2Client) Close(err error) {
	t.mu.Lock()
	// Make sure we only Close once.
	if t.state == closing {
		t.mu.Unlock()
		return
	}
	// Call t.onClose before setting the state to closing to prevent the client
	// from attempting to create new streams ASAP.
	t.onClose()
	t.state = closing
	streams := t.activeStreams
	t.activeStreams = nil
	if t.kpDormant {
		// If the keepalive goroutine is blocked on this condition variable, we
		// should unblock it so that the goroutine eventually exits.
		t.kpDormancyCond.Signal()
	}
	t.mu.Unlock()
	t.controlBuf.finish()
	t.cancel()
	_ = t.conn.Close()
	if channelz.IsOn() {
		channelz.RemoveEntry(t.channelzID)
	}
	// Append info about previous goaways if there were any, since this may be important
	// for understanding the root cause for this connection to be closed.
	_, goAwayDebugMessage := t.GetGoAwayReason()

	var st *status.Status
	if len(goAwayDebugMessage) > 0 {
		st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage)
		err = st.Err()
	} else {
		st = status.New(codes.Unavailable, err.Error())
	}

	// Notify all active streams.
	for _, s := range streams {
		t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
	}
	if t.statsHandler != nil {
		connEnd := &stats.ConnEnd{
			Client: true,
		}
		t.statsHandler.HandleConn(t.ctx, connEnd)
	}
}

// GracefulClose sets the state to draining, which prevents new streams from
// being created and causes the transport to be closed when the last active
// stream is closed. If there are no active streams, the transport is closed
// immediately. This does nothing if the transport is already draining or
// closing.
func (t *http2Client) GracefulClose() {
	t.mu.Lock()
	// Make sure we move to draining only from active.
	if t.state == draining || t.state == closing {
		t.mu.Unlock()
		return
	}
	t.state = draining
	active := len(t.activeStreams)
	t.mu.Unlock()
	if active == 0 {
		t.Close(ErrConnClosing)
		return
	}
	_ = t.controlBuf.put(&incomingGoAway{})
}

// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
// should proceed only if Write returns nil.
func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
	if opts.Last {
		// If it's the last message, update stream state.
		if !s.compareAndSwapState(streamActive, streamWriteDone) {
			return errStreamDone
		}
	} else if s.getState() != streamActive {
		return errStreamDone
	}
	df := &dataFrame{
		streamID:  s.id,
		endStream: opts.Last,
		h:         hdr,
		d:         data,
	}
	if hdr != nil || data != nil { // If it's not an empty data frame, check quota.
		if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
			return err
		}
	}
	return t.controlBuf.put(df)
}

// getStream looks up the active stream for frame f's stream ID; returns nil
// if the stream is no longer active.
func (t *http2Client) getStream(f http2.Frame) *Stream {
	t.mu.Lock()
	s := t.activeStreams[f.Header().StreamID]
	t.mu.Unlock()
	return s
}

// adjustWindow sends out extra window update over the initial window size
// of stream if the application is requesting data larger in size than
// the window.
func (t *http2Client) adjustWindow(s *Stream, n uint32) {
	if w := s.fc.maybeAdjust(n); w > 0 {
		_ = t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
	}
}

// updateWindow adjusts the inbound quota for the stream.
// Window updates will be sent out when the cumulative quota
// exceeds the corresponding threshold.
func (t *http2Client) updateWindow(s *Stream, n uint32) {
	if w := s.fc.onRead(n); w > 0 {
		_ = t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
	}
}

// updateFlowControl updates the incoming flow control windows
// for the transport and the stream based on the current bdp
// estimation.
func (t *http2Client) updateFlowControl(n uint32) {
	t.mu.Lock()
	for _, s := range t.activeStreams {
		s.fc.newLimit(n)
	}
	t.mu.Unlock()
	updateIWS := func(interface{}) bool {
		t.initialWindowSize = int32(n)
		return true
	}
	_, _ = t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)})
	_ = t.controlBuf.put(&outgoingSettings{
		ss: []http2.Setting{
			{
				ID:  http2.SettingInitialWindowSize,
				Val: n,
			},
		},
	})
}

// handleData processes an incoming DATA frame: it feeds the transport-level
// flow-control accounting (decoupled from application reads), optionally
// triggers a BDP ping, and dispatches the payload to the owning stream.
func (t *http2Client) handleData(f *http2.DataFrame) {
	size := f.Header().Length
	var sendBDPPing bool
	if t.bdpEst != nil {
		sendBDPPing = t.bdpEst.add(size)
	}
	// Decouple connection's flow control from application's read.
	// An update on connection's flow control should not depend on
	// whether user application has read the data or not. Such a
	// restriction is already imposed on the stream's flow control,
	// and therefore the sender will be blocked anyways.
	// Decoupling the connection flow control will prevent other
	// active(fast) streams from starving in presence of slow or
	// inactive streams.
	//
	if w := t.fc.onData(size); w > 0 {
		_ = t.controlBuf.put(&outgoingWindowUpdate{
			streamID:  0,
			increment: w,
		})
	}
	if sendBDPPing {
		// Avoid excessive ping detection (e.g. in an L7 proxy)
		// by sending a window update prior to the BDP ping.

		if w := t.fc.reset(); w > 0 {
			_ = t.controlBuf.put(&outgoingWindowUpdate{
				streamID:  0,
				increment: w,
			})
		}

		_ = t.controlBuf.put(bdpPing)
	}
	// Select the right stream to dispatch.
1063 s := t.getStream(f) 1064 if s == nil { 1065 return 1066 } 1067 if size > 0 { 1068 if err := s.fc.onData(size); err != nil { 1069 t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) 1070 return 1071 } 1072 if f.Header().Flags.Has(http2.FlagDataPadded) { 1073 if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { 1074 _ = t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) 1075 } 1076 } 1077 // TODO(bradfitz, zhaoq): A copy is required here because there is no 1078 // guarantee f.Data() is consumed before the arrival of next frame. 1079 // Can this copy be eliminated? 1080 if len(f.Data()) > 0 { 1081 buffer := t.bufferPool.get() 1082 buffer.Reset() 1083 buffer.Write(f.Data()) 1084 s.write(recvMsg{buffer: buffer}) 1085 } 1086 } 1087 // The server has closed the stream without sending trailers. Record that 1088 // the read direction is closed, and set the status appropriately. 1089 if f.StreamEnded() { 1090 t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) 1091 } 1092 } 1093 1094 func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { 1095 s := t.getStream(f) 1096 if s == nil { 1097 return 1098 } 1099 if f.ErrCode == http2.ErrCodeRefusedStream { 1100 // The stream was unprocessed by the server. 1101 atomic.StoreUint32(&s.unprocessed, 1) 1102 } 1103 statusCode, ok := http2ErrConvTab[f.ErrCode] 1104 if !ok { 1105 if logger.V(logLevel) { 1106 logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) 1107 } 1108 statusCode = codes.Unknown 1109 } 1110 if statusCode == codes.Canceled { 1111 if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { 1112 // Our deadline was already exceeded, and that was likely the cause 1113 // of this cancelation. Alter the status code accordingly. 
1114 statusCode = codes.DeadlineExceeded 1115 } 1116 } 1117 t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) 1118 } 1119 1120 func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { 1121 if f.IsAck() { 1122 return 1123 } 1124 var maxStreams *uint32 1125 var ss []http2.Setting 1126 var updateFuncs []func() 1127 _ = f.ForeachSetting(func(s http2.Setting) error { 1128 switch s.ID { 1129 case http2.SettingMaxConcurrentStreams: 1130 maxStreams = new(uint32) 1131 *maxStreams = s.Val 1132 case http2.SettingMaxHeaderListSize: 1133 updateFuncs = append(updateFuncs, func() { 1134 t.maxSendHeaderListSize = new(uint32) 1135 *t.maxSendHeaderListSize = s.Val 1136 }) 1137 default: 1138 ss = append(ss, s) 1139 } 1140 return nil 1141 }) 1142 if isFirst && maxStreams == nil { 1143 maxStreams = new(uint32) 1144 *maxStreams = math.MaxUint32 1145 } 1146 sf := &incomingSettings{ 1147 ss: ss, 1148 } 1149 if maxStreams != nil { 1150 updateStreamQuota := func() { 1151 delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) 1152 t.maxConcurrentStreams = *maxStreams 1153 t.streamQuota += delta 1154 if delta > 0 && t.waitingStreams > 0 { 1155 close(t.streamsQuotaAvailable) // wake all of them up. 1156 t.streamsQuotaAvailable = make(chan struct{}, 1) 1157 } 1158 } 1159 updateFuncs = append(updateFuncs, updateStreamQuota) 1160 } 1161 _, _ = t.controlBuf.executeAndPut(func(interface{}) bool { 1162 for _, f := range updateFuncs { 1163 f() 1164 } 1165 return true 1166 }, sf) 1167 } 1168 1169 func (t *http2Client) handlePing(f *http2.PingFrame) { 1170 if f.IsAck() { 1171 // Maybe it's a BDP ping. 
1172 if t.bdpEst != nil { 1173 t.bdpEst.calculate(f.Data) 1174 } 1175 return 1176 } 1177 pingAck := &ping{ack: true} 1178 copy(pingAck.data[:], f.Data[:]) 1179 _ = t.controlBuf.put(pingAck) 1180 } 1181 1182 func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { 1183 t.mu.Lock() 1184 if t.state == closing { 1185 t.mu.Unlock() 1186 return 1187 } 1188 if f.ErrCode == http2.ErrCodeEnhanceYourCalm { 1189 if logger.V(logLevel) { 1190 logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") 1191 } 1192 } 1193 id := f.LastStreamID 1194 if id > 0 && id%2 == 0 { 1195 t.mu.Unlock() 1196 t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) 1197 return 1198 } 1199 // A client can receive multiple GoAways from the server (see 1200 // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first 1201 // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be 1202 // sent after an RTT delay with the ID of the last stream the server will 1203 // process. 1204 // 1205 // Therefore, when we get the first GoAway we don't necessarily close any 1206 // streams. While in case of second GoAway we close all streams created after 1207 // the GoAwayId. This way streams that were in-flight while the GoAway from 1208 // server was being sent don't get killed. 1209 select { 1210 case <-t.goAway: // t.goAway has been closed (i.e.,multiple GoAways). 1211 // If there are multiple GoAways the first one should always have an ID greater than the following ones. 
1212 if id > t.prevGoAwayID { 1213 t.mu.Unlock() 1214 t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) 1215 return 1216 } 1217 default: 1218 t.setGoAwayReason(f) 1219 close(t.goAway) 1220 _ = t.controlBuf.put(&incomingGoAway{}) 1221 // Notify the clientconn about the GOAWAY before we set the state to 1222 // draining, to allow the client to stop attempting to create streams 1223 // before disallowing new streams on this connection. 1224 t.onGoAway(t.goAwayReason) 1225 t.state = draining 1226 } 1227 // All streams with IDs greater than the GoAwayId 1228 // and smaller than the previous GoAway ID should be killed. 1229 upperLimit := t.prevGoAwayID 1230 if upperLimit == 0 { // This is the first GoAway Frame. 1231 upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. 1232 } 1233 for streamID, stream := range t.activeStreams { 1234 if streamID > id && streamID <= upperLimit { 1235 // The stream was unprocessed by the server. 1236 atomic.StoreUint32(&stream.unprocessed, 1) 1237 t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) 1238 } 1239 } 1240 t.prevGoAwayID = id 1241 active := len(t.activeStreams) 1242 t.mu.Unlock() 1243 if active == 0 { 1244 t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) 1245 } 1246 } 1247 1248 // setGoAwayReason sets the value of t.goAwayReason based 1249 // on the GoAway frame received. 1250 // It expects a lock on transport's mutext to be held by 1251 // the caller. 
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
	t.goAwayReason = GoAwayNoReason
	switch f.ErrCode {
	case http2.ErrCodeEnhanceYourCalm:
		// "too_many_pings" is the debug payload the server attaches when it
		// is throttling this client's keepalive pings.
		if string(f.DebugData()) == "too_many_pings" {
			t.goAwayReason = GoAwayTooManyPings
		}
	}
	if len(f.DebugData()) == 0 {
		t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode)
	} else {
		t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData()))
	}
}

// GetGoAwayReason returns the reason and debug message recorded from the
// most recent GOAWAY frame, if any.
func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.goAwayReason, t.goAwayDebugMessage
}

// handleWindowUpdate forwards an incoming WINDOW_UPDATE frame to the loopy
// writer via controlBuf.
func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
	_ = t.controlBuf.put(&incomingWindowUpdate{
		streamID:  f.Header().StreamID,
		increment: f.Increment,
	})
}

// operateHeaders takes action on the decoded headers.
func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
	s := t.getStream(frame)
	if s == nil {
		return
	}
	endStream := frame.StreamEnded()
	atomic.StoreUint32(&s.bytesReceived, 1)
	// headerChanClosed == 0 means no HEADERS frame has been seen yet on this
	// stream, i.e. this frame is either Response-Headers or Trailers-Only.
	initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0

	if !initialHeader && !endStream {
		// As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set.
		st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream")
		t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false)
		return
	}

	// frame.Truncated is set to true when framer detects that the current header
	// list size hits MaxHeaderListSize limit.
	if frame.Truncated {
		se := status.New(codes.Internal, "peer header list size exceeded limit")
		t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream)
		return
	}

	var (
		// If a gRPC Response-Headers has already been received, then it means
		// that the peer is speaking gRPC and we are in gRPC mode.
		isGRPC         = !initialHeader
		mdata          = make(map[string][]string)
		contentTypeErr = "malformed header: missing HTTP content-type"
		grpcMessage    string
		statusGen      *status.Status
		recvCompress   string
		httpStatusCode *int
		httpStatusErr  string
		rawStatusCode  = codes.Unknown
		// headerError is set if an error is encountered while parsing the headers
		headerError string
	)

	// Only the first HEADERS frame must carry the HTTP :status pseudo-header.
	if initialHeader {
		httpStatusErr = "malformed header: missing HTTP status"
	}

	for _, hf := range frame.Fields {
		switch hf.Name {
		case "content-type":
			if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType {
				contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value)
				break
			}
			contentTypeErr = ""
			mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
			isGRPC = true
		case "grpc-encoding":
			recvCompress = hf.Value
		case "grpc-status":
			code, err := strconv.ParseInt(hf.Value, 10, 32)
			if err != nil {
				se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err))
				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
				return
			}
			rawStatusCode = codes.Code(uint32(code))
		case "grpc-message":
			grpcMessage = decodeGrpcMessage(hf.Value)
		case "grpc-status-details-bin":
			var err error
			statusGen, err = decodeGRPCStatusDetails(hf.Value)
			if err != nil {
				headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err)
			}
		case ":status":
			if hf.Value == "200" {
				httpStatusErr = ""
				statusCode := 200
				httpStatusCode = &statusCode
				break
			}

			c, err := strconv.ParseInt(hf.Value, 10, 32)
			if err != nil {
				se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err))
				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
				return
			}
			statusCode := int(c)
			httpStatusCode = &statusCode

			httpStatusErr = fmt.Sprintf(
				"unexpected HTTP status code received from server: %d (%s)",
				statusCode,
				http.StatusText(statusCode),
			)
		default:
			// Reserved headers (except whitelisted ones) are not exposed as
			// application metadata.
			if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
				break
			}
			v, err := decodeMetadataHeader(hf.Name, hf.Value)
			if err != nil {
				headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err)
				logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
				break
			}
			mdata[hf.Name] = append(mdata[hf.Name], v)
		}
	}

	if !isGRPC || httpStatusErr != "" {
		var code = codes.Internal // when header does not include HTTP status, return INTERNAL

		if httpStatusCode != nil {
			var ok bool
			code, ok = HTTPStatusConvTab[*httpStatusCode]
			if !ok {
				code = codes.Unknown
			}
		}
		var errs []string
		if httpStatusErr != "" {
			errs = append(errs, httpStatusErr)
		}
		if contentTypeErr != "" {
			errs = append(errs, contentTypeErr)
		}
		// Verify the HTTP response is a 200.
		se := status.New(code, strings.Join(errs, "; "))
		t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
		return
	}

	if headerError != "" {
		se := status.New(codes.Internal, headerError)
		t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
		return
	}

	isHeader := false

	// If headerChan hasn't been closed yet
	if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
		s.headerValid = true
		if !endStream {
			// HEADERS frame block carries a Response-Headers.
			isHeader = true
			// These values can be set without any synchronization because
			// stream goroutine will read it only after seeing a closed
			// headerChan which we'll close after setting this.
			s.recvCompress = recvCompress
			if len(mdata) > 0 {
				s.header = mdata
			}
		} else {
			// HEADERS frame block carries a Trailers-Only.
			s.noHeaders = true
		}
		close(s.headerChan)
	}

	if t.statsHandler != nil {
		if isHeader {
			inHeader := &stats.InHeader{
				Client:      true,
				WireLength:  int(frame.Header().Length),
				Header:      metadata.MD(mdata).Copy(),
				Compression: s.recvCompress,
			}
			t.statsHandler.HandleRPC(s.ctx, inHeader)
		} else {
			inTrailer := &stats.InTrailer{
				Client:     true,
				WireLength: int(frame.Header().Length),
				Trailer:    metadata.MD(mdata).Copy(),
			}
			t.statsHandler.HandleRPC(s.ctx, inTrailer)
		}
	}

	if !endStream {
		return
	}

	if statusGen == nil {
		statusGen = status.New(rawStatusCode, grpcMessage)
	}

	// if client received END_STREAM from server while stream was still active, send RST_STREAM
	rst := s.getState() == streamActive
	t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true)
}

// reader runs as a separate goroutine in charge of reading data from network
// connection.
1474 // 1475 // TODO(zhaoq): currently one reader per transport. Investigate whether this is 1476 // optimal. 1477 // TODO(zhaoq): Check the validity of the incoming frame sequence. 1478 func (t *http2Client) reader() { 1479 defer close(t.readerDone) 1480 // Check the validity of server preface. 1481 frame, err := t.framer.fr.ReadFrame() 1482 if err != nil { 1483 err = connectionErrorf(true, err, "error reading server preface: %v", err) 1484 t.Close(err) // this kicks off resetTransport, so must be last before return 1485 return 1486 } 1487 _ = t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) 1488 if t.keepaliveEnabled { 1489 atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) 1490 } 1491 sf, ok := frame.(*http2.SettingsFrame) 1492 if !ok { 1493 // this kicks off resetTransport, so must be last before return 1494 t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)) 1495 return 1496 } 1497 t.onPrefaceReceipt() 1498 t.handleSettings(sf, true) 1499 1500 // loop to keep reading incoming messages on this transport. 1501 for { 1502 t.controlBuf.throttle() 1503 frame, err := t.framer.fr.ReadFrame() 1504 if t.keepaliveEnabled { 1505 atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) 1506 } 1507 if err != nil { 1508 // Abort an active stream if the http2.Framer returns a 1509 // http2.StreamError. This can happen only if the server's response 1510 // is malformed http2. 
1511 if se, ok := err.(http2.StreamError); ok { 1512 t.mu.Lock() 1513 s := t.activeStreams[se.StreamID] 1514 t.mu.Unlock() 1515 if s != nil { 1516 // use error detail to provide better err message 1517 code := http2ErrConvTab[se.Code] 1518 errorDetail := t.framer.fr.ErrorDetail() 1519 var msg string 1520 if errorDetail != nil { 1521 msg = errorDetail.Error() 1522 } else { 1523 msg = "received invalid frame" 1524 } 1525 t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) 1526 } 1527 continue 1528 } else { 1529 // Transport error. 1530 t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) 1531 return 1532 } 1533 } 1534 switch frame := frame.(type) { 1535 case *http2.MetaHeadersFrame: 1536 t.operateHeaders(frame) 1537 case *http2.DataFrame: 1538 t.handleData(frame) 1539 case *http2.RSTStreamFrame: 1540 t.handleRSTStream(frame) 1541 case *http2.SettingsFrame: 1542 t.handleSettings(frame, false) 1543 case *http2.PingFrame: 1544 t.handlePing(frame) 1545 case *http2.GoAwayFrame: 1546 t.handleGoAway(frame) 1547 case *http2.WindowUpdateFrame: 1548 t.handleWindowUpdate(frame) 1549 default: 1550 if logger.V(logLevel) { 1551 logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame) 1552 } 1553 } 1554 } 1555 } 1556 1557 func minTime(a, b time.Duration) time.Duration { 1558 if a < b { 1559 return a 1560 } 1561 return b 1562 } 1563 1564 // keepalive running in a separate goroutine makes sure the connection is alive by sending pings. 1565 func (t *http2Client) keepalive() { 1566 p := &ping{data: [8]byte{}} 1567 // True iff a ping has been sent, and no data has been received since then. 1568 outstandingPing := false 1569 // Amount of time remaining before which we should receive an ACK for the 1570 // last sent ping. 1571 timeoutLeft := time.Duration(0) 1572 // Records the last value of t.lastRead before we go block on the timer. 
1573 // This is required to check for read activity since then. 1574 prevNano := time.Now().UnixNano() 1575 timer := time.NewTimer(t.kp.Time) 1576 for { 1577 select { 1578 case <-timer.C: 1579 lastRead := atomic.LoadInt64(&t.lastRead) 1580 if lastRead > prevNano { 1581 // There has been read activity since the last time we were here. 1582 outstandingPing = false 1583 // Next timer should fire at kp.Time seconds from lastRead time. 1584 timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) 1585 prevNano = lastRead 1586 continue 1587 } 1588 if outstandingPing && timeoutLeft <= 0 { 1589 t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) 1590 return 1591 } 1592 t.mu.Lock() 1593 if t.state == closing { 1594 // If the transport is closing, we should exit from the 1595 // keepalive goroutine here. If not, we could have a race 1596 // between the call to Signal() from Close() and the call to 1597 // Wait() here, whereby the keepalive goroutine ends up 1598 // blocking on the condition variable which will never be 1599 // signalled again. 1600 t.mu.Unlock() 1601 return 1602 } 1603 if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { 1604 // If a ping was sent out previously (because there were active 1605 // streams at that point) which wasn't acked and its timeout 1606 // hadn't fired, but we got here and are about to go dormant, 1607 // we should make sure that we unconditionally send a ping once 1608 // we awaken. 1609 outstandingPing = false 1610 t.kpDormant = true 1611 t.kpDormancyCond.Wait() 1612 } 1613 t.kpDormant = false 1614 t.mu.Unlock() 1615 1616 // We get here either because we were dormant and a new stream was 1617 // created which unblocked the Wait() call, or because the 1618 // keepalive timer expired. In both cases, we need to send a ping. 
1619 if !outstandingPing { 1620 if channelz.IsOn() { 1621 atomic.AddInt64(&t.czData.kpCount, 1) 1622 } 1623 _ = t.controlBuf.put(p) 1624 timeoutLeft = t.kp.Timeout 1625 outstandingPing = true 1626 } 1627 // The amount of time to sleep here is the minimum of kp.Time and 1628 // timeoutLeft. This will ensure that we wait only for kp.Time 1629 // before sending out the next ping (for cases where the ping is 1630 // acked). 1631 sleepDuration := minTime(t.kp.Time, timeoutLeft) 1632 timeoutLeft -= sleepDuration 1633 timer.Reset(sleepDuration) 1634 case <-t.ctx.Done(): 1635 if !timer.Stop() { 1636 <-timer.C 1637 } 1638 return 1639 } 1640 } 1641 } 1642 1643 func (t *http2Client) Error() <-chan struct{} { 1644 return t.ctx.Done() 1645 } 1646 1647 func (t *http2Client) GoAway() <-chan struct{} { 1648 return t.goAway 1649 } 1650 1651 func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { 1652 s := channelz.SocketInternalMetric{ 1653 StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), 1654 StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), 1655 StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), 1656 MessagesSent: atomic.LoadInt64(&t.czData.msgSent), 1657 MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), 1658 KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), 1659 LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), 1660 LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), 1661 LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), 1662 LocalFlowControlWindow: int64(t.fc.getSize()), 1663 SocketOptions: channelz.GetSocketOption(t.conn), 1664 LocalAddr: t.localAddr, 1665 RemoteAddr: t.remoteAddr, 1666 // RemoteName : 1667 } 1668 if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { 1669 s.Security = au.GetSecurityValue() 1670 } 1671 s.RemoteFlowControlWindow = t.getOutFlowWindow() 1672 return &s 1673 } 1674 
1675 func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } 1676 1677 func (t *http2Client) IncrMsgSent() { 1678 atomic.AddInt64(&t.czData.msgSent, 1) 1679 atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) 1680 } 1681 1682 func (t *http2Client) IncrMsgRecv() { 1683 atomic.AddInt64(&t.czData.msgRecv, 1) 1684 atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) 1685 } 1686 1687 func (t *http2Client) getOutFlowWindow() int64 { 1688 resp := make(chan uint32, 1) 1689 timer := time.NewTimer(time.Second) 1690 defer timer.Stop() 1691 _ = t.controlBuf.put(&outFlowControlSizeRequest{resp}) 1692 select { 1693 case sz := <-resp: 1694 return int64(sz) 1695 case <-t.ctxDone: 1696 return -1 1697 case <-timer.C: 1698 return -2 1699 } 1700 }