gitee.com/ks-custle/core-gm@v0.0.0-20230922171213-b83bdd97b62c/grpc/internal/transport/http2_server.go

/*
 *
 * Copyright 2014 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package transport

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"math"
	"net"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	http "gitee.com/ks-custle/core-gm/gmhttp"

	"gitee.com/ks-custle/core-gm/grpc/internal/grpcutil"
	"gitee.com/ks-custle/core-gm/net/http2"
	"gitee.com/ks-custle/core-gm/net/http2/hpack"
	"github.com/golang/protobuf/proto"

	"gitee.com/ks-custle/core-gm/grpc/codes"
	"gitee.com/ks-custle/core-gm/grpc/credentials"
	"gitee.com/ks-custle/core-gm/grpc/internal/channelz"
	"gitee.com/ks-custle/core-gm/grpc/internal/grpcrand"
	"gitee.com/ks-custle/core-gm/grpc/keepalive"
	"gitee.com/ks-custle/core-gm/grpc/metadata"
	"gitee.com/ks-custle/core-gm/grpc/peer"
	"gitee.com/ks-custle/core-gm/grpc/stats"
	"gitee.com/ks-custle/core-gm/grpc/status"
	"gitee.com/ks-custle/core-gm/grpc/tap"
)

var (
	// ErrIllegalHeaderWrite indicates that setting a header is illegal because of
	// the stream's state.
	ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
	// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
	// than the limit set by peer.
	ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
)

// serverConnectionCounter counts the number of connections a server has seen
// (equal to the number of http2Servers created). Must be accessed atomically.
var serverConnectionCounter uint64
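// Callers typically compare errors from header writes against the sentinels
// above. A minimal sketch (hypothetical caller; a *Stream s and metadata md
// are assumed to be in scope):
//
//	if err := t.WriteHeader(s, md); err == ErrIllegalHeaderWrite {
//		// Headers were already sent or the stream is done; drop the update.
//	}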
// http2Server implements the ServerTransport interface with HTTP2.
type http2Server struct {
	lastRead    int64 // Keep this field 64-bit aligned. Accessed atomically.
	ctx         context.Context
	done        chan struct{}
	conn        net.Conn
	loopy       *loopyWriter
	readerDone  chan struct{} // sync point to enable testing.
	writerDone  chan struct{} // sync point to enable testing.
	remoteAddr  net.Addr
	localAddr   net.Addr
	authInfo    credentials.AuthInfo // auth info about the connection
	inTapHandle tap.ServerInHandle
	framer      *framer
	// The max number of concurrent streams.
	maxStreams uint32
	// controlBuf delivers all the control related tasks (e.g., window
	// updates, reset streams, and various settings) to the controller.
	controlBuf *controlBuffer
	fc         *trInFlow
	stats      stats.Handler
	// Keepalive and max-age parameters for the server.
	kp keepalive.ServerParameters
	// Keepalive enforcement policy.
	kep keepalive.EnforcementPolicy
	// The time instance when the last ping was received.
	lastPingAt time.Time
	// Number of times the client has violated the keepalive ping policy so far.
	pingStrikes uint8
	// Flag to signify that the number of ping strikes should be reset to 0.
	// This is set whenever data or header frames are sent.
	// 1 means yes.
	resetPingStrikes      uint32 // Accessed atomically.
	initialWindowSize     int32
	bdpEst                *bdpEstimator
	maxSendHeaderListSize *uint32

	mu sync.Mutex // guard the following

	// drainChan is initialized when Drain() is called for the first time,
	// after which the server writes out the first GoAway frame (with ID
	// 2^31-1). An independent goroutine is then launched to later send the
	// second GoAway. During this time we don't want to write another first
	// GoAway frame, so a call to Drain() is a no-op if drainChan is already
	// initialized, since draining is already underway.
	drainChan     chan struct{}
	state         transportState
	activeStreams map[uint32]*Stream
	// idle is the time instant when the connection went idle.
	// This is either the beginning of the connection or when the number of
	// RPCs go down to 0.
	// When the connection is busy, this value is set to 0.
	idle time.Time

	// Fields below are for channelz metric collection.
	channelzID int64 // channelz unique identification number
	czData     *channelzData
	bufferPool *bufferPool

	connectionID uint64

	// maxStreamMu guards the maximum stream ID.
	// This lock may not be taken if mu is already held.
	maxStreamMu sync.Mutex
	maxStreamID uint32 // max stream ID ever seen
}
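// For orientation, NewServerTransport below performs the server half of the
// HTTP/2 connection setup in this order: write the initial SETTINGS frame
// (plus an optional connection-level WINDOW_UPDATE), read and validate the
// client preface bytes, read the client's first SETTINGS frame, and then
// start the loopy writer and keepalive goroutines.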
// NewServerTransport creates a http2 transport with conn and configuration
// options from config.
//
// It returns a non-nil transport and a nil error on success. On failure, it
// returns a nil transport and a non-nil error. For a special case where the
// underlying conn gets closed before the client preface could be read, it
// returns a nil transport and a nil error.
func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
	var authInfo credentials.AuthInfo
	rawConn := conn
	if config.Credentials != nil {
		var err error
		conn, authInfo, err = config.Credentials.ServerHandshake(rawConn)
		if err != nil {
			// ErrConnDispatched means that the connection was dispatched away
			// from gRPC; those connections should be left open. io.EOF means
			// the connection was closed before handshaking completed, which can
			// happen naturally from probers. Return these errors directly.
			if err == credentials.ErrConnDispatched || err == io.EOF {
				return nil, err
			}
			return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
		}
	}
	writeBufSize := config.WriteBufferSize
	readBufSize := config.ReadBufferSize
	maxHeaderListSize := defaultServerMaxHeaderListSize
	if config.MaxHeaderListSize != nil {
		maxHeaderListSize = *config.MaxHeaderListSize
	}
	framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize)
	// Send initial settings as connection preface to client.
	isettings := []http2.Setting{{
		ID:  http2.SettingMaxFrameSize,
		Val: http2MaxFrameLen,
	}}
	// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
	// permitted in the HTTP2 spec.
	maxStreams := config.MaxStreams
	if maxStreams == 0 {
		maxStreams = math.MaxUint32
	} else {
		isettings = append(isettings, http2.Setting{
			ID:  http2.SettingMaxConcurrentStreams,
			Val: maxStreams,
		})
	}
	dynamicWindow := true
	iwz := int32(initialWindowSize)
	if config.InitialWindowSize >= defaultWindowSize {
		iwz = config.InitialWindowSize
		dynamicWindow = false
	}
	icwz := int32(initialWindowSize)
	if config.InitialConnWindowSize >= defaultWindowSize {
		icwz = config.InitialConnWindowSize
		dynamicWindow = false
	}
	if iwz != defaultWindowSize {
		isettings = append(isettings, http2.Setting{
			ID:  http2.SettingInitialWindowSize,
			Val: uint32(iwz)})
	}
	if config.MaxHeaderListSize != nil {
		isettings = append(isettings, http2.Setting{
			ID:  http2.SettingMaxHeaderListSize,
			Val: *config.MaxHeaderListSize,
		})
	}
	if config.HeaderTableSize != nil {
		isettings = append(isettings, http2.Setting{
			ID:  http2.SettingHeaderTableSize,
			Val: *config.HeaderTableSize,
		})
	}
	if err := framer.fr.WriteSettings(isettings...); err != nil {
		return nil, connectionErrorf(false, err, "transport: %v", err)
	}
	// Adjust the connection flow control window if needed.
	if delta := uint32(icwz - defaultWindowSize); delta > 0 {
		if err := framer.fr.WriteWindowUpdate(0, delta); err != nil {
			return nil, connectionErrorf(false, err, "transport: %v", err)
		}
	}
	kp := config.KeepaliveParams
	if kp.MaxConnectionIdle == 0 {
		kp.MaxConnectionIdle = defaultMaxConnectionIdle
	}
	if kp.MaxConnectionAge == 0 {
		kp.MaxConnectionAge = defaultMaxConnectionAge
	}
	// Add a jitter to MaxConnectionAge.
	kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge)
	if kp.MaxConnectionAgeGrace == 0 {
		kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace
	}
	if kp.Time == 0 {
		kp.Time = defaultServerKeepaliveTime
	}
	if kp.Timeout == 0 {
		kp.Timeout = defaultServerKeepaliveTimeout
	}
	kep := config.KeepalivePolicy
	if kep.MinTime == 0 {
		kep.MinTime = defaultKeepalivePolicyMinTime
	}

	done := make(chan struct{})
	t := &http2Server{
		ctx:               setConnection(context.Background(), rawConn),
		done:              done,
		conn:              conn,
		remoteAddr:        conn.RemoteAddr(),
		localAddr:         conn.LocalAddr(),
		authInfo:          authInfo,
		framer:            framer,
		readerDone:        make(chan struct{}),
		writerDone:        make(chan struct{}),
		maxStreams:        maxStreams,
		inTapHandle:       config.InTapHandle,
		fc:                &trInFlow{limit: uint32(icwz)},
		state:             reachable,
		activeStreams:     make(map[uint32]*Stream),
		stats:             config.StatsHandler,
		kp:                kp,
		idle:              time.Now(),
		kep:               kep,
		initialWindowSize: iwz,
		czData:            new(channelzData),
		bufferPool:        newBufferPool(),
	}
	t.controlBuf = newControlBuffer(t.done)
	if dynamicWindow {
		t.bdpEst = &bdpEstimator{
			bdp:               initialWindowSize,
			updateFlowControl: t.updateFlowControl,
		}
	}
	if t.stats != nil {
		t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{
			RemoteAddr: t.remoteAddr,
			LocalAddr:  t.localAddr,
		})
		connBegin := &stats.ConnBegin{}
		t.stats.HandleConn(t.ctx, connBegin)
	}
	if channelz.IsOn() {
		t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
	}

	t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)

	t.framer.writer.Flush()

	defer func() {
		if err != nil {
			t.Close()
		}
	}()

	// Check the validity of client preface.
	preface := make([]byte, len(clientPreface))
	if _, err := io.ReadFull(t.conn, preface); err != nil {
		// In deployments where a gRPC server runs behind a cloud load balancer
		// which performs regular TCP level health checks, the connection is
		// closed immediately by the latter. Returning io.EOF here allows the
		// grpc server implementation to recognize this scenario and suppress
		// logging to reduce spam.
		if err == io.EOF {
			return nil, io.EOF
		}
		return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
	}
	if !bytes.Equal(preface, clientPreface) {
		return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
	}

	frame, err := t.framer.fr.ReadFrame()
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		return nil, err
	}
	if err != nil {
		return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
	}
	atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
	sf, ok := frame.(*http2.SettingsFrame)
	if !ok {
		return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
	}
	t.handleSettings(sf)

	go func() {
		t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
		t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
		if err := t.loopy.run(); err != nil {
			if logger.V(logLevel) {
				logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
			}
		}
		t.conn.Close()
		t.controlBuf.finish()
		close(t.writerDone)
	}()
	go t.keepalive()
	return t, nil
}
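// A minimal sketch of how a caller might drive this transport (hypothetical;
// lis, serverConfig, handleStream, and traceCtx are assumptions, and error
// handling is elided):
//
//	for {
//		conn, _ := lis.Accept()
//		st, err := NewServerTransport(conn, serverConfig)
//		if err != nil || st == nil {
//			continue // a nil, nil return means the conn closed before the preface
//		}
//		go st.HandleStreams(handleStream, traceCtx)
//	}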
// operateHeaders takes action on the decoded headers. It returns true if a
// fatal error was encountered and the connection should be closed.
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
	// Acquire max stream ID lock for the entire duration.
	t.maxStreamMu.Lock()
	defer t.maxStreamMu.Unlock()

	streamID := frame.Header().StreamID

	// frame.Truncated is set to true when the framer detects that the current
	// header list size hits the MaxHeaderListSize limit.
	if frame.Truncated {
		t.controlBuf.put(&cleanupStream{
			streamID: streamID,
			rst:      true,
			rstCode:  http2.ErrCodeFrameSize,
			onWrite:  func() {},
		})
		return false
	}

	if streamID%2 != 1 || streamID <= t.maxStreamID {
		// illegal gRPC stream id.
		if logger.V(logLevel) {
			logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
		}
		return true
	}
	t.maxStreamID = streamID
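	// Per RFC 7540 §5.1.1, client-initiated streams must use odd stream IDs
	// (1, 3, 5, ...) and each new stream ID must be greater than any the
	// client has previously opened, which is exactly what the check above
	// enforces.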
	buf := newRecvBuffer()
	s := &Stream{
		id:  streamID,
		st:  t,
		buf: buf,
		fc:  &inFlow{limit: uint32(t.initialWindowSize)},
	}
	var (
		// If a gRPC Response-Headers has already been received, then it means
		// that the peer is speaking gRPC and we are in gRPC mode.
		isGRPC     = false
		mdata      = make(map[string][]string)
		httpMethod string
		// headerError is set if an error is encountered while parsing the headers.
		headerError bool

		timeoutSet bool
		timeout    time.Duration
	)

	for _, hf := range frame.Fields {
		switch hf.Name {
		case "content-type":
			contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value)
			if !validContentType {
				break
			}
			mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
			s.contentSubtype = contentSubtype
			isGRPC = true
		case "grpc-encoding":
			s.recvCompress = hf.Value
		case ":method":
			httpMethod = hf.Value
		case ":path":
			s.method = hf.Value
		case "grpc-timeout":
			timeoutSet = true
			var err error
			if timeout, err = decodeTimeout(hf.Value); err != nil {
				headerError = true
			}
		// "Transports must consider requests containing the Connection header
		// as malformed." - A41
		case "connection":
			if logger.V(logLevel) {
				logger.Errorf("transport: http2Server.operateHeaders parsed a connection header which makes a request malformed as per the HTTP/2 spec")
			}
			headerError = true
		default:
			if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
				break
			}
			v, err := decodeMetadataHeader(hf.Name, hf.Value)
			if err != nil {
				headerError = true
				logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
				break
			}
			mdata[hf.Name] = append(mdata[hf.Name], v)
		}
	}
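	// For reference, a grpc-timeout header value is an ASCII integer with a
	// single unit suffix: "H" (hours), "M" (minutes), "S" (seconds),
	// "m" (milliseconds), "u" (microseconds), or "n" (nanoseconds). For
	// example, "100m" decodes to 100 * time.Millisecond. decodeTimeout
	// (defined elsewhere in this package) performs that conversion.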
	// "If multiple Host headers or multiple :authority headers are present, the
	// request must be rejected with an HTTP status code 400 as required by Host
	// validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM
	// with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is an HTTP/2
	// error, this takes precedence over a client not speaking gRPC.
	if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 {
		errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"]))
		if logger.V(logLevel) {
			logger.Errorf("transport: %v", errMsg)
		}
		t.controlBuf.put(&earlyAbortStream{
			httpStatus:     400,
			streamID:       streamID,
			contentSubtype: s.contentSubtype,
			status:         status.New(codes.Internal, errMsg),
		})
		return false
	}

	if !isGRPC || headerError {
		t.controlBuf.put(&cleanupStream{
			streamID: streamID,
			rst:      true,
			rstCode:  http2.ErrCodeProtocol,
			onWrite:  func() {},
		})
		return false
	}

	// "If :authority is missing, Host must be renamed to :authority." - A41
	if len(mdata[":authority"]) == 0 {
		// No-op if host isn't present; an RPC with no eventual :authority
		// header is still valid.
		if host, ok := mdata["host"]; ok {
			mdata[":authority"] = host
			delete(mdata, "host")
		}
	} else {
		// "If :authority is present, Host must be discarded." - A41
		delete(mdata, "host")
	}

	if frame.StreamEnded() {
		// s is just created by the caller. No lock needed.
		s.state = streamReadDone
	}
	if timeoutSet {
		s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout)
	} else {
		s.ctx, s.cancel = context.WithCancel(t.ctx)
	}
	pr := &peer.Peer{
		Addr: t.remoteAddr,
	}
	// Attach Auth info if there is any.
	if t.authInfo != nil {
		pr.AuthInfo = t.authInfo
	}
	s.ctx = peer.NewContext(s.ctx, pr)
	// Attach the received metadata to the context.
	if len(mdata) > 0 {
		s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
		if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 {
			s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1]))
		}
		if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 {
			s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1]))
		}
	}
	t.mu.Lock()
	if t.state != reachable {
		t.mu.Unlock()
		s.cancel()
		return false
	}
	if uint32(len(t.activeStreams)) >= t.maxStreams {
		t.mu.Unlock()
		t.controlBuf.put(&cleanupStream{
			streamID: streamID,
			rst:      true,
			rstCode:  http2.ErrCodeRefusedStream,
			onWrite:  func() {},
		})
		s.cancel()
		return false
	}
	if httpMethod != http.MethodPost {
		t.mu.Unlock()
		if logger.V(logLevel) {
			logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
		}
		t.controlBuf.put(&cleanupStream{
			streamID: streamID,
			rst:      true,
			rstCode:  http2.ErrCodeProtocol,
			onWrite:  func() {},
		})
		s.cancel()
		return false
	}
	if t.inTapHandle != nil {
		var err error
		if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil {
			t.mu.Unlock()
			if logger.V(logLevel) {
				logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
			}
			stat, ok := status.FromError(err)
			if !ok {
				stat = status.New(codes.PermissionDenied, err.Error())
			}
			t.controlBuf.put(&earlyAbortStream{
				httpStatus:     200,
				streamID:       s.id,
				contentSubtype: s.contentSubtype,
				status:         stat,
			})
			return false
		}
	}
	t.activeStreams[streamID] = s
	if len(t.activeStreams) == 1 {
		t.idle = time.Time{}
	}
	t.mu.Unlock()
	if channelz.IsOn() {
		atomic.AddInt64(&t.czData.streamsStarted, 1)
		atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
	}
	s.requestRead = func(n int) {
		t.adjustWindow(s, uint32(n))
	}
	s.ctx = traceCtx(s.ctx, s.method)
	if t.stats != nil {
		s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
		inHeader := &stats.InHeader{
			FullMethod:  s.method,
			RemoteAddr:  t.remoteAddr,
			LocalAddr:   t.localAddr,
			Compression: s.recvCompress,
			WireLength:  int(frame.Header().Length),
			Header:      metadata.MD(mdata).Copy(),
		}
		t.stats.HandleRPC(s.ctx, inHeader)
	}
	s.ctxDone = s.ctx.Done()
	s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
	s.trReader = &transportReader{
		reader: &recvBufferReader{
			ctx:        s.ctx,
			ctxDone:    s.ctxDone,
			recv:       s.buf,
			freeBuffer: t.bufferPool.put,
		},
		windowHandler: func(n int) {
			t.updateWindow(s, uint32(n))
		},
	}
	// Register the stream with loopy.
	t.controlBuf.put(&registerStream{
		streamID: s.id,
		wq:       s.wq,
	})
	handle(s)
	return false
}
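// Illustrative only: the handle and traceCtx callbacks passed to
// HandleStreams below are supplied by the gRPC server. A minimal sketch
// (processRPC is a hypothetical function):
//
//	handle := func(s *Stream) { go processRPC(s) }
//	traceCtx := func(ctx context.Context, method string) context.Context { return ctx }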
// HandleStreams receives incoming streams using the given handler. This is
// typically run in a separate goroutine.
// traceCtx attaches trace to ctx and returns the new context.
func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
	defer close(t.readerDone)
	for {
		t.controlBuf.throttle()
		frame, err := t.framer.fr.ReadFrame()
		atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
		if err != nil {
			if se, ok := err.(http2.StreamError); ok {
				if logger.V(logLevel) {
					logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
				}
				t.mu.Lock()
				s := t.activeStreams[se.StreamID]
				t.mu.Unlock()
				if s != nil {
					t.closeStream(s, true, se.Code, false)
				} else {
					t.controlBuf.put(&cleanupStream{
						streamID: se.StreamID,
						rst:      true,
						rstCode:  se.Code,
						onWrite:  func() {},
					})
				}
				continue
			}
			if err == io.EOF || err == io.ErrUnexpectedEOF {
				t.Close()
				return
			}
			if logger.V(logLevel) {
				logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
			}
			t.Close()
			return
		}
		switch frame := frame.(type) {
		case *http2.MetaHeadersFrame:
			if t.operateHeaders(frame, handle, traceCtx) {
				t.Close()
				break
			}
		case *http2.DataFrame:
			t.handleData(frame)
		case *http2.RSTStreamFrame:
			t.handleRSTStream(frame)
		case *http2.SettingsFrame:
			t.handleSettings(frame)
		case *http2.PingFrame:
			t.handlePing(frame)
		case *http2.WindowUpdateFrame:
			t.handleWindowUpdate(frame)
		case *http2.GoAwayFrame:
			// TODO: Handle GoAway from the client appropriately.
		default:
			if logger.V(logLevel) {
				logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
			}
		}
	}
}

func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.activeStreams == nil {
		// The transport is closing.
		return nil, false
	}
	s, ok := t.activeStreams[f.Header().StreamID]
	if !ok {
		// The stream is already done.
		return nil, false
	}
	return s, true
}

// adjustWindow sends out an extra window update over the initial window size
// of the stream if the application is requesting data larger in size than
// the window.
func (t *http2Server) adjustWindow(s *Stream, n uint32) {
	if w := s.fc.maybeAdjust(n); w > 0 {
		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
	}
}

// updateWindow adjusts the inbound quota for the stream and the transport.
// Window updates will be delivered to the controller for sending when
// the cumulative quota exceeds the corresponding threshold.
func (t *http2Server) updateWindow(s *Stream, n uint32) {
	if w := s.fc.onRead(n); w > 0 {
		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id,
			increment: w,
		})
	}
}
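// Context for updateFlowControl below: it is invoked by the bdpEstimator
// (wired up in NewServerTransport) when a BDP ping measurement suggests the
// peer's bandwidth-delay product exceeds the current window. The new limit n
// is applied to every active stream and to the transport-level window, and
// is advertised to the client via a WINDOW_UPDATE plus a
// SETTINGS_INITIAL_WINDOW_SIZE update.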
// updateFlowControl updates the incoming flow control windows
// for the transport and the stream based on the current bdp
// estimation.
func (t *http2Server) updateFlowControl(n uint32) {
	t.mu.Lock()
	for _, s := range t.activeStreams {
		s.fc.newLimit(n)
	}
	t.initialWindowSize = int32(n)
	t.mu.Unlock()
	t.controlBuf.put(&outgoingWindowUpdate{
		streamID:  0,
		increment: t.fc.newLimit(n),
	})
	t.controlBuf.put(&outgoingSettings{
		ss: []http2.Setting{
			{
				ID:  http2.SettingInitialWindowSize,
				Val: n,
			},
		},
	})
}

func (t *http2Server) handleData(f *http2.DataFrame) {
	size := f.Header().Length
	var sendBDPPing bool
	if t.bdpEst != nil {
		sendBDPPing = t.bdpEst.add(size)
	}
	// Decouple connection's flow control from application's read.
	// An update on connection's flow control should not depend on
	// whether user application has read the data or not. Such a
	// restriction is already imposed on the stream's flow control,
	// and therefore the sender will be blocked anyways.
	// Decoupling the connection flow control will prevent other
	// active(fast) streams from starving in presence of slow or
	// inactive streams.
	if w := t.fc.onData(size); w > 0 {
		t.controlBuf.put(&outgoingWindowUpdate{
			streamID:  0,
			increment: w,
		})
	}
	if sendBDPPing {
		// Avoid excessive ping detection (e.g. in an L7 proxy)
		// by sending a window update prior to the BDP ping.
		if w := t.fc.reset(); w > 0 {
			t.controlBuf.put(&outgoingWindowUpdate{
				streamID:  0,
				increment: w,
			})
		}
		t.controlBuf.put(bdpPing)
	}
	// Select the right stream to dispatch.
	s, ok := t.getStream(f)
	if !ok {
		return
	}
	if s.getState() == streamReadDone {
		t.closeStream(s, true, http2.ErrCodeStreamClosed, false)
		return
	}
	if size > 0 {
		if err := s.fc.onData(size); err != nil {
			t.closeStream(s, true, http2.ErrCodeFlowControl, false)
			return
		}
		if f.Header().Flags.Has(http2.FlagDataPadded) {
			if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
				t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
			}
		}
		// TODO(bradfitz, zhaoq): A copy is required here because there is no
		// guarantee f.Data() is consumed before the arrival of next frame.
		// Can this copy be eliminated?
		if len(f.Data()) > 0 {
			buffer := t.bufferPool.get()
			buffer.Reset()
			buffer.Write(f.Data())
			s.write(recvMsg{buffer: buffer})
		}
	}
	if f.StreamEnded() {
		// Received the end of stream from the client.
		s.compareAndSwapState(streamActive, streamReadDone)
		s.write(recvMsg{err: io.EOF})
	}
}
func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
	// If the stream is not deleted from the transport's active streams map, then do a regular close stream.
	if s, ok := t.getStream(f); ok {
		t.closeStream(s, false, 0, false)
		return
	}
	// If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map.
	t.controlBuf.put(&cleanupStream{
		streamID: f.Header().StreamID,
		rst:      false,
		rstCode:  0,
		onWrite:  func() {},
	})
}

func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
	if f.IsAck() {
		return
	}
	var ss []http2.Setting
	var updateFuncs []func()
	f.ForeachSetting(func(s http2.Setting) error {
		switch s.ID {
		case http2.SettingMaxHeaderListSize:
			updateFuncs = append(updateFuncs, func() {
				t.maxSendHeaderListSize = new(uint32)
				*t.maxSendHeaderListSize = s.Val
			})
		default:
			ss = append(ss, s)
		}
		return nil
	})
	t.controlBuf.executeAndPut(func(interface{}) bool {
		for _, f := range updateFuncs {
			f()
		}
		return true
	}, &incomingSettings{
		ss: ss,
	})
}

const (
	maxPingStrikes     = 2
	defaultPingTimeout = 2 * time.Hour
)

func (t *http2Server) handlePing(f *http2.PingFrame) {
	if f.IsAck() {
		if f.Data == goAwayPing.data && t.drainChan != nil {
			close(t.drainChan)
			return
		}
		// Maybe it's a BDP ping.
		if t.bdpEst != nil {
			t.bdpEst.calculate(f.Data)
		}
		return
	}
	pingAck := &ping{ack: true}
	copy(pingAck.data[:], f.Data[:])
	t.controlBuf.put(pingAck)

	now := time.Now()
	defer func() {
		t.lastPingAt = now
	}()
	// A reset of ping strikes means that we don't need to check for a policy
	// violation for this ping, and the pingStrikes counter should be set
	// to 0.
	if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) {
		t.pingStrikes = 0
		return
	}
	t.mu.Lock()
	ns := len(t.activeStreams)
	t.mu.Unlock()
	if ns < 1 && !t.kep.PermitWithoutStream {
		// Keepalive shouldn't be active; thus, this new ping should
		// have come after at least defaultPingTimeout.
		if t.lastPingAt.Add(defaultPingTimeout).After(now) {
			t.pingStrikes++
		}
	} else {
		// Check if keepalive policy is respected.
		if t.lastPingAt.Add(t.kep.MinTime).After(now) {
			t.pingStrikes++
		}
	}

	if t.pingStrikes > maxPingStrikes {
		// Send goaway and close the connection.
		if logger.V(logLevel) {
			logger.Errorf("transport: Got too many pings from the client, closing the connection.")
		}
		t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
	}
}
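// A worked example of the strike policy above: with maxPingStrikes = 2 and
// an enforcement policy of MinTime = 5m, a client with active streams that
// sends pings one minute apart (with no intervening data or header frames to
// reset the counter) accumulates one strike per ping; the third strike
// exceeds maxPingStrikes and the connection is closed with
// ENHANCE_YOUR_CALM and debug data "too_many_pings".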
func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
	t.controlBuf.put(&incomingWindowUpdate{
		streamID:  f.Header().StreamID,
		increment: f.Increment,
	})
}

func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField {
	for k, vv := range md {
		if isReservedHeader(k) {
			// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
			continue
		}
		for _, v := range vv {
			headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
		}
	}
	return headerFields
}

func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
	if t.maxSendHeaderListSize == nil {
		return true
	}
	hdrFrame := it.(*headerFrame)
	var sz int64
	for _, f := range hdrFrame.hf {
		if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
			if logger.V(logLevel) {
				logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
			}
			return false
		}
	}
	return true
}

// WriteHeader sends the header metadata md back to the client.
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
	if s.updateHeaderSent() || s.getState() == streamDone {
		return ErrIllegalHeaderWrite
	}
	s.hdrMu.Lock()
	if md.Len() > 0 {
		if s.header.Len() > 0 {
			s.header = metadata.Join(s.header, md)
		} else {
			s.header = md
		}
	}
	if err := t.writeHeaderLocked(s); err != nil {
		s.hdrMu.Unlock()
		return err
	}
	s.hdrMu.Unlock()
	return nil
}

func (t *http2Server) setResetPingStrikes() {
	atomic.StoreUint32(&t.resetPingStrikes, 1)
}

func (t *http2Server) writeHeaderLocked(s *Stream) error {
	// TODO(mmukhi): Benchmark if the performance gets better if we count the
	// metadata and other header fields first and create a slice of that exact size.
	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
	headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
	if s.sendCompress != "" {
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
	}
	headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
	success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
		streamID:  s.id,
		hf:        headerFields,
		endStream: false,
		onWrite:   t.setResetPingStrikes,
	})
	if !success {
		if err != nil {
			return err
		}
		t.closeStream(s, true, http2.ErrCodeInternal, false)
		return ErrHeaderListSizeLimitViolation
	}
	if t.stats != nil {
		// Note: Headers are compressed with hpack after this call returns.
		// No WireLength field is set here.
		outHeader := &stats.OutHeader{
			Header:      s.header.Copy(),
			Compression: s.sendCompress,
		}
		t.stats.HandleRPC(s.Context(), outHeader)
	}
	return nil
}
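// For reference, encodeMetadataHeader (defined elsewhere in this package)
// base64-encodes values of metadata keys ending in "-bin", since arbitrary
// binary data cannot be carried directly in an HTTP/2 header value; other
// values are sent as-is.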
// WriteStatus sends stream status to the client and terminates the stream.
// No further I/O operations can be performed on this stream afterwards.
// TODO(zhaoq): Now it indicates the end of the entire stream. Revisit if early
// OK is adopted.
func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
	if s.getState() == streamDone {
		return nil
	}
	s.hdrMu.Lock()
	// TODO(mmukhi): Benchmark if the performance gets better if we count the
	// metadata and other header fields first and create a slice of that exact size.
	headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
	if !s.updateHeaderSent() { // No headers have been sent.
		if len(s.header) > 0 { // Send a separate header frame.
			if err := t.writeHeaderLocked(s); err != nil {
				s.hdrMu.Unlock()
				return err
			}
		} else { // Send a trailer only response.
			headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
			headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
		}
	}
	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})

	if p := st.Proto(); p != nil && len(p.Details) > 0 {
		stBytes, err := proto.Marshal(p)
		if err != nil {
			// TODO: return error instead, when callers are able to handle it.
			logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
		} else {
			headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
		}
	}

	// Attach the trailer metadata.
	headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer)
	trailingHeader := &headerFrame{
		streamID:  s.id,
		hf:        headerFields,
		endStream: true,
		onWrite:   t.setResetPingStrikes,
	}
	s.hdrMu.Unlock()
	success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
	if !success {
		if err != nil {
			return err
		}
		t.closeStream(s, true, http2.ErrCodeInternal, false)
		return ErrHeaderListSizeLimitViolation
	}
	// Send a RST_STREAM after the trailers if the client has not already half-closed.
	rst := s.getState() == streamActive
	t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
	if t.stats != nil {
		// Note: The trailer fields are compressed with hpack after this call returns.
		// No WireLength field is set here.
		t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
			Trailer: s.trailer.Copy(),
		})
	}
	return nil
}

// Write converts the data into an HTTP2 data frame and sends it out. A non-nil
// error is returned if it fails (e.g., framing error, transport error).
func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
	if !s.isHeaderSent() { // Headers haven't been written yet.
		if err := t.WriteHeader(s, nil); err != nil {
			if _, ok := err.(ConnectionError); ok {
				return err
			}
			// TODO(mmukhi, dfawley): Make sure this is the right code to return.
			return status.Errorf(codes.Internal, "transport: %v", err)
		}
	} else {
		// Writing headers checks for this condition.
		if s.getState() == streamDone {
			// TODO(mmukhi, dfawley): Should the server write also return io.EOF?
			s.cancel()
			select {
			case <-t.done:
				return ErrConnClosing
			default:
			}
			return ContextErr(s.ctx.Err())
		}
	}
	df := &dataFrame{
		streamID:    s.id,
		h:           hdr,
		d:           data,
		onEachWrite: t.setResetPingStrikes,
	}
	if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
		select {
		case <-t.done:
			return ErrConnClosing
		default:
		}
		return ContextErr(s.ctx.Err())
	}
	return t.controlBuf.put(df)
}
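// A concrete timeline for the keepalive loop below, assuming kp.Time = 2h
// and kp.Timeout = 20s: after two hours with no reads on the connection, the
// server queues a PING; if no frame at all is read within the following 20
// seconds, the transport is closed as unresponsive. Any read activity in
// between simply re-arms the timer.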
// keepalive running in a separate goroutine does the following:
// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
// after an additional duration of keepalive.Timeout.
func (t *http2Server) keepalive() {
	p := &ping{}
	// True iff a ping has been sent, and no data has been received since then.
	outstandingPing := false
	// Amount of time remaining before which we should receive an ACK for the
	// last sent ping.
	kpTimeoutLeft := time.Duration(0)
	// Records the last value of t.lastRead before we go block on the timer.
	// This is required to check for read activity since then.
	prevNano := time.Now().UnixNano()
	// Initialize the different timers to their default values.
	idleTimer := time.NewTimer(t.kp.MaxConnectionIdle)
	ageTimer := time.NewTimer(t.kp.MaxConnectionAge)
	kpTimer := time.NewTimer(t.kp.Time)
	defer func() {
		// We need to drain the underlying channel in these timers after a call
		// to Stop(), only if we are interested in resetting them. Clearly we
		// are not interested in resetting them here.
		idleTimer.Stop()
		ageTimer.Stop()
		kpTimer.Stop()
	}()

	for {
		select {
		case <-idleTimer.C:
			t.mu.Lock()
			idle := t.idle
			if idle.IsZero() { // The connection is non-idle.
				t.mu.Unlock()
				idleTimer.Reset(t.kp.MaxConnectionIdle)
				continue
			}
			val := t.kp.MaxConnectionIdle - time.Since(idle)
			t.mu.Unlock()
			if val <= 0 {
				// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
				// Gracefully close the connection.
				t.Drain()
				return
			}
			idleTimer.Reset(val)
		case <-ageTimer.C:
			t.Drain()
			ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
			select {
			case <-ageTimer.C:
				// Close the connection after grace period.
				if logger.V(logLevel) {
					logger.Infof("transport: closing server transport due to maximum connection age.")
				}
				t.Close()
			case <-t.done:
			}
			return
		case <-kpTimer.C:
			lastRead := atomic.LoadInt64(&t.lastRead)
			if lastRead > prevNano {
				// There has been read activity since the last time we were
				// here. Set up the timer to fire at kp.Time seconds from
				// lastRead time and continue.
				outstandingPing = false
				kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
				prevNano = lastRead
				continue
			}
			if outstandingPing && kpTimeoutLeft <= 0 {
				if logger.V(logLevel) {
					logger.Infof("transport: closing server transport due to idleness.")
				}
				t.Close()
				return
			}
			if !outstandingPing {
				if channelz.IsOn() {
					atomic.AddInt64(&t.czData.kpCount, 1)
				}
				t.controlBuf.put(p)
				kpTimeoutLeft = t.kp.Timeout
				outstandingPing = true
			}
			// The amount of time to sleep here is the minimum of kp.Time and
			// timeoutLeft. This will ensure that we wait only for kp.Time
			// before sending out the next ping (for cases where the ping is
			// acked).
			sleepDuration := minTime(t.kp.Time, kpTimeoutLeft)
			kpTimeoutLeft -= sleepDuration
			kpTimer.Reset(sleepDuration)
		case <-t.done:
			return
		}
	}
}
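// Note the division of labor between the two shutdown paths used above:
// Drain (graceful) sends GOAWAY and lets in-flight streams finish, and is
// used for MaxConnectionIdle and MaxConnectionAge; Close (abrupt) tears the
// connection down immediately, and is used when the age grace period expires
// or the peer stops acknowledging keepalive pings.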
// Close starts shutting down the http2Server transport.
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
// could cause some resource issues. Revisit this later.
func (t *http2Server) Close() {
	t.mu.Lock()
	if t.state == closing {
		t.mu.Unlock()
		return
	}
	t.state = closing
	streams := t.activeStreams
	t.activeStreams = nil
	t.mu.Unlock()
	t.controlBuf.finish()
	close(t.done)
	if err := t.conn.Close(); err != nil && logger.V(logLevel) {
		logger.Infof("transport: error closing conn during Close: %v", err)
	}
	if channelz.IsOn() {
		channelz.RemoveEntry(t.channelzID)
	}
	// Cancel all active streams.
	for _, s := range streams {
		s.cancel()
	}
	if t.stats != nil {
		connEnd := &stats.ConnEnd{}
		t.stats.HandleConn(t.ctx, connEnd)
	}
}

// deleteStream deletes the stream s from transport's active streams.
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
	// In case stream sending and receiving are invoked in separate
	// goroutines (e.g., bi-directional streaming), cancel needs to be
	// called to interrupt the potential blocking on other goroutines.
	s.cancel()

	t.mu.Lock()
	if _, ok := t.activeStreams[s.id]; ok {
		delete(t.activeStreams, s.id)
		if len(t.activeStreams) == 0 {
			t.idle = time.Now()
		}
	}
	t.mu.Unlock()

	if channelz.IsOn() {
		if eosReceived {
			atomic.AddInt64(&t.czData.streamsSucceeded, 1)
		} else {
			atomic.AddInt64(&t.czData.streamsFailed, 1)
		}
	}
}

// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
	oldState := s.swapState(streamDone)
	if oldState == streamDone {
		// If the stream was already done, return.
		return
	}

	hdr.cleanup = &cleanupStream{
		streamID: s.id,
		rst:      rst,
		rstCode:  rstCode,
		onWrite: func() {
			t.deleteStream(s, eosReceived)
		},
	}
	t.controlBuf.put(hdr)
}

// closeStream clears the footprint of a stream when the stream is not needed any more.
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
	s.swapState(streamDone)
	t.deleteStream(s, eosReceived)

	t.controlBuf.put(&cleanupStream{
		streamID: s.id,
		rst:      rst,
		rstCode:  rstCode,
		onWrite:  func() {},
	})
}

func (t *http2Server) RemoteAddr() net.Addr {
	return t.remoteAddr
}

func (t *http2Server) Drain() {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.drainChan != nil {
		return
	}
	t.drainChan = make(chan struct{})
	t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true})
}

var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
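// The graceful drain implemented by outgoingGoAwayHandler below is a
// two-step sequence:
//
//	1. Send a GOAWAY with the maximum stream ID plus a marker ping
//	   (goAwayPing), while still accepting streams that may already be in
//	   flight from the client.
//	2. When the ping is acked (or a one-minute timer fires), send a second
//	   GOAWAY carrying the highest stream ID actually processed; the
//	   connection closes once the remaining streams drain.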
// outgoingGoAwayHandler handles an outgoing GoAway and returns true if loopy
// needs to put itself in draining mode.
func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
	t.maxStreamMu.Lock()
	t.mu.Lock()
	if t.state == closing { // TODO(mmukhi): This seems unnecessary.
		t.mu.Unlock()
		t.maxStreamMu.Unlock()
		// The transport is closing.
		return false, ErrConnClosing
	}
	if !g.headsUp {
		// Stop accepting more streams now.
		t.state = draining
		sid := t.maxStreamID
		if len(t.activeStreams) == 0 {
			g.closeConn = true
		}
		t.mu.Unlock()
		t.maxStreamMu.Unlock()
		if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
			return false, err
		}
		if g.closeConn {
			// Abruptly close the connection following the GoAway (via
			// loopywriter). But flush out what's inside the buffer first.
			t.framer.writer.Flush()
			return false, fmt.Errorf("transport: Connection closing")
		}
		return true, nil
	}
	t.mu.Unlock()
	t.maxStreamMu.Unlock()
	// For a graceful close, send out a GoAway with a stream ID of MaxUint32,
	// followed by a ping, and wait for the ack to come back or a timer to
	// expire. During this time accept new streams since they might have
	// originated before the GoAway reaches the client.
	// After getting the ack or timer expiration send out another GoAway this
	// time with an ID of the max stream server intends to process.
	if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
		return false, err
	}
	if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
		return false, err
	}
	go func() {
		timer := time.NewTimer(time.Minute)
		defer timer.Stop()
		select {
		case <-t.drainChan:
		case <-timer.C:
		case <-t.done:
			return
		}
		t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData})
	}()
	return false, nil
}

func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
	s := channelz.SocketInternalMetric{
		StreamsStarted:                   atomic.LoadInt64(&t.czData.streamsStarted),
		StreamsSucceeded:                 atomic.LoadInt64(&t.czData.streamsSucceeded),
		StreamsFailed:                    atomic.LoadInt64(&t.czData.streamsFailed),
		MessagesSent:                     atomic.LoadInt64(&t.czData.msgSent),
		MessagesReceived:                 atomic.LoadInt64(&t.czData.msgRecv),
		KeepAlivesSent:                   atomic.LoadInt64(&t.czData.kpCount),
		LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
		LastMessageSentTimestamp:         time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
		LastMessageReceivedTimestamp:     time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
		LocalFlowControlWindow:           int64(t.fc.getSize()),
		SocketOptions:                    channelz.GetSocketOption(t.conn),
		LocalAddr:                        t.localAddr,
		RemoteAddr:                       t.remoteAddr,
		// RemoteName :
	}
	if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
		s.Security = au.GetSecurityValue()
	}
	s.RemoteFlowControlWindow = t.getOutFlowWindow()
	return &s
}

func (t *http2Server) IncrMsgSent() {
	atomic.AddInt64(&t.czData.msgSent, 1)
	atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
}

func (t *http2Server) IncrMsgRecv() {
	atomic.AddInt64(&t.czData.msgRecv, 1)
	atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
}

func (t *http2Server) getOutFlowWindow() int64 {
	resp := make(chan uint32, 1)
	timer := time.NewTimer(time.Second)
	defer timer.Stop()
	t.controlBuf.put(&outFlowControlSizeRequest{resp})
	select {
	case sz := <-resp:
		return int64(sz)
	case <-t.done:
		return -1
	case <-timer.C:
		return -2
	}
}
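// For example, with MaxConnectionAge = 30m, getJitter below draws a value
// uniformly from [-3m, 3m), so identically configured connections do not all
// reach their maximum age at the same instant.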
func getJitter(v time.Duration) time.Duration {
	if v == infinity {
		return 0
	}
	// Generate a jitter between +/- 10% of the value.
	r := int64(v / 10)
	j := grpcrand.Int63n(2*r) - r
	return time.Duration(j)
}

type connectionKey struct{}

// GetConnection gets the connection from the context.
func GetConnection(ctx context.Context) net.Conn {
	conn, _ := ctx.Value(connectionKey{}).(net.Conn)
	return conn
}

// setConnection adds the connection to the context to be able to get
// information about the destination ip and port for an incoming RPC. This also
// allows any unary or streaming interceptors to see the connection.
func setConnection(ctx context.Context, conn net.Conn) context.Context {
	return context.WithValue(ctx, connectionKey{}, conn)
}
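// A sketch of how a server interceptor might use GetConnection (hypothetical;
// assumes the caller imports the grpc, log, and transport packages):
//
//	func loggingUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
//		if conn := transport.GetConnection(ctx); conn != nil {
//			log.Printf("RPC %s over %s", info.FullMethod, conn.LocalAddr())
//		}
//		return handler(ctx, req)
//	}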