gitee.com/zhaochuninhefei/gmgo@v0.0.31-0.20240209061119-069254a02979/grpc/stream.go (about) 1 /* 2 * 3 * Copyright 2014 gRPC authors. 4 * 5 * Licensed under the Apache License, Version 2.0 (the "License"); 6 * you may not use this file except in compliance with the License. 7 * You may obtain a copy of the License at 8 * 9 * http://www.apache.org/licenses/LICENSE-2.0 10 * 11 * Unless required by applicable law or agreed to in writing, software 12 * distributed under the License is distributed on an "AS IS" BASIS, 13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 * See the License for the specific language governing permissions and 15 * limitations under the License. 16 * 17 */ 18 19 package grpc 20 21 import ( 22 "context" 23 "errors" 24 "io" 25 "math" 26 "strconv" 27 "sync" 28 "time" 29 30 "gitee.com/zhaochuninhefei/gmgo/grpc/balancer" 31 "gitee.com/zhaochuninhefei/gmgo/grpc/codes" 32 "gitee.com/zhaochuninhefei/gmgo/grpc/encoding" 33 "gitee.com/zhaochuninhefei/gmgo/grpc/internal/balancerload" 34 "gitee.com/zhaochuninhefei/gmgo/grpc/internal/binarylog" 35 "gitee.com/zhaochuninhefei/gmgo/grpc/internal/channelz" 36 "gitee.com/zhaochuninhefei/gmgo/grpc/internal/grpcrand" 37 "gitee.com/zhaochuninhefei/gmgo/grpc/internal/grpcutil" 38 iresolver "gitee.com/zhaochuninhefei/gmgo/grpc/internal/resolver" 39 "gitee.com/zhaochuninhefei/gmgo/grpc/internal/serviceconfig" 40 "gitee.com/zhaochuninhefei/gmgo/grpc/internal/transport" 41 "gitee.com/zhaochuninhefei/gmgo/grpc/metadata" 42 "gitee.com/zhaochuninhefei/gmgo/grpc/peer" 43 "gitee.com/zhaochuninhefei/gmgo/grpc/stats" 44 "gitee.com/zhaochuninhefei/gmgo/grpc/status" 45 "gitee.com/zhaochuninhefei/gmgo/net/trace" 46 ) 47 48 // StreamHandler defines the handler called by gRPC server to complete the 49 // execution of a streaming RPC. 
// If a StreamHandler returns an error, it
// should be produced by the status package, or else gRPC will use
// codes.Unknown as the status code and err.Error() as the status message
// of the RPC.
type StreamHandler func(srv interface{}, stream ServerStream) error

// StreamDesc represents a streaming RPC service's method specification. Used
// on the server when registering services and on the client when initiating
// new streams.
type StreamDesc struct {
	// StreamName and Handler are only used when registering handlers on a
	// server.
	StreamName string        // the name of the method excluding the service
	Handler    StreamHandler // the handler called for the method

	// ServerStreams and ClientStreams are used for registering handlers on a
	// server as well as defining RPC behavior when passed to NewClientStream
	// and ClientConn.NewStream. At least one must be true.
	ServerStreams bool // indicates the server can perform streaming sends
	ClientStreams bool // indicates the client can perform streaming sends
}

// Stream defines the common interface a client or server stream has to satisfy.
//
// ToDeprecated: See ClientStream and ServerStream documentation instead.
//goland:noinspection GoCommentStart
type Stream interface {
	// ToDeprecated: See ClientStream and ServerStream documentation instead.
	Context() context.Context
	// ToDeprecated: See ClientStream and ServerStream documentation instead.
	SendMsg(m interface{}) error
	// ToDeprecated: See ClientStream and ServerStream documentation instead.
	RecvMsg(m interface{}) error
}

// ClientStream defines the client-side behavior of a streaming RPC.
//
// All errors returned from ClientStream methods are compatible with the
// status package.
type ClientStream interface {
	// Header returns the header metadata received from the server if there
	// is any. It blocks if the metadata is not ready to read.
	Header() (metadata.MD, error)
	// Trailer returns the trailer metadata from the server, if there is any.
	// It must only be called after stream.CloseAndRecv has returned, or
	// stream.Recv has returned a non-nil error (including io.EOF).
	Trailer() metadata.MD
	// CloseSend closes the send direction of the stream. It closes the stream
	// when non-nil error is met. It is also not safe to call CloseSend
	// concurrently with SendMsg.
	CloseSend() error
	// Context returns the context for this stream.
	//
	// It should not be called until after Header or RecvMsg has returned. Once
	// called, subsequent client-side retries are disabled.
	Context() context.Context
	// SendMsg is generally called by generated code. On error, SendMsg aborts
	// the stream. If the error was generated by the client, the status is
	// returned directly; otherwise, io.EOF is returned and the status of
	// the stream may be discovered using RecvMsg.
	//
	// SendMsg blocks until:
	//   - There is sufficient flow control to schedule m with the transport, or
	//   - The stream is done, or
	//   - The stream breaks.
	//
	// SendMsg does not wait until the message is received by the server. An
	// untimely stream closure may result in lost messages. To ensure delivery,
	// users should ensure the RPC completed successfully using RecvMsg.
	//
	// It is safe to have a goroutine calling SendMsg and another goroutine
	// calling RecvMsg on the same stream at the same time, but it is not safe
	// to call SendMsg on the same stream in different goroutines. It is also
	// not safe to call CloseSend concurrently with SendMsg.
	SendMsg(m interface{}) error
	// RecvMsg blocks until it receives a message into m or the stream is
	// done. It returns io.EOF when the stream completes successfully. On
	// any other error, the stream is aborted and the error contains the RPC
	// status.
	//
	// It is safe to have a goroutine calling SendMsg and another goroutine
	// calling RecvMsg on the same stream at the same time, but it is not
	// safe to call RecvMsg on the same stream in different goroutines.
	RecvMsg(m interface{}) error
}

// NewStream creates a new Stream for the client side. This is typically
// called by generated code. ctx is used for the lifetime of the stream.
//
// To ensure resources are not leaked due to the stream returned, one of the following
// actions must be performed:
//
//  1. Call Close on the ClientConn.
//  2. Cancel the context provided.
//  3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
//     client-streaming RPC, for instance, might use the helper function
//     CloseAndRecv (note that CloseSend does not Recv, therefore is not
//     guaranteed to release all resources).
//  4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
//
// If none of the above happen, a goroutine and a context will be leaked, and grpc
// will not call the optionally-configured stats handler with a stats.End message.
func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
	// allow interceptor to see all applicable call options, which means those
	// configured as defaults from dial option as well as per-call options
	opts = combine(cc.dopts.callOptions, opts)

	if cc.dopts.streamInt != nil {
		return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
	}
	return newClientStream(ctx, desc, cc, method, opts...)
}

// NewClientStream is a wrapper for ClientConn.NewStream.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
	return cc.NewStream(ctx, desc, method, opts...)
}

// newClientStream resolves the method's service config (via the config
// selector) and then delegates to newClientStreamWithParams, wrapping the
// creation in any config-selector-provided interceptor.
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
	if channelz.IsOn() {
		cc.incrCallsStarted()
		defer func() {
			if err != nil {
				cc.incrCallsFailed()
			}
		}()
	}
	// Provide an opportunity for the first RPC to see the first service config
	// provided by the resolver.
	if err := cc.waitForResolvedAddrs(ctx); err != nil {
		return nil, err
	}

	var mc serviceconfig.MethodConfig
	var onCommit func()
	var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
		return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...)
	}

	rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method}
	rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo)
	if err != nil {
		return nil, toRPCErr(err)
	}

	if rpcConfig != nil {
		if rpcConfig.Context != nil {
			ctx = rpcConfig.Context
		}
		mc = rpcConfig.MethodConfig
		onCommit = rpcConfig.OnCommitted
		if rpcConfig.Interceptor != nil {
			rpcInfo.Context = nil
			ns := newStream
			newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
				cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns)
				if err != nil {
					return nil, toRPCErr(err)
				}
				return cs, nil
			}
		}
	}

	return newStream(ctx, func() {})
}

// newClientStreamWithParams builds the clientStream: it applies call options
// and method config, resolves compression, creates the first attempt, and
// (for non-unary streams) spawns a goroutine to clean up on context or
// connection closure.
func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) {
	c := defaultCallInfo()
	if mc.WaitForReady != nil {
		c.failFast = !*mc.WaitForReady
	}

	// Possible context leak:
	// The cancel function for the child context we create will only be called
	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
	// an error is generated by SendMsg.
	// https://github.com/grpc/grpc-go/issues/1818.
	var cancel context.CancelFunc
	if mc.Timeout != nil && *mc.Timeout >= 0 {
		ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
	} else {
		ctx, cancel = context.WithCancel(ctx)
	}
	defer func() {
		if err != nil {
			cancel()
		}
	}()

	for _, o := range opts {
		if err := o.before(c); err != nil {
			return nil, toRPCErr(err)
		}
	}
	c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
	c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
	if err := setCallInfoCodec(c); err != nil {
		return nil, err
	}

	callHdr := &transport.CallHdr{
		Host:           cc.authority,
		Method:         method,
		ContentSubtype: c.contentSubtype,
		DoneFunc:       doneFunc,
	}

	// Set our outgoing compression according to the UseCompressor CallOption, if
	// set. In that case, also find the compressor from the encoding package.
	// Otherwise, use the compressor configured by the WithCompressor DialOption,
	// if set.
	var cp Compressor
	var comp encoding.Compressor
	if ct := c.compressorType; ct != "" {
		callHdr.SendCompress = ct
		if ct != encoding.Identity {
			comp = encoding.GetCompressor(ct)
			if comp == nil {
				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
			}
		}
	} else if cc.dopts.cp != nil {
		callHdr.SendCompress = cc.dopts.cp.Type()
		cp = cc.dopts.cp
	}
	if c.creds != nil {
		callHdr.Creds = c.creds
	}

	cs := &clientStream{
		callHdr:      callHdr,
		ctx:          ctx,
		methodConfig: &mc,
		opts:         opts,
		callInfo:     c,
		cc:           cc,
		desc:         desc,
		codec:        c.codec,
		cp:           cp,
		comp:         comp,
		cancel:       cancel,
		firstAttempt: true,
		onCommit:     onCommit,
	}
	if !cc.dopts.disableRetry {
		cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
	}
	cs.binlog = binarylog.GetMethodLogger(method)

	if err := cs.newAttemptLocked(false /* isTransparent */); err != nil {
		cs.finish(err)
		return nil, err
	}

	op := func(a *csAttempt) error { return a.newStream() }
	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
		cs.finish(err)
		return nil, err
	}

	if cs.binlog != nil {
		md, _ := metadata.FromOutgoingContext(ctx)
		logEntry := &binarylog.ClientHeader{
			OnClientSide: true,
			Header:       md,
			MethodName:   method,
			Authority:    cs.cc.authority,
		}
		if deadline, ok := ctx.Deadline(); ok {
			logEntry.Timeout = time.Until(deadline)
			if logEntry.Timeout < 0 {
				logEntry.Timeout = 0
			}
		}
		cs.binlog.Log(logEntry)
	}

	if desc != unaryStreamDesc {
		// Listen on cc and stream contexts to cleanup when the user closes the
		// ClientConn or cancels the stream context. In all other cases, an error
		// should already be injected into the recv buffer by the transport, which
		// the client will eventually receive, and then we will cancel the stream's
		// context in clientStream.finish.
		go func() {
			select {
			case <-cc.ctx.Done():
				cs.finish(ErrClientConnClosing)
			case <-ctx.Done():
				cs.finish(toRPCErr(ctx.Err()))
			}
		}()
	}
	return cs, nil
}

// newAttemptLocked creates a new attempt with a transport.
// If it succeeds, then it replaces clientStream's attempt with this new attempt.
func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) {
	ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp)
	method := cs.callHdr.Method
	sh := cs.cc.dopts.copts.StatsHandler
	var beginTime time.Time
	if sh != nil {
		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast})
		beginTime = time.Now()
		begin := &stats.Begin{
			Client:                    true,
			BeginTime:                 beginTime,
			FailFast:                  cs.callInfo.failFast,
			IsClientStream:            cs.desc.ClientStreams,
			IsServerStream:            cs.desc.ServerStreams,
			IsTransparentRetryAttempt: isTransparent,
		}
		sh.HandleRPC(ctx, begin)
	}

	var trInfo *traceInfo
	if EnableTracing {
		trInfo = &traceInfo{
			tr: trace.New("grpc.Sent."+methodFamily(method), method),
			firstLine: firstLine{
				client: true,
			},
		}
		if deadline, ok := ctx.Deadline(); ok {
			trInfo.firstLine.deadline = time.Until(deadline)
		}
		trInfo.tr.LazyLog(&trInfo.firstLine, false)
		ctx = trace.NewContext(ctx, trInfo.tr)
	}

	newAttempt := &csAttempt{
		ctx:          ctx,
		beginTime:    beginTime,
		cs:           cs,
		dc:           cs.cc.dopts.dc,
		statsHandler: sh,
		trInfo:       trInfo,
	}
	defer func() {
		if retErr != nil {
			// This attempt is not set in the clientStream, so its finish won't
			// be called. Call it here for stats and trace in case they are not
			// nil.
			newAttempt.finish(retErr)
		}
	}()

	if err := ctx.Err(); err != nil {
		return toRPCErr(err)
	}

	// Target.Scheme is deprecated, use Target.GetScheme() instead.
	//if cs.cc.parsedTarget.Scheme == "xds" {
	if cs.cc.parsedTarget.GetScheme() == "xds" {
		// Add extra metadata (metadata that will be added by transport) to context
		// so the balancer can see them.
		ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
			"content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
		))
	}
	t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method)
	if err != nil {
		return err
	}
	if trInfo != nil {
		trInfo.firstLine.SetRemoteAddr(t.RemoteAddr())
	}
	newAttempt.t = t
	newAttempt.done = done
	cs.attempt = newAttempt
	return nil
}

// newStream creates the transport-level stream for this attempt, recording
// the number of previous (non-transparent) retries in the call header.
func (a *csAttempt) newStream() error {
	cs := a.cs
	cs.callHdr.PreviousAttempts = cs.numRetries
	s, err := a.t.NewStream(a.ctx, cs.callHdr)
	if err != nil {
		// Return without converting to an RPC error so retry code can
		// inspect.
		return err
	}
	cs.attempt.s = s
	cs.attempt.p = &parser{r: s}
	return nil
}

// clientStream implements a client side Stream.
type clientStream struct {
	callHdr  *transport.CallHdr
	opts     []CallOption
	callInfo *callInfo
	cc       *ClientConn
	desc     *StreamDesc

	codec baseCodec
	cp    Compressor
	comp  encoding.Compressor

	cancel context.CancelFunc // cancels all attempts

	sentLast bool // sent an end stream

	methodConfig *MethodConfig

	ctx context.Context // the application's context, wrapped by stats/tracing

	retryThrottler *retryThrottler // The throttler active when the RPC began.

	binlog *binarylog.MethodLogger // Binary logger, can be nil.
	// serverHeaderBinlogged is a boolean for whether server header has been
	// logged. Server header will be logged when the first time one of those
	// happens: stream.Header(), stream.Recv().
	//
	// It's only read and used by Recv() and Header(), so it doesn't need to be
	// synchronized.
	serverHeaderBinlogged bool

	mu                      sync.Mutex
	firstAttempt            bool // if true, transparent retry is valid
	numRetries              int  // exclusive of transparent retry attempt(s)
	numRetriesSincePushback int  // retries since pushback; to reset backoff
	finished                bool // TODO: replace with atomic cmpxchg or sync.Once?
	// attempt is the active client stream attempt.
	// The only place where it is written is the newAttemptLocked method and this method never writes nil.
	// So, attempt can be nil only inside newClientStream function when clientStream is first created.
	// One of the first things done after clientStream's creation, is to call newAttemptLocked which either
	// assigns a non nil value to the attempt or returns an error. If an error is returned from newAttemptLocked,
	// then newClientStream calls finish on the clientStream and returns. So, finish method is the only
	// place where we need to check if the attempt is nil.
	attempt *csAttempt
	// TODO(hedging): hedging will have multiple attempts simultaneously.
	committed  bool // active attempt committed for retry?
	onCommit   func()
	buffer     []func(a *csAttempt) error // operations to replay on retry
	bufferSize int                        // current size of buffer
}

// csAttempt implements a single transport stream attempt within a
// clientStream.
type csAttempt struct {
	ctx  context.Context
	cs   *clientStream
	t    transport.ClientTransport
	s    *transport.Stream
	p    *parser
	done func(balancer.DoneInfo)

	finished  bool
	dc        Decompressor
	decomp    encoding.Compressor
	decompSet bool

	mu sync.Mutex // guards trInfo.tr
	// trInfo may be nil (if EnableTracing is false).
	// trInfo.tr is set when created (if EnableTracing is true),
	// and cleared when the finish method is called.
	trInfo *traceInfo

	statsHandler stats.Handler
	beginTime    time.Time
}

// commitAttemptLocked marks the current attempt as committed (no further
// retries) and releases the replay buffer. Callers must hold cs.mu.
func (cs *clientStream) commitAttemptLocked() {
	if !cs.committed && cs.onCommit != nil {
		cs.onCommit()
	}
	cs.committed = true
	cs.buffer = nil
}

// commitAttempt is the locking wrapper around commitAttemptLocked.
func (cs *clientStream) commitAttempt() {
	cs.mu.Lock()
	cs.commitAttemptLocked()
	cs.mu.Unlock()
}

// shouldRetry returns a nil error if the RPC should be retried; otherwise it
// returns the error that should be returned by the operation. If the RPC
// should be retried, the bool indicates whether it is being retried
// transparently.
func (cs *clientStream) shouldRetry(err error) (bool, error) {
	if cs.attempt.s == nil {
		// Error from NewClientStream.
		var nse *transport.NewStreamError
		ok := errors.As(err, &nse)
		if !ok {
			// Unexpected, but assume no I/O was performed and the RPC is not
			// fatal, so retry indefinitely.
			return true, nil
		}

		// Unwrap and convert error.
		err = toRPCErr(nse.Err)

		// Never retry DoNotRetry errors, which indicate the RPC should not be
		// retried due to max header list size violation, etc.
		if nse.DoNotRetry {
			return false, err
		}

		// In the event of a non-IO operation error from NewStream, we never
		// attempted to write anything to the wire, so we can retry
		// indefinitely.
		if !nse.DoNotTransparentRetry {
			return true, nil
		}
	}
	if cs.finished || cs.committed {
		// RPC is finished or committed; cannot retry.
		return false, err
	}
	// Wait for the trailers.
	unprocessed := false
	if cs.attempt.s != nil {
		<-cs.attempt.s.Done()
		unprocessed = cs.attempt.s.Unprocessed()
	}
	if cs.firstAttempt && unprocessed {
		// First attempt, stream unprocessed: transparently retry.
		return true, nil
	}
	if cs.cc.dopts.disableRetry {
		return false, err
	}

	pushback := 0
	hasPushback := false
	if cs.attempt.s != nil {
		if !cs.attempt.s.TrailersOnly() {
			return false, err
		}

		// TODO(retry): Move down if the spec changes to not check server pushback
		// before considering this a failure for throttling.
		sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"]
		if len(sps) == 1 {
			var e error
			if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
				channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0])
				cs.retryThrottler.throttle() // This counts as a failure for throttling.
				return false, err
			}
			hasPushback = true
		} else if len(sps) > 1 {
			channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps)
			cs.retryThrottler.throttle() // This counts as a failure for throttling.
			return false, err
		}
	}

	var code codes.Code
	if cs.attempt.s != nil {
		code = cs.attempt.s.Status().Code()
	} else {
		code = status.Convert(err).Code()
	}

	rp := cs.methodConfig.RetryPolicy
	if rp == nil || !rp.RetryableStatusCodes[code] {
		return false, err
	}

	// Note: the ordering here is important; we count this as a failure
	// only if the code matched a retryable code.
	if cs.retryThrottler.throttle() {
		return false, err
	}
	if cs.numRetries+1 >= rp.MaxAttempts {
		return false, err
	}

	var dur time.Duration
	if hasPushback {
		dur = time.Millisecond * time.Duration(pushback)
		cs.numRetriesSincePushback = 0
	} else {
		// Exponential backoff with jitter, capped at MaxBackoff.
		fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback))
		cur := float64(rp.InitialBackoff) * fact
		if max := float64(rp.MaxBackoff); cur > max {
			cur = max
		}
		dur = time.Duration(grpcrand.Int63n(int64(cur)))
		cs.numRetriesSincePushback++
	}

	// TODO(dfawley): we could eagerly fail here if dur puts us past the
	// deadline, but unsure if it is worth doing.
	t := time.NewTimer(dur)
	select {
	case <-t.C:
		cs.numRetries++
		return false, nil
	case <-cs.ctx.Done():
		t.Stop()
		return false, status.FromContextError(cs.ctx.Err()).Err()
	}
}

// Returns nil if a retry was performed and succeeded; error otherwise.
func (cs *clientStream) retryLocked(lastErr error) error {
	for {
		cs.attempt.finish(toRPCErr(lastErr))
		isTransparent, err := cs.shouldRetry(lastErr)
		if err != nil {
			cs.commitAttemptLocked()
			return err
		}
		cs.firstAttempt = false
		if err := cs.newAttemptLocked(isTransparent); err != nil {
			return err
		}
		if lastErr = cs.replayBufferLocked(); lastErr == nil {
			return nil
		}
	}
}

func (cs *clientStream) Context() context.Context {
	cs.commitAttempt()
	// No need to lock before using attempt, since we know it is committed and
	// cannot change.
	return cs.attempt.s.Context()
}

// withRetry runs op on the current attempt, replaying buffered operations and
// creating new attempts (via retryLocked) as needed until op succeeds, the
// attempt is committed, or retries are exhausted. onSuccess is invoked with
// cs.mu held once op succeeds on an uncommitted attempt.
func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
	cs.mu.Lock()
	for {
		if cs.committed {
			cs.mu.Unlock()
			// toRPCErr is used in case the error from the attempt comes from
			// NewClientStream, which intentionally doesn't return a status
			// error to allow for further inspection; all other errors should
			// already be status errors.
			return toRPCErr(op(cs.attempt))
		}
		a := cs.attempt
		cs.mu.Unlock()
		err := op(a)
		cs.mu.Lock()
		if a != cs.attempt {
			// We started another attempt already.
			continue
		}
		if err == io.EOF {
			<-a.s.Done()
		}
		if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
			onSuccess()
			cs.mu.Unlock()
			return err
		}
		if err := cs.retryLocked(err); err != nil {
			cs.mu.Unlock()
			return err
		}
	}
}

func (cs *clientStream) Header() (metadata.MD, error) {
	var m metadata.MD
	err := cs.withRetry(func(a *csAttempt) error {
		var err error
		m, err = a.s.Header()
		return toRPCErr(err)
	}, cs.commitAttemptLocked)
	if err != nil {
		cs.finish(err)
		return nil, err
	}
	if cs.binlog != nil && !cs.serverHeaderBinlogged {
		// Only log if binary log is on and header has not been logged.
		logEntry := &binarylog.ServerHeader{
			OnClientSide: true,
			Header:       m,
			PeerAddr:     nil,
		}
		if p, ok := peer.FromContext(cs.Context()); ok {
			logEntry.PeerAddr = p.Addr
		}
		cs.binlog.Log(logEntry)
		cs.serverHeaderBinlogged = true
	}
	return m, err
}

func (cs *clientStream) Trailer() metadata.MD {
	// On RPC failure, we never need to retry, because usage requires that
	// RecvMsg() returned a non-nil error before calling this function is valid.
	// We would have retried earlier if necessary.
	//
	// Commit the attempt anyway, just in case users are not following those
	// directions -- it will prevent races and should not meaningfully impact
	// performance.
	cs.commitAttempt()
	if cs.attempt.s == nil {
		return nil
	}
	return cs.attempt.s.Trailer()
}

// replayBufferLocked re-applies all buffered operations to attempt a,
// stopping at the first failure. Callers must hold cs.mu.
func (cs *clientStream) replayBufferLocked() error {
	a := cs.attempt
	for _, f := range cs.buffer {
		if err := f(a); err != nil {
			return err
		}
	}
	return nil
}

// bufferForRetryLocked records op for replay on retry, committing the attempt
// instead once the buffered payload exceeds maxRetryRPCBufferSize. Callers
// must hold cs.mu.
func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
	// Note: we still will buffer if retry is disabled (for transparent retries).
	if cs.committed {
		return
	}
	cs.bufferSize += sz
	if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
		cs.commitAttemptLocked()
		return
	}
	cs.buffer = append(cs.buffer, op)
}

func (cs *clientStream) SendMsg(m interface{}) (err error) {
	defer func() {
		if err != nil && err != io.EOF {
			// Call finish on the client stream for errors generated by this SendMsg
			// call, as these indicate problems created by this client. (Transport
			// errors are converted to an io.EOF error in csAttempt.sendMsg; the real
			// error will be returned from RecvMsg eventually in that case, or be
			// retried.)
			cs.finish(err)
		}
	}()
	if cs.sentLast {
		return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
	}
	if !cs.desc.ClientStreams {
		cs.sentLast = true
	}

	// load hdr, payload, data
	hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
	if err != nil {
		return err
	}

	// TODO(dfawley): should we be checking len(data) instead?
	if len(payload) > *cs.callInfo.maxSendMessageSize {
		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
	}
	msgBytes := data // Store the pointer before setting to nil. For binary logging.
	op := func(a *csAttempt) error {
		err := a.sendMsg(m, hdr, payload, data)
		// nil out the message and uncomp when replaying; they are only needed for
		// stats which is disabled for subsequent attempts.
		m, data = nil, nil
		return err
	}
	err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
	if cs.binlog != nil && err == nil {
		cs.binlog.Log(&binarylog.ClientMessage{
			OnClientSide: true,
			Message:      msgBytes,
		})
	}
	return
}

func (cs *clientStream) RecvMsg(m interface{}) error {
	if cs.binlog != nil && !cs.serverHeaderBinlogged {
		// Call Header() to binary log header if it's not already logged.
		_, _ = cs.Header()
	}
	var recvInfo *payloadInfo
	if cs.binlog != nil {
		recvInfo = &payloadInfo{}
	}
	err := cs.withRetry(func(a *csAttempt) error {
		return a.recvMsg(m, recvInfo)
	}, cs.commitAttemptLocked)
	if cs.binlog != nil && err == nil {
		cs.binlog.Log(&binarylog.ServerMessage{
			OnClientSide: true,
			Message:      recvInfo.uncompressedBytes,
		})
	}
	if err != nil || !cs.desc.ServerStreams {
		// err != nil or non-server-streaming indicates end of stream.
		cs.finish(err)

		if cs.binlog != nil {
			// finish will not log Trailer. Log Trailer here.
			logEntry := &binarylog.ServerTrailer{
				OnClientSide: true,
				Trailer:      cs.Trailer(),
				Err:          err,
			}
			if logEntry.Err == io.EOF {
				logEntry.Err = nil
			}
			if p, ok := peer.FromContext(cs.Context()); ok {
				logEntry.PeerAddr = p.Addr
			}
			cs.binlog.Log(logEntry)
		}
	}
	return err
}

func (cs *clientStream) CloseSend() error {
	if cs.sentLast {
		// TODO: return an error and finish the stream instead, due to API misuse?
		return nil
	}
	cs.sentLast = true
	op := func(a *csAttempt) error {
		_ = a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
		// Always return nil; io.EOF is the only error that might make sense
		// instead, but there is no need to signal the client to call RecvMsg
		// as the only use left for the stream after CloseSend is to call
		// RecvMsg. This also matches historical behavior.
		return nil
	}
	_ = cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
	if cs.binlog != nil {
		cs.binlog.Log(&binarylog.ClientHalfClose{
			OnClientSide: true,
		})
	}
	// We never returned an error here for reasons.
	return nil
}

// finish terminates the RPC exactly once: it commits the attempt, finishes it,
// runs CallOption after-functions, updates throttling/channelz stats, and
// cancels the stream's context.
func (cs *clientStream) finish(err error) {
	if err == io.EOF {
		// Ending a stream with EOF indicates a success.
		err = nil
	}
	cs.mu.Lock()
	if cs.finished {
		cs.mu.Unlock()
		return
	}
	cs.finished = true
	cs.commitAttemptLocked()
	if cs.attempt != nil {
		cs.attempt.finish(err)
		// after functions all rely upon having a stream.
		if cs.attempt.s != nil {
			for _, o := range cs.opts {
				o.after(cs.callInfo, cs.attempt)
			}
		}
	}
	cs.mu.Unlock()
	// For binary logging. only log cancel in finish (could be caused by RPC ctx
	// canceled or ClientConn closed). Trailer will be logged in RecvMsg.
	//
	// Only one of cancel or trailer needs to be logged. In the cases where
	// users don't call RecvMsg, users must have already canceled the RPC.
	if cs.binlog != nil && status.Code(err) == codes.Canceled {
		cs.binlog.Log(&binarylog.Cancel{
			OnClientSide: true,
		})
	}
	if err == nil {
		cs.retryThrottler.successfulRPC()
	}
	if channelz.IsOn() {
		if err != nil {
			cs.cc.incrCallsFailed()
		} else {
			cs.cc.incrCallsSucceeded()
		}
	}
	cs.cancel()
}

// sendMsg writes one framed message (hdr + payld) to the transport stream and
// records tracing/stats/channelz bookkeeping for it.
func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
	cs := a.cs
	if a.trInfo != nil {
		a.mu.Lock()
		if a.trInfo.tr != nil {
			a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
		}
		a.mu.Unlock()
	}
	if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
		if !cs.desc.ClientStreams {
			// For non-client-streaming RPCs, we return nil instead of EOF on error
			// because the generated code requires it. finish is not called; RecvMsg()
			// will call it with the stream's status independently.
			return nil
		}
		return io.EOF
	}
	if a.statsHandler != nil {
		a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
	}
	if channelz.IsOn() {
		a.t.IncrMsgSent()
	}
	return nil
}

// recvMsg receives one message into m, lazily resolving the decompressor from
// the first received headers. For non-server-streaming RPCs it performs an
// extra recv to verify the server sent exactly one message.
func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
	cs := a.cs
	if a.statsHandler != nil && payInfo == nil {
		payInfo = &payloadInfo{}
	}

	if !a.decompSet {
		// Block until we receive headers containing received message encoding.
		if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
			if a.dc == nil || a.dc.Type() != ct {
				// No configured decompressor, or it does not match the incoming
				// message encoding; attempt to find a registered compressor that does.
				a.dc = nil
				a.decomp = encoding.GetCompressor(ct)
			}
		} else {
			// No compression is used; disable our decompressor.
			a.dc = nil
		}
		// Only initialize this state once per stream.
		a.decompSet = true
	}
	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
	if err != nil {
		if err == io.EOF {
			if statusErr := a.s.Status().Err(); statusErr != nil {
				return statusErr
			}
			return io.EOF // indicates successful end of stream.
		}
		return toRPCErr(err)
	}
	if a.trInfo != nil {
		a.mu.Lock()
		if a.trInfo.tr != nil {
			a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
		}
		a.mu.Unlock()
	}
	if a.statsHandler != nil {
		a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{
			Client:   true,
			RecvTime: time.Now(),
			Payload:  m,
			// TODO truncate large payload.
			Data:       payInfo.uncompressedBytes,
			WireLength: payInfo.wireLength + headerLen,
			Length:     len(payInfo.uncompressedBytes),
		})
	}
	if channelz.IsOn() {
		a.t.IncrMsgRecv()
	}
	if cs.desc.ServerStreams {
		// Subsequent messages should be received by subsequent RecvMsg calls.
		return nil
	}
	// Special handling for non-server-stream rpcs.
	// This recv expects EOF or errors, so we don't collect inPayload.
	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
	if err == nil {
		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
	}
	if err == io.EOF {
		return a.s.Status().Err() // non-server streaming Recv returns nil on success
	}
	return toRPCErr(err)
}

// finish terminates this attempt exactly once: it closes the transport
// stream, reports the result to the LB picker's done callback and the stats
// handler, and finalizes tracing.
func (a *csAttempt) finish(err error) {
	a.mu.Lock()
	if a.finished {
		a.mu.Unlock()
		return
	}
	a.finished = true
	if err == io.EOF {
		// Ending a stream with EOF indicates a success.
		err = nil
	}
	var tr metadata.MD
	if a.s != nil {
		a.t.CloseStream(a.s, err)
		tr = a.s.Trailer()
	}

	if a.done != nil {
		br := false
		if a.s != nil {
			br = a.s.BytesReceived()
		}
		a.done(balancer.DoneInfo{
			Err:           err,
			Trailer:       tr,
			BytesSent:     a.s != nil,
			BytesReceived: br,
			ServerLoad:    balancerload.Parse(tr),
		})
	}
	if a.statsHandler != nil {
		end := &stats.End{
			Client:    true,
			BeginTime: a.beginTime,
			EndTime:   time.Now(),
			Trailer:   tr,
			Error:     err,
		}
		a.statsHandler.HandleRPC(a.ctx, end)
	}
	if a.trInfo != nil && a.trInfo.tr != nil {
		if err == nil {
			a.trInfo.tr.LazyPrintf("RPC: [OK]")
		} else {
			a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
			a.trInfo.tr.SetError()
		}
		a.trInfo.tr.Finish()
		a.trInfo.tr = nil
	}
	a.mu.Unlock()
}

// newNonRetryClientStream creates a ClientStream with the specified transport,
// on the given addrConn.
//
// It's expected that the given transport is either the same one in addrConn, or
// is already closed. To avoid race, transport is specified separately, instead
// of using ac.transport.
//
// Main difference between this and ClientConn.NewStream:
//   - no retry
//   - no service config (or wait for service config)
//   - no tracing or stats
func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
	if t == nil {
		// TODO: return RPC error here?
		return nil, errors.New("transport provided is nil")
	}
	// defaultCallInfo contains unnecessary info(i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct.
1096 c := &callInfo{} 1097 1098 // Possible context leak: 1099 // The cancel function for the child context we create will only be called 1100 // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if 1101 // an error is generated by SendMsg. 1102 // https://github.com/grpc/grpc-go/issues/1818. 1103 ctx, cancel := context.WithCancel(ctx) 1104 defer func() { 1105 if err != nil { 1106 cancel() 1107 } 1108 }() 1109 1110 for _, o := range opts { 1111 if err := o.before(c); err != nil { 1112 return nil, toRPCErr(err) 1113 } 1114 } 1115 c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) 1116 c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize) 1117 if err := setCallInfoCodec(c); err != nil { 1118 return nil, err 1119 } 1120 1121 callHdr := &transport.CallHdr{ 1122 Host: ac.cc.authority, 1123 Method: method, 1124 ContentSubtype: c.contentSubtype, 1125 } 1126 1127 // Set our outgoing compression according to the UseCompressor CallOption, if 1128 // set. In that case, also find the compressor from the encoding package. 1129 // Otherwise, use the compressor configured by the WithCompressor DialOption, 1130 // if set. 1131 var cp Compressor 1132 var comp encoding.Compressor 1133 if ct := c.compressorType; ct != "" { 1134 callHdr.SendCompress = ct 1135 if ct != encoding.Identity { 1136 comp = encoding.GetCompressor(ct) 1137 if comp == nil { 1138 return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) 1139 } 1140 } 1141 } else if ac.cc.dopts.cp != nil { 1142 callHdr.SendCompress = ac.cc.dopts.cp.Type() 1143 cp = ac.cc.dopts.cp 1144 } 1145 if c.creds != nil { 1146 callHdr.Creds = c.creds 1147 } 1148 1149 // Use a special addrConnStream to avoid retry. 
1150 as := &addrConnStream{ 1151 callHdr: callHdr, 1152 ac: ac, 1153 ctx: ctx, 1154 cancel: cancel, 1155 opts: opts, 1156 callInfo: c, 1157 desc: desc, 1158 codec: c.codec, 1159 cp: cp, 1160 comp: comp, 1161 t: t, 1162 } 1163 1164 s, err := as.t.NewStream(as.ctx, as.callHdr) 1165 if err != nil { 1166 err = toRPCErr(err) 1167 return nil, err 1168 } 1169 as.s = s 1170 as.p = &parser{r: s} 1171 ac.incrCallsStarted() 1172 if desc != unaryStreamDesc { 1173 // Listen on cc and stream contexts to cleanup when the user closes the 1174 // ClientConn or cancels the stream context. In all other cases, an error 1175 // should already be injected into the recv buffer by the transport, which 1176 // the client will eventually receive, and then we will cancel the stream's 1177 // context in clientStream.finish. 1178 go func() { 1179 select { 1180 case <-ac.ctx.Done(): 1181 as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) 1182 case <-ctx.Done(): 1183 as.finish(toRPCErr(ctx.Err())) 1184 } 1185 }() 1186 } 1187 return as, nil 1188 } 1189 1190 type addrConnStream struct { 1191 s *transport.Stream 1192 ac *addrConn 1193 callHdr *transport.CallHdr 1194 cancel context.CancelFunc 1195 opts []CallOption 1196 callInfo *callInfo 1197 t transport.ClientTransport 1198 ctx context.Context 1199 sentLast bool 1200 desc *StreamDesc 1201 codec baseCodec 1202 cp Compressor 1203 comp encoding.Compressor 1204 decompSet bool 1205 dc Decompressor 1206 decomp encoding.Compressor 1207 p *parser 1208 mu sync.Mutex 1209 finished bool 1210 } 1211 1212 func (as *addrConnStream) Header() (metadata.MD, error) { 1213 m, err := as.s.Header() 1214 if err != nil { 1215 as.finish(toRPCErr(err)) 1216 } 1217 return m, err 1218 } 1219 1220 func (as *addrConnStream) Trailer() metadata.MD { 1221 return as.s.Trailer() 1222 } 1223 1224 func (as *addrConnStream) CloseSend() error { 1225 if as.sentLast { 1226 // TODO: return an error and finish the stream instead, due to API misuse? 
1227 return nil 1228 } 1229 as.sentLast = true 1230 1231 _ = as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) 1232 // Always return nil; io.EOF is the only error that might make sense 1233 // instead, but there is no need to signal the client to call RecvMsg 1234 // as the only use left for the stream after CloseSend is to call 1235 // RecvMsg. This also matches historical behavior. 1236 return nil 1237 } 1238 1239 func (as *addrConnStream) Context() context.Context { 1240 return as.s.Context() 1241 } 1242 1243 func (as *addrConnStream) SendMsg(m interface{}) (err error) { 1244 defer func() { 1245 if err != nil && err != io.EOF { 1246 // Call finish on the client stream for errors generated by this SendMsg 1247 // call, as these indicate problems created by this client. (Transport 1248 // errors are converted to an io.EOF error in csAttempt.sendMsg; the real 1249 // error will be returned from RecvMsg eventually in that case, or be 1250 // retried.) 1251 as.finish(err) 1252 } 1253 }() 1254 if as.sentLast { 1255 return status.Errorf(codes.Internal, "SendMsg called after CloseSend") 1256 } 1257 if !as.desc.ClientStreams { 1258 as.sentLast = true 1259 } 1260 1261 // load hdr, payload, data 1262 hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) 1263 if err != nil { 1264 return err 1265 } 1266 1267 // TODO(dfawley): should we be checking len(data) instead? 1268 if len(payld) > *as.callInfo.maxSendMessageSize { 1269 return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) 1270 } 1271 1272 if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { 1273 if !as.desc.ClientStreams { 1274 // For non-client-streaming RPCs, we return nil instead of EOF on error 1275 // because the generated code requires it. finish is not called; RecvMsg() 1276 // will call it with the stream's status independently. 
1277 return nil 1278 } 1279 return io.EOF 1280 } 1281 1282 if channelz.IsOn() { 1283 as.t.IncrMsgSent() 1284 } 1285 return nil 1286 } 1287 1288 func (as *addrConnStream) RecvMsg(m interface{}) (err error) { 1289 defer func() { 1290 if err != nil || !as.desc.ServerStreams { 1291 // err != nil or non-server-streaming indicates end of stream. 1292 as.finish(err) 1293 } 1294 }() 1295 1296 if !as.decompSet { 1297 // Block until we receive headers containing received message encoding. 1298 if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity { 1299 if as.dc == nil || as.dc.Type() != ct { 1300 // No configured decompressor, or it does not match the incoming 1301 // message encoding; attempt to find a registered compressor that does. 1302 as.dc = nil 1303 as.decomp = encoding.GetCompressor(ct) 1304 } 1305 } else { 1306 // No compression is used; disable our decompressor. 1307 as.dc = nil 1308 } 1309 // Only initialize this state once per stream. 1310 as.decompSet = true 1311 } 1312 err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) 1313 if err != nil { 1314 if err == io.EOF { 1315 if statusErr := as.s.Status().Err(); statusErr != nil { 1316 return statusErr 1317 } 1318 return io.EOF // indicates successful end of stream. 1319 } 1320 return toRPCErr(err) 1321 } 1322 1323 if channelz.IsOn() { 1324 as.t.IncrMsgRecv() 1325 } 1326 if as.desc.ServerStreams { 1327 // Subsequent messages should be received by subsequent RecvMsg calls. 1328 return nil 1329 } 1330 1331 // Special handling for non-server-stream rpcs. 1332 // This recv expects EOF or errors, so we don't collect inPayload. 
1333 err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) 1334 if err == nil { 1335 return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) 1336 } 1337 if err == io.EOF { 1338 return as.s.Status().Err() // non-server streaming Recv returns nil on success 1339 } 1340 return toRPCErr(err) 1341 } 1342 1343 func (as *addrConnStream) finish(err error) { 1344 as.mu.Lock() 1345 if as.finished { 1346 as.mu.Unlock() 1347 return 1348 } 1349 as.finished = true 1350 if err == io.EOF { 1351 // Ending a stream with EOF indicates a success. 1352 err = nil 1353 } 1354 if as.s != nil { 1355 as.t.CloseStream(as.s, err) 1356 } 1357 1358 if err != nil { 1359 as.ac.incrCallsFailed() 1360 } else { 1361 as.ac.incrCallsSucceeded() 1362 } 1363 as.cancel() 1364 as.mu.Unlock() 1365 } 1366 1367 // ServerStream defines the server-side behavior of a streaming RPC. 1368 // 1369 // All errors returned from ServerStream methods are compatible with the 1370 // status package. 1371 type ServerStream interface { 1372 // SetHeader sets the header metadata. It may be called multiple times. 1373 // When call multiple times, all the provided metadata will be merged. 1374 // All the metadata will be sent out when one of the following happens: 1375 // - ServerStream.SendHeader() is called; 1376 // - The first response is sent out; 1377 // - An RPC status is sent out (error or success). 1378 SetHeader(metadata.MD) error 1379 // SendHeader sends the header metadata. 1380 // The provided md and headers set by SetHeader() will be sent. 1381 // It fails if called multiple times. 1382 SendHeader(metadata.MD) error 1383 // SetTrailer sets the trailer metadata which will be sent with the RPC status. 1384 // When called more than once, all the provided metadata will be merged. 1385 SetTrailer(metadata.MD) 1386 // Context returns the context for this stream. 1387 Context() context.Context 1388 // SendMsg sends a message. 
On error, SendMsg aborts the stream and the 1389 // error is returned directly. 1390 // 1391 // SendMsg blocks until: 1392 // - There is sufficient flow control to schedule m with the transport, or 1393 // - The stream is done, or 1394 // - The stream breaks. 1395 // 1396 // SendMsg does not wait until the message is received by the client. An 1397 // untimely stream closure may result in lost messages. 1398 // 1399 // It is safe to have a goroutine calling SendMsg and another goroutine 1400 // calling RecvMsg on the same stream at the same time, but it is not safe 1401 // to call SendMsg on the same stream in different goroutines. 1402 SendMsg(m interface{}) error 1403 // RecvMsg blocks until it receives a message into m or the stream is 1404 // done. It returns io.EOF when the client has performed a CloseSend. On 1405 // any non-EOF error, the stream is aborted and the error contains the 1406 // RPC status. 1407 // 1408 // It is safe to have a goroutine calling SendMsg and another goroutine 1409 // calling RecvMsg on the same stream at the same time, but it is not 1410 // safe to call RecvMsg on the same stream in different goroutines. 1411 RecvMsg(m interface{}) error 1412 } 1413 1414 // serverStream implements a server side Stream. 1415 type serverStream struct { 1416 ctx context.Context 1417 t transport.ServerTransport 1418 s *transport.Stream 1419 p *parser 1420 codec baseCodec 1421 1422 cp Compressor 1423 dc Decompressor 1424 comp encoding.Compressor 1425 decomp encoding.Compressor 1426 1427 maxReceiveMessageSize int 1428 maxSendMessageSize int 1429 trInfo *traceInfo 1430 1431 statsHandler stats.Handler 1432 1433 binlog *binarylog.MethodLogger 1434 // serverHeaderBinlogged indicates whether server header has been logged. It 1435 // will happen when one of the following two happens: stream.SendHeader(), 1436 // stream.Send(). 1437 // 1438 // It's only checked in send and sendHeader, doesn't need to be 1439 // synchronized. 
1440 serverHeaderBinlogged bool 1441 1442 mu sync.Mutex // protects trInfo.tr after the service handler runs. 1443 } 1444 1445 func (ss *serverStream) Context() context.Context { 1446 return ss.ctx 1447 } 1448 1449 func (ss *serverStream) SetHeader(md metadata.MD) error { 1450 if md.Len() == 0 { 1451 return nil 1452 } 1453 return ss.s.SetHeader(md) 1454 } 1455 1456 func (ss *serverStream) SendHeader(md metadata.MD) error { 1457 err := ss.t.WriteHeader(ss.s, md) 1458 if ss.binlog != nil && !ss.serverHeaderBinlogged { 1459 h, _ := ss.s.Header() 1460 ss.binlog.Log(&binarylog.ServerHeader{ 1461 Header: h, 1462 }) 1463 ss.serverHeaderBinlogged = true 1464 } 1465 return err 1466 } 1467 1468 func (ss *serverStream) SetTrailer(md metadata.MD) { 1469 if md.Len() == 0 { 1470 return 1471 } 1472 _ = ss.s.SetTrailer(md) 1473 } 1474 1475 func (ss *serverStream) SendMsg(m interface{}) (err error) { 1476 defer func() { 1477 if ss.trInfo != nil { 1478 ss.mu.Lock() 1479 if ss.trInfo.tr != nil { 1480 if err == nil { 1481 ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) 1482 } else { 1483 ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) 1484 ss.trInfo.tr.SetError() 1485 } 1486 } 1487 ss.mu.Unlock() 1488 } 1489 if err != nil && err != io.EOF { 1490 st, _ := status.FromError(toRPCErr(err)) 1491 _ = ss.t.WriteStatus(ss.s, st) 1492 // Non-user specified status was sent out. This should be an error 1493 // case (as a server side Cancel maybe). 1494 // 1495 // This is not handled specifically now. User will return a final 1496 // status from the service handler, we will log that error instead. 1497 // This behavior is similar to an interceptor. 1498 } 1499 if channelz.IsOn() && err == nil { 1500 ss.t.IncrMsgSent() 1501 } 1502 }() 1503 1504 // load hdr, payload, data 1505 hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) 1506 if err != nil { 1507 return err 1508 } 1509 1510 // TODO(dfawley): should we be checking len(data) instead? 
1511 if len(payload) > ss.maxSendMessageSize { 1512 return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) 1513 } 1514 if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { 1515 return toRPCErr(err) 1516 } 1517 if ss.binlog != nil { 1518 if !ss.serverHeaderBinlogged { 1519 h, _ := ss.s.Header() 1520 ss.binlog.Log(&binarylog.ServerHeader{ 1521 Header: h, 1522 }) 1523 ss.serverHeaderBinlogged = true 1524 } 1525 ss.binlog.Log(&binarylog.ServerMessage{ 1526 Message: data, 1527 }) 1528 } 1529 if ss.statsHandler != nil { 1530 ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) 1531 } 1532 return nil 1533 } 1534 1535 func (ss *serverStream) RecvMsg(m interface{}) (err error) { 1536 defer func() { 1537 if ss.trInfo != nil { 1538 ss.mu.Lock() 1539 if ss.trInfo.tr != nil { 1540 if err == nil { 1541 ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) 1542 } else if err != io.EOF { 1543 ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) 1544 ss.trInfo.tr.SetError() 1545 } 1546 } 1547 ss.mu.Unlock() 1548 } 1549 if err != nil && err != io.EOF { 1550 st, _ := status.FromError(toRPCErr(err)) 1551 _ = ss.t.WriteStatus(ss.s, st) 1552 // Non-user specified status was sent out. This should be an error 1553 // case (as a server side Cancel maybe). 1554 // 1555 // This is not handled specifically now. User will return a final 1556 // status from the service handler, we will log that error instead. 1557 // This behavior is similar to an interceptor. 
1558 } 1559 if channelz.IsOn() && err == nil { 1560 ss.t.IncrMsgRecv() 1561 } 1562 }() 1563 var payInfo *payloadInfo 1564 if ss.statsHandler != nil || ss.binlog != nil { 1565 payInfo = &payloadInfo{} 1566 } 1567 if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { 1568 if err == io.EOF { 1569 if ss.binlog != nil { 1570 ss.binlog.Log(&binarylog.ClientHalfClose{}) 1571 } 1572 return err 1573 } 1574 if errors.Is(err, io.ErrUnexpectedEOF) { 1575 err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) 1576 } 1577 return toRPCErr(err) 1578 } 1579 if ss.statsHandler != nil { 1580 ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ 1581 RecvTime: time.Now(), 1582 Payload: m, 1583 // TODO truncate large payload. 1584 Data: payInfo.uncompressedBytes, 1585 WireLength: payInfo.wireLength + headerLen, 1586 Length: len(payInfo.uncompressedBytes), 1587 }) 1588 } 1589 if ss.binlog != nil { 1590 ss.binlog.Log(&binarylog.ClientMessage{ 1591 Message: payInfo.uncompressedBytes, 1592 }) 1593 } 1594 return nil 1595 } 1596 1597 // MethodFromServerStream returns the method string for the input stream. 1598 // The returned string is in the format of "/service/method". 1599 func MethodFromServerStream(stream ServerStream) (string, bool) { 1600 return Method(stream.Context()) 1601 } 1602 1603 // prepareMsg returns the hdr, payload and data 1604 // using the compressors passed or using the 1605 // passed preparedmsg 1606 func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { 1607 if preparedMsg, ok := m.(*PreparedMsg); ok { 1608 return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil 1609 } 1610 // The input interface is not a prepared msg. 
1611 // Marshal and Compress the data at this point 1612 data, err = encode(codec, m) 1613 if err != nil { 1614 return nil, nil, nil, err 1615 } 1616 compData, err := compress(data, cp, comp) 1617 if err != nil { 1618 return nil, nil, nil, err 1619 } 1620 hdr, payload = msgHeader(data, compData) 1621 return hdr, payload, data, nil 1622 }