google.golang.org/grpc@v1.62.1/stream.go

     1  /*
     2   *
     3   * Copyright 2014 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   */
    18  
    19  package grpc
    20  
    21  import (
    22  	"context"
    23  	"errors"
    24  	"io"
    25  	"math"
    26  	"strconv"
    27  	"sync"
    28  	"time"
    29  
    30  	"google.golang.org/grpc/balancer"
    31  	"google.golang.org/grpc/codes"
    32  	"google.golang.org/grpc/encoding"
    33  	"google.golang.org/grpc/internal"
    34  	"google.golang.org/grpc/internal/balancerload"
    35  	"google.golang.org/grpc/internal/binarylog"
    36  	"google.golang.org/grpc/internal/channelz"
    37  	"google.golang.org/grpc/internal/grpcrand"
    38  	"google.golang.org/grpc/internal/grpcutil"
    39  	imetadata "google.golang.org/grpc/internal/metadata"
    40  	iresolver "google.golang.org/grpc/internal/resolver"
    41  	"google.golang.org/grpc/internal/serviceconfig"
    42  	istatus "google.golang.org/grpc/internal/status"
    43  	"google.golang.org/grpc/internal/transport"
    44  	"google.golang.org/grpc/metadata"
    45  	"google.golang.org/grpc/peer"
    46  	"google.golang.org/grpc/stats"
    47  	"google.golang.org/grpc/status"
    48  )
    49  
    50  var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
    51  
    52  // StreamHandler defines the handler called by gRPC server to complete the
    53  // execution of a streaming RPC.
    54  //
    55  // If a StreamHandler returns an error, it should either be produced by the
    56  // status package, or be one of the context errors. Otherwise, gRPC will use
    57  // codes.Unknown as the status code and err.Error() as the status message of the
    58  // RPC.
    59  type StreamHandler func(srv any, stream ServerStream) error
    60  
    61  // StreamDesc represents a streaming RPC service's method specification.  Used
    62  // on the server when registering services and on the client when initiating
    63  // new streams.
    64  type StreamDesc struct {
    65  	// StreamName and Handler are only used when registering handlers on a
    66  	// server.
    67  	StreamName string        // the name of the method excluding the service
    68  	Handler    StreamHandler // the handler called for the method
    69  
    70  	// ServerStreams and ClientStreams are used for registering handlers on a
    71  	// server as well as defining RPC behavior when passed to NewClientStream
    72  	// and ClientConn.NewStream.  At least one must be true.
    73  	ServerStreams bool // indicates the server can perform streaming sends
    74  	ClientStreams bool // indicates the client can perform streaming sends
    75  }
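// A minimal sketch of how a StreamDesc is typically declared and used; the
// service/method names ("routeguide.RouteGuide"/"RouteChat") and the handler
// are hypothetical stand-ins for what protoc-generated code emits.
//
//	var routeChatStreamDesc = grpc.StreamDesc{
//		StreamName:    "RouteChat",      // method name without the service prefix
//		Handler:       routeChatHandler, // used only when registering on a server
//		ServerStreams: true,             // server may send a stream of messages
//		ClientStreams: true,             // client may send a stream of messages
//	}
//
//	// On the client, the same descriptor is passed to ClientConn.NewStream
//	// together with the full method name:
//	//	stream, err := cc.NewStream(ctx, &routeChatStreamDesc, "/routeguide.RouteGuide/RouteChat")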
    76  
    77  // Stream defines the common interface a client or server stream has to satisfy.
    78  //
    79  // Deprecated: See ClientStream and ServerStream documentation instead.
    80  type Stream interface {
    81  	// Deprecated: See ClientStream and ServerStream documentation instead.
    82  	Context() context.Context
    83  	// Deprecated: See ClientStream and ServerStream documentation instead.
    84  	SendMsg(m any) error
    85  	// Deprecated: See ClientStream and ServerStream documentation instead.
    86  	RecvMsg(m any) error
    87  }
    88  
    89  // ClientStream defines the client-side behavior of a streaming RPC.
    90  //
    91  // All errors returned from ClientStream methods are compatible with the
    92  // status package.
    93  type ClientStream interface {
    94  	// Header returns the header metadata received from the server if there
    95  	// is any. It blocks if the metadata is not ready to read.  If the metadata
    96  	// is nil and the error is also nil, then the stream was terminated without
    97  	// headers, and the status can be discovered by calling RecvMsg.
    98  	Header() (metadata.MD, error)
    99  	// Trailer returns the trailer metadata from the server, if there is any.
   100  	// It must only be called after stream.CloseAndRecv has returned, or
   101  	// stream.Recv has returned a non-nil error (including io.EOF).
   102  	Trailer() metadata.MD
    103  	// CloseSend closes the send direction of the stream. It closes the stream
    104  	// when a non-nil error is encountered. It is also not safe to call
    105  	// CloseSend concurrently with SendMsg.
   106  	CloseSend() error
   107  	// Context returns the context for this stream.
   108  	//
   109  	// It should not be called until after Header or RecvMsg has returned. Once
   110  	// called, subsequent client-side retries are disabled.
   111  	Context() context.Context
   112  	// SendMsg is generally called by generated code. On error, SendMsg aborts
   113  	// the stream. If the error was generated by the client, the status is
   114  	// returned directly; otherwise, io.EOF is returned and the status of
   115  	// the stream may be discovered using RecvMsg.
   116  	//
   117  	// SendMsg blocks until:
   118  	//   - There is sufficient flow control to schedule m with the transport, or
   119  	//   - The stream is done, or
   120  	//   - The stream breaks.
   121  	//
   122  	// SendMsg does not wait until the message is received by the server. An
   123  	// untimely stream closure may result in lost messages. To ensure delivery,
   124  	// users should ensure the RPC completed successfully using RecvMsg.
   125  	//
   126  	// It is safe to have a goroutine calling SendMsg and another goroutine
   127  	// calling RecvMsg on the same stream at the same time, but it is not safe
   128  	// to call SendMsg on the same stream in different goroutines. It is also
   129  	// not safe to call CloseSend concurrently with SendMsg.
   130  	//
   131  	// It is not safe to modify the message after calling SendMsg. Tracing
   132  	// libraries and stats handlers may use the message lazily.
   133  	SendMsg(m any) error
   134  	// RecvMsg blocks until it receives a message into m or the stream is
   135  	// done. It returns io.EOF when the stream completes successfully. On
   136  	// any other error, the stream is aborted and the error contains the RPC
   137  	// status.
   138  	//
   139  	// It is safe to have a goroutine calling SendMsg and another goroutine
   140  	// calling RecvMsg on the same stream at the same time, but it is not
   141  	// safe to call RecvMsg on the same stream in different goroutines.
   142  	RecvMsg(m any) error
   143  }
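// A hedged usage sketch for a bidirectional ClientStream as returned by
// generated code (the client method RouteChat and message type pb.Note are
// hypothetical). It follows the concurrency rules above: one goroutine sends,
// another receives, and RecvMsg is drained until a non-nil error (io.EOF on
// success) so the stream's resources are released.
//
//	stream, err := client.RouteChat(ctx)
//	if err != nil {
//		return err
//	}
//	go func() {
//		for _, n := range notes {
//			if err := stream.SendMsg(n); err != nil {
//				return // the real status is surfaced by RecvMsg below
//			}
//		}
//		stream.CloseSend()
//	}()
//	for {
//		in := new(pb.Note)
//		if err := stream.RecvMsg(in); err != nil {
//			if err == io.EOF {
//				break // stream completed successfully
//			}
//			return err // aborted; err carries the RPC status
//		}
//		// use in
//	}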
   144  
   145  // NewStream creates a new Stream for the client side. This is typically
   146  // called by generated code. ctx is used for the lifetime of the stream.
   147  //
   148  // To ensure resources are not leaked due to the stream returned, one of the following
   149  // actions must be performed:
   150  //
   151  //  1. Call Close on the ClientConn.
   152  //  2. Cancel the context provided.
   153  //  3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
   154  //     client-streaming RPC, for instance, might use the helper function
   155  //     CloseAndRecv (note that CloseSend does not Recv, therefore is not
   156  //     guaranteed to release all resources).
   157  //  4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
   158  //
   159  // If none of the above happen, a goroutine and a context will be leaked, and grpc
   160  // will not call the optionally-configured stats handler with a stats.End message.
   161  func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
   162  	// allow interceptor to see all applicable call options, which means those
   163  	// configured as defaults from dial option as well as per-call options
   164  	opts = combine(cc.dopts.callOptions, opts)
   165  
   166  	if cc.dopts.streamInt != nil {
   167  		return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
   168  	}
   169  	return newClientStream(ctx, desc, cc, method, opts...)
   170  }
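// A sketch of calling NewStream directly (rather than via generated code)
// while honoring the resource rules documented above; the method name, req,
// and resp values are hypothetical.
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel() // rule 2: guarantees cleanup even on early return
//	stream, err := cc.NewStream(ctx, &grpc.StreamDesc{ServerStreams: true}, "/pkg.Service/ServerStreamingMethod")
//	if err != nil {
//		return err
//	}
//	if err := stream.SendMsg(req); err != nil && err != io.EOF {
//		return err // rule 4; on io.EOF the status is surfaced by RecvMsg below
//	}
//	if err := stream.CloseSend(); err != nil {
//		return err
//	}
//	for {
//		if err := stream.RecvMsg(resp); err != nil {
//			if err == io.EOF {
//				return nil // rule 3: stream fully drained
//			}
//			return err
//		}
//	}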
   171  
   172  // NewClientStream is a wrapper for ClientConn.NewStream.
   173  func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
   174  	return cc.NewStream(ctx, desc, method, opts...)
   175  }
   176  
   177  func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
   178  	// Start tracking the RPC for idleness purposes. This is where a stream is
   179  	// created for both streaming and unary RPCs, and hence is a good place to
   180  	// track active RPC count.
   181  	if err := cc.idlenessMgr.OnCallBegin(); err != nil {
   182  		return nil, err
   183  	}
    184  	// Add a CallOption, to decrement the active call count, that gets executed
   185  	// when the RPC completes.
   186  	opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...)
   187  
   188  	if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
   189  		// validate md
   190  		if err := imetadata.Validate(md); err != nil {
   191  			return nil, status.Error(codes.Internal, err.Error())
   192  		}
   193  		// validate added
   194  		for _, kvs := range added {
   195  			for i := 0; i < len(kvs); i += 2 {
   196  				if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil {
   197  					return nil, status.Error(codes.Internal, err.Error())
   198  				}
   199  			}
   200  		}
   201  	}
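	// The metadata validated above is whatever the caller attached to the
	// outgoing context; a typical (hypothetical) example using the public
	// metadata package:
	//
	//	ctx = metadata.AppendToOutgoingContext(ctx, "x-request-id", "abc123")
	//	// or, replacing any existing outgoing metadata:
	//	ctx = metadata.NewOutgoingContext(ctx, metadata.Pairs("x-request-id", "abc123"))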
   202  	if channelz.IsOn() {
   203  		cc.incrCallsStarted()
   204  		defer func() {
   205  			if err != nil {
   206  				cc.incrCallsFailed()
   207  			}
   208  		}()
   209  	}
   210  	// Provide an opportunity for the first RPC to see the first service config
   211  	// provided by the resolver.
   212  	if err := cc.waitForResolvedAddrs(ctx); err != nil {
   213  		return nil, err
   214  	}
   215  
   216  	var mc serviceconfig.MethodConfig
   217  	var onCommit func()
   218  	var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
   219  		return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...)
   220  	}
   221  
   222  	rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method}
   223  	rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo)
   224  	if err != nil {
   225  		if st, ok := status.FromError(err); ok {
   226  			// Restrict the code to the list allowed by gRFC A54.
   227  			if istatus.IsRestrictedControlPlaneCode(st) {
   228  				err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err)
   229  			}
   230  			return nil, err
   231  		}
   232  		return nil, toRPCErr(err)
   233  	}
   234  
   235  	if rpcConfig != nil {
   236  		if rpcConfig.Context != nil {
   237  			ctx = rpcConfig.Context
   238  		}
   239  		mc = rpcConfig.MethodConfig
   240  		onCommit = rpcConfig.OnCommitted
   241  		if rpcConfig.Interceptor != nil {
   242  			rpcInfo.Context = nil
   243  			ns := newStream
   244  			newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
   245  				cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns)
   246  				if err != nil {
   247  					return nil, toRPCErr(err)
   248  				}
   249  				return cs, nil
   250  			}
   251  		}
   252  	}
   253  
   254  	return newStream(ctx, func() {})
   255  }
   256  
   257  func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) {
   258  	c := defaultCallInfo()
   259  	if mc.WaitForReady != nil {
   260  		c.failFast = !*mc.WaitForReady
   261  	}
   262  
   263  	// Possible context leak:
   264  	// The cancel function for the child context we create will only be called
   265  	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
   266  	// an error is generated by SendMsg.
   267  	// https://github.com/grpc/grpc-go/issues/1818.
   268  	var cancel context.CancelFunc
   269  	if mc.Timeout != nil && *mc.Timeout >= 0 {
   270  		ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
   271  	} else {
   272  		ctx, cancel = context.WithCancel(ctx)
   273  	}
   274  	defer func() {
   275  		if err != nil {
   276  			cancel()
   277  		}
   278  	}()
   279  
   280  	for _, o := range opts {
   281  		if err := o.before(c); err != nil {
   282  			return nil, toRPCErr(err)
   283  		}
   284  	}
   285  	c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
   286  	c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
   287  	if err := setCallInfoCodec(c); err != nil {
   288  		return nil, err
   289  	}
   290  
   291  	callHdr := &transport.CallHdr{
   292  		Host:           cc.authority,
   293  		Method:         method,
   294  		ContentSubtype: c.contentSubtype,
   295  		DoneFunc:       doneFunc,
   296  	}
   297  
   298  	// Set our outgoing compression according to the UseCompressor CallOption, if
   299  	// set.  In that case, also find the compressor from the encoding package.
   300  	// Otherwise, use the compressor configured by the WithCompressor DialOption,
   301  	// if set.
   302  	var cp Compressor
   303  	var comp encoding.Compressor
   304  	if ct := c.compressorType; ct != "" {
   305  		callHdr.SendCompress = ct
   306  		if ct != encoding.Identity {
   307  			comp = encoding.GetCompressor(ct)
   308  			if comp == nil {
   309  				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
   310  			}
   311  		}
   312  	} else if cc.dopts.cp != nil {
   313  		callHdr.SendCompress = cc.dopts.cp.Type()
   314  		cp = cc.dopts.cp
   315  	}
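	// For reference, the two configuration paths handled above look roughly
	// like this from the caller's side (a sketch; target and method are
	// hypothetical):
	//
	//	import _ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" encoding.Compressor
	//
	//	// Per-connection (legacy path): sets cc.dopts.cp, handled by the else-if branch above.
	//	cc, err := grpc.Dial(target, grpc.WithCompressor(grpc.NewGZIPCompressor()))
	//
	//	// Per-call: sets c.compressorType, handled by the first branch above.
	//	err = cc.Invoke(ctx, "/pkg.Service/Method", req, resp, grpc.UseCompressor("gzip"))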
   316  	if c.creds != nil {
   317  		callHdr.Creds = c.creds
   318  	}
   319  
   320  	cs := &clientStream{
   321  		callHdr:      callHdr,
   322  		ctx:          ctx,
   323  		methodConfig: &mc,
   324  		opts:         opts,
   325  		callInfo:     c,
   326  		cc:           cc,
   327  		desc:         desc,
   328  		codec:        c.codec,
   329  		cp:           cp,
   330  		comp:         comp,
   331  		cancel:       cancel,
   332  		firstAttempt: true,
   333  		onCommit:     onCommit,
   334  	}
   335  	if !cc.dopts.disableRetry {
   336  		cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
   337  	}
   338  	if ml := binarylog.GetMethodLogger(method); ml != nil {
   339  		cs.binlogs = append(cs.binlogs, ml)
   340  	}
   341  	if cc.dopts.binaryLogger != nil {
   342  		if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil {
   343  			cs.binlogs = append(cs.binlogs, ml)
   344  		}
   345  	}
   346  
   347  	// Pick the transport to use and create a new stream on the transport.
   348  	// Assign cs.attempt upon success.
   349  	op := func(a *csAttempt) error {
   350  		if err := a.getTransport(); err != nil {
   351  			return err
   352  		}
   353  		if err := a.newStream(); err != nil {
   354  			return err
   355  		}
   356  		// Because this operation is always called either here (while creating
   357  		// the clientStream) or by the retry code while locked when replaying
   358  		// the operation, it is safe to access cs.attempt directly.
   359  		cs.attempt = a
   360  		return nil
   361  	}
   362  	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
   363  		return nil, err
   364  	}
   365  
   366  	if len(cs.binlogs) != 0 {
   367  		md, _ := metadata.FromOutgoingContext(ctx)
   368  		logEntry := &binarylog.ClientHeader{
   369  			OnClientSide: true,
   370  			Header:       md,
   371  			MethodName:   method,
   372  			Authority:    cs.cc.authority,
   373  		}
   374  		if deadline, ok := ctx.Deadline(); ok {
   375  			logEntry.Timeout = time.Until(deadline)
   376  			if logEntry.Timeout < 0 {
   377  				logEntry.Timeout = 0
   378  			}
   379  		}
   380  		for _, binlog := range cs.binlogs {
   381  			binlog.Log(cs.ctx, logEntry)
   382  		}
   383  	}
   384  
   385  	if desc != unaryStreamDesc {
    386  		// Listen on cc and stream contexts to clean up when the user closes the
   387  		// ClientConn or cancels the stream context.  In all other cases, an error
   388  		// should already be injected into the recv buffer by the transport, which
   389  		// the client will eventually receive, and then we will cancel the stream's
   390  		// context in clientStream.finish.
   391  		go func() {
   392  			select {
   393  			case <-cc.ctx.Done():
   394  				cs.finish(ErrClientConnClosing)
   395  			case <-ctx.Done():
   396  				cs.finish(toRPCErr(ctx.Err()))
   397  			}
   398  		}()
   399  	}
   400  	return cs, nil
   401  }
   402  
   403  // newAttemptLocked creates a new csAttempt without a transport or stream.
   404  func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) {
   405  	if err := cs.ctx.Err(); err != nil {
   406  		return nil, toRPCErr(err)
   407  	}
   408  	if err := cs.cc.ctx.Err(); err != nil {
   409  		return nil, ErrClientConnClosing
   410  	}
   411  
   412  	ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp)
   413  	method := cs.callHdr.Method
   414  	var beginTime time.Time
   415  	shs := cs.cc.dopts.copts.StatsHandlers
   416  	for _, sh := range shs {
   417  		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast})
   418  		beginTime = time.Now()
   419  		begin := &stats.Begin{
   420  			Client:                    true,
   421  			BeginTime:                 beginTime,
   422  			FailFast:                  cs.callInfo.failFast,
   423  			IsClientStream:            cs.desc.ClientStreams,
   424  			IsServerStream:            cs.desc.ServerStreams,
   425  			IsTransparentRetryAttempt: isTransparent,
   426  		}
   427  		sh.HandleRPC(ctx, begin)
   428  	}
   429  
   430  	var trInfo *traceInfo
   431  	if EnableTracing {
   432  		trInfo = &traceInfo{
   433  			tr: newTrace("grpc.Sent."+methodFamily(method), method),
   434  			firstLine: firstLine{
   435  				client: true,
   436  			},
   437  		}
   438  		if deadline, ok := ctx.Deadline(); ok {
   439  			trInfo.firstLine.deadline = time.Until(deadline)
   440  		}
   441  		trInfo.tr.LazyLog(&trInfo.firstLine, false)
   442  		ctx = newTraceContext(ctx, trInfo.tr)
   443  	}
   444  
   445  	if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata {
   446  		// Add extra metadata (metadata that will be added by transport) to context
   447  		// so the balancer can see them.
   448  		ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
   449  			"content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
   450  		))
   451  	}
   452  
   453  	return &csAttempt{
   454  		ctx:           ctx,
   455  		beginTime:     beginTime,
   456  		cs:            cs,
   457  		dc:            cs.cc.dopts.dc,
   458  		statsHandlers: shs,
   459  		trInfo:        trInfo,
   460  	}, nil
   461  }
   462  
   463  func (a *csAttempt) getTransport() error {
   464  	cs := a.cs
   465  
   466  	var err error
   467  	a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
   468  	if err != nil {
   469  		if de, ok := err.(dropError); ok {
   470  			err = de.error
   471  			a.drop = true
   472  		}
   473  		return err
   474  	}
   475  	if a.trInfo != nil {
   476  		a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr())
   477  	}
   478  	return nil
   479  }
   480  
   481  func (a *csAttempt) newStream() error {
   482  	cs := a.cs
   483  	cs.callHdr.PreviousAttempts = cs.numRetries
   484  
   485  	// Merge metadata stored in PickResult, if any, with existing call metadata.
   486  	// It is safe to overwrite the csAttempt's context here, since all state
    487  	// maintained in it is local to the attempt. When the attempt has to be
   488  	// retried, a new instance of csAttempt will be created.
   489  	if a.pickResult.Metadata != nil {
    490  		// We currently do not have a function in the metadata package which
   491  		// merges given metadata with existing metadata in a context. Existing
   492  		// function `AppendToOutgoingContext()` takes a variadic argument of key
   493  		// value pairs.
   494  		//
   495  		// TODO: Make it possible to retrieve key value pairs from metadata.MD
   496  		// in a form passable to AppendToOutgoingContext(), or create a version
   497  		// of AppendToOutgoingContext() that accepts a metadata.MD.
   498  		md, _ := metadata.FromOutgoingContext(a.ctx)
   499  		md = metadata.Join(md, a.pickResult.Metadata)
   500  		a.ctx = metadata.NewOutgoingContext(a.ctx, md)
   501  	}
   502  
   503  	s, err := a.t.NewStream(a.ctx, cs.callHdr)
   504  	if err != nil {
   505  		nse, ok := err.(*transport.NewStreamError)
   506  		if !ok {
   507  			// Unexpected.
   508  			return err
   509  		}
   510  
   511  		if nse.AllowTransparentRetry {
   512  			a.allowTransparentRetry = true
   513  		}
   514  
   515  		// Unwrap and convert error.
   516  		return toRPCErr(nse.Err)
   517  	}
   518  	a.s = s
   519  	a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool}
   520  	return nil
   521  }
   522  
   523  // clientStream implements a client side Stream.
   524  type clientStream struct {
   525  	callHdr  *transport.CallHdr
   526  	opts     []CallOption
   527  	callInfo *callInfo
   528  	cc       *ClientConn
   529  	desc     *StreamDesc
   530  
   531  	codec baseCodec
   532  	cp    Compressor
   533  	comp  encoding.Compressor
   534  
   535  	cancel context.CancelFunc // cancels all attempts
   536  
   537  	sentLast bool // sent an end stream
   538  
   539  	methodConfig *MethodConfig
   540  
   541  	ctx context.Context // the application's context, wrapped by stats/tracing
   542  
   543  	retryThrottler *retryThrottler // The throttler active when the RPC began.
   544  
   545  	binlogs []binarylog.MethodLogger
    546  	// serverHeaderBinlogged is a boolean for whether the server header has been
    547  	// logged. The server header will be logged the first time one of the
    548  	// following happens: stream.Header(), stream.Recv().
   549  	//
   550  	// It's only read and used by Recv() and Header(), so it doesn't need to be
   551  	// synchronized.
   552  	serverHeaderBinlogged bool
   553  
   554  	mu                      sync.Mutex
   555  	firstAttempt            bool // if true, transparent retry is valid
   556  	numRetries              int  // exclusive of transparent retry attempt(s)
   557  	numRetriesSincePushback int  // retries since pushback; to reset backoff
   558  	finished                bool // TODO: replace with atomic cmpxchg or sync.Once?
   559  	// attempt is the active client stream attempt.
   560  	// The only place where it is written is the newAttemptLocked method and this method never writes nil.
   561  	// So, attempt can be nil only inside newClientStream function when clientStream is first created.
    562  	// One of the first things done after clientStream's creation is to call newAttemptLocked, which either
    563  	// assigns a non-nil value to the attempt or returns an error. If an error is returned from newAttemptLocked,
   564  	// then newClientStream calls finish on the clientStream and returns. So, finish method is the only
   565  	// place where we need to check if the attempt is nil.
   566  	attempt *csAttempt
   567  	// TODO(hedging): hedging will have multiple attempts simultaneously.
   568  	committed  bool // active attempt committed for retry?
   569  	onCommit   func()
   570  	buffer     []func(a *csAttempt) error // operations to replay on retry
   571  	bufferSize int                        // current size of buffer
   572  }
   573  
   574  // csAttempt implements a single transport stream attempt within a
   575  // clientStream.
   576  type csAttempt struct {
   577  	ctx        context.Context
   578  	cs         *clientStream
   579  	t          transport.ClientTransport
   580  	s          *transport.Stream
   581  	p          *parser
   582  	pickResult balancer.PickResult
   583  
   584  	finished  bool
   585  	dc        Decompressor
   586  	decomp    encoding.Compressor
   587  	decompSet bool
   588  
   589  	mu sync.Mutex // guards trInfo.tr
   590  	// trInfo may be nil (if EnableTracing is false).
   591  	// trInfo.tr is set when created (if EnableTracing is true),
   592  	// and cleared when the finish method is called.
   593  	trInfo *traceInfo
   594  
   595  	statsHandlers []stats.Handler
   596  	beginTime     time.Time
   597  
   598  	// set for newStream errors that may be transparently retried
   599  	allowTransparentRetry bool
   600  	// set for pick errors that are returned as a status
   601  	drop bool
   602  }
   603  
   604  func (cs *clientStream) commitAttemptLocked() {
   605  	if !cs.committed && cs.onCommit != nil {
   606  		cs.onCommit()
   607  	}
   608  	cs.committed = true
   609  	cs.buffer = nil
   610  }
   611  
   612  func (cs *clientStream) commitAttempt() {
   613  	cs.mu.Lock()
   614  	cs.commitAttemptLocked()
   615  	cs.mu.Unlock()
   616  }
   617  
    618  // shouldRetry returns a nil error if the RPC should be retried; otherwise it
    619  // returns the error that should be returned by the operation.  If the RPC
    620  // should be retried, the bool indicates whether it is being retried transparently.
   621  func (a *csAttempt) shouldRetry(err error) (bool, error) {
   622  	cs := a.cs
   623  
   624  	if cs.finished || cs.committed || a.drop {
   625  		// RPC is finished or committed or was dropped by the picker; cannot retry.
   626  		return false, err
   627  	}
   628  	if a.s == nil && a.allowTransparentRetry {
   629  		return true, nil
   630  	}
   631  	// Wait for the trailers.
   632  	unprocessed := false
   633  	if a.s != nil {
   634  		<-a.s.Done()
   635  		unprocessed = a.s.Unprocessed()
   636  	}
   637  	if cs.firstAttempt && unprocessed {
   638  		// First attempt, stream unprocessed: transparently retry.
   639  		return true, nil
   640  	}
   641  	if cs.cc.dopts.disableRetry {
   642  		return false, err
   643  	}
   644  
   645  	pushback := 0
   646  	hasPushback := false
   647  	if a.s != nil {
   648  		if !a.s.TrailersOnly() {
   649  			return false, err
   650  		}
   651  
   652  		// TODO(retry): Move down if the spec changes to not check server pushback
   653  		// before considering this a failure for throttling.
   654  		sps := a.s.Trailer()["grpc-retry-pushback-ms"]
   655  		if len(sps) == 1 {
   656  			var e error
   657  			if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
   658  				channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0])
   659  				cs.retryThrottler.throttle() // This counts as a failure for throttling.
   660  				return false, err
   661  			}
   662  			hasPushback = true
   663  		} else if len(sps) > 1 {
   664  			channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps)
   665  			cs.retryThrottler.throttle() // This counts as a failure for throttling.
   666  			return false, err
   667  		}
   668  	}
   669  
   670  	var code codes.Code
   671  	if a.s != nil {
   672  		code = a.s.Status().Code()
   673  	} else {
   674  		code = status.Code(err)
   675  	}
   676  
   677  	rp := cs.methodConfig.RetryPolicy
   678  	if rp == nil || !rp.RetryableStatusCodes[code] {
   679  		return false, err
   680  	}
   681  
   682  	// Note: the ordering here is important; we count this as a failure
   683  	// only if the code matched a retryable code.
   684  	if cs.retryThrottler.throttle() {
   685  		return false, err
   686  	}
   687  	if cs.numRetries+1 >= rp.MaxAttempts {
   688  		return false, err
   689  	}
   690  
   691  	var dur time.Duration
   692  	if hasPushback {
   693  		dur = time.Millisecond * time.Duration(pushback)
   694  		cs.numRetriesSincePushback = 0
   695  	} else {
   696  		fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback))
   697  		cur := float64(rp.InitialBackoff) * fact
   698  		if max := float64(rp.MaxBackoff); cur > max {
   699  			cur = max
   700  		}
   701  		dur = time.Duration(grpcrand.Int63n(int64(cur)))
   702  		cs.numRetriesSincePushback++
   703  	}
   704  
   705  	// TODO(dfawley): we could eagerly fail here if dur puts us past the
   706  	// deadline, but unsure if it is worth doing.
   707  	t := time.NewTimer(dur)
   708  	select {
   709  	case <-t.C:
   710  		cs.numRetries++
   711  		return false, nil
   712  	case <-cs.ctx.Done():
   713  		t.Stop()
   714  		return false, status.FromContextError(cs.ctx.Err()).Err()
   715  	}
   716  }
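// A worked example of the backoff computed above, assuming a retry policy of
// InitialBackoff=100ms, BackoffMultiplier=2, MaxBackoff=1s and no server
// pushback:
//
//	1st retry: cur = 100ms * 2^0 = 100ms        -> dur = rand(0, 100ms)
//	2nd retry: cur = 100ms * 2^1 = 200ms        -> dur = rand(0, 200ms)
//	5th retry: cur = 100ms * 2^4 = 1.6s, cap 1s -> dur = rand(0, 1s)
//
// A valid "grpc-retry-pushback-ms" trailer from the server overrides this
// computation and resets numRetriesSincePushback to zero.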
   717  
   718  // Returns nil if a retry was performed and succeeded; error otherwise.
   719  func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error {
   720  	for {
   721  		attempt.finish(toRPCErr(lastErr))
   722  		isTransparent, err := attempt.shouldRetry(lastErr)
   723  		if err != nil {
   724  			cs.commitAttemptLocked()
   725  			return err
   726  		}
   727  		cs.firstAttempt = false
   728  		attempt, err = cs.newAttemptLocked(isTransparent)
   729  		if err != nil {
   730  			// Only returns error if the clientconn is closed or the context of
   731  			// the stream is canceled.
   732  			return err
   733  		}
   734  		// Note that the first op in the replay buffer always sets cs.attempt
   735  		// if it is able to pick a transport and create a stream.
   736  		if lastErr = cs.replayBufferLocked(attempt); lastErr == nil {
   737  			return nil
   738  		}
   739  	}
   740  }
   741  
   742  func (cs *clientStream) Context() context.Context {
   743  	cs.commitAttempt()
   744  	// No need to lock before using attempt, since we know it is committed and
   745  	// cannot change.
   746  	if cs.attempt.s != nil {
   747  		return cs.attempt.s.Context()
   748  	}
   749  	return cs.ctx
   750  }
   751  
   752  func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
   753  	cs.mu.Lock()
   754  	for {
   755  		if cs.committed {
   756  			cs.mu.Unlock()
   757  			// toRPCErr is used in case the error from the attempt comes from
   758  			// NewClientStream, which intentionally doesn't return a status
   759  			// error to allow for further inspection; all other errors should
   760  			// already be status errors.
   761  			return toRPCErr(op(cs.attempt))
   762  		}
   763  		if len(cs.buffer) == 0 {
   764  			// For the first op, which controls creation of the stream and
   765  			// assigns cs.attempt, we need to create a new attempt inline
   766  			// before executing the first op.  On subsequent ops, the attempt
   767  			// is created immediately before replaying the ops.
   768  			var err error
   769  			if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil {
   770  				cs.mu.Unlock()
   771  				cs.finish(err)
   772  				return err
   773  			}
   774  		}
   775  		a := cs.attempt
   776  		cs.mu.Unlock()
   777  		err := op(a)
   778  		cs.mu.Lock()
   779  		if a != cs.attempt {
   780  			// We started another attempt already.
   781  			continue
   782  		}
   783  		if err == io.EOF {
   784  			<-a.s.Done()
   785  		}
   786  		if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
   787  			onSuccess()
   788  			cs.mu.Unlock()
   789  			return err
   790  		}
   791  		if err := cs.retryLocked(a, err); err != nil {
   792  			cs.mu.Unlock()
   793  			return err
   794  		}
   795  	}
   796  }
   797  
   798  func (cs *clientStream) Header() (metadata.MD, error) {
   799  	var m metadata.MD
   800  	err := cs.withRetry(func(a *csAttempt) error {
   801  		var err error
   802  		m, err = a.s.Header()
   803  		return toRPCErr(err)
   804  	}, cs.commitAttemptLocked)
   805  
   806  	if m == nil && err == nil {
   807  		// The stream ended with success.  Finish the clientStream.
   808  		err = io.EOF
   809  	}
   810  
   811  	if err != nil {
   812  		cs.finish(err)
   813  		// Do not return the error.  The user should get it by calling Recv().
   814  		return nil, nil
   815  	}
   816  
   817  	if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil {
   818  		// Only log if binary log is on and header has not been logged, and
    819  		// there are actually headers to log.
   820  		logEntry := &binarylog.ServerHeader{
   821  			OnClientSide: true,
   822  			Header:       m,
   823  			PeerAddr:     nil,
   824  		}
   825  		if peer, ok := peer.FromContext(cs.Context()); ok {
   826  			logEntry.PeerAddr = peer.Addr
   827  		}
   828  		cs.serverHeaderBinlogged = true
   829  		for _, binlog := range cs.binlogs {
   830  			binlog.Log(cs.ctx, logEntry)
   831  		}
   832  	}
   833  
   834  	return m, nil
   835  }
   836  
   837  func (cs *clientStream) Trailer() metadata.MD {
    838  	// On RPC failure, we never need to retry, because valid usage requires that
    839  	// RecvMsg() has returned a non-nil error before this function is called.
    840  	// We would have retried earlier if necessary.
   841  	//
   842  	// Commit the attempt anyway, just in case users are not following those
   843  	// directions -- it will prevent races and should not meaningfully impact
   844  	// performance.
   845  	cs.commitAttempt()
   846  	if cs.attempt.s == nil {
   847  		return nil
   848  	}
   849  	return cs.attempt.s.Trailer()
   850  }
   851  
   852  func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
   853  	for _, f := range cs.buffer {
   854  		if err := f(attempt); err != nil {
   855  			return err
   856  		}
   857  	}
   858  	return nil
   859  }
   860  
   861  func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
   862  	// Note: we still will buffer if retry is disabled (for transparent retries).
   863  	if cs.committed {
   864  		return
   865  	}
   866  	cs.bufferSize += sz
   867  	if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
   868  		cs.commitAttemptLocked()
   869  		return
   870  	}
   871  	cs.buffer = append(cs.buffer, op)
   872  }
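// The buffer limit checked above comes from the MaxRetryRPCBufferSize
// CallOption; a caller that needs more retry buffering for a particular RPC
// can raise it (sketch; the method is hypothetical):
//
//	stream, err := client.UploadChunks(ctx, grpc.MaxRetryRPCBufferSize(1<<20)) // 1 MiB
//
// Once the limit is exceeded the attempt is committed and no further retries
// are possible for this RPC.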
   873  
   874  func (cs *clientStream) SendMsg(m any) (err error) {
   875  	defer func() {
   876  		if err != nil && err != io.EOF {
   877  			// Call finish on the client stream for errors generated by this SendMsg
   878  			// call, as these indicate problems created by this client.  (Transport
   879  			// errors are converted to an io.EOF error in csAttempt.sendMsg; the real
   880  			// error will be returned from RecvMsg eventually in that case, or be
   881  			// retried.)
   882  			cs.finish(err)
   883  		}
   884  	}()
   885  	if cs.sentLast {
   886  		return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
   887  	}
   888  	if !cs.desc.ClientStreams {
   889  		cs.sentLast = true
   890  	}
   891  
   892  	// load hdr, payload, data
   893  	hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
   894  	if err != nil {
   895  		return err
   896  	}
   897  
   898  	// TODO(dfawley): should we be checking len(data) instead?
   899  	if len(payload) > *cs.callInfo.maxSendMessageSize {
   900  		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
   901  	}
   902  	op := func(a *csAttempt) error {
   903  		return a.sendMsg(m, hdr, payload, data)
   904  	}
   905  	err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
   906  	if len(cs.binlogs) != 0 && err == nil {
   907  		cm := &binarylog.ClientMessage{
   908  			OnClientSide: true,
   909  			Message:      data,
   910  		}
   911  		for _, binlog := range cs.binlogs {
   912  			binlog.Log(cs.ctx, cm)
   913  		}
   914  	}
   915  	return err
   916  }
   917  
   918  func (cs *clientStream) RecvMsg(m any) error {
   919  	if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged {
   920  		// Call Header() to binary log header if it's not already logged.
   921  		cs.Header()
   922  	}
   923  	var recvInfo *payloadInfo
   924  	if len(cs.binlogs) != 0 {
   925  		recvInfo = &payloadInfo{}
   926  	}
   927  	err := cs.withRetry(func(a *csAttempt) error {
   928  		return a.recvMsg(m, recvInfo)
   929  	}, cs.commitAttemptLocked)
   930  	if len(cs.binlogs) != 0 && err == nil {
   931  		sm := &binarylog.ServerMessage{
   932  			OnClientSide: true,
   933  			Message:      recvInfo.uncompressedBytes,
   934  		}
   935  		for _, binlog := range cs.binlogs {
   936  			binlog.Log(cs.ctx, sm)
   937  		}
   938  	}
   939  	if err != nil || !cs.desc.ServerStreams {
   940  		// err != nil or non-server-streaming indicates end of stream.
   941  		cs.finish(err)
   942  	}
   943  	return err
   944  }
   945  
   946  func (cs *clientStream) CloseSend() error {
   947  	if cs.sentLast {
   948  		// TODO: return an error and finish the stream instead, due to API misuse?
   949  		return nil
   950  	}
   951  	cs.sentLast = true
   952  	op := func(a *csAttempt) error {
   953  		a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
   954  		// Always return nil; io.EOF is the only error that might make sense
   955  		// instead, but there is no need to signal the client to call RecvMsg
   956  		// as the only use left for the stream after CloseSend is to call
   957  		// RecvMsg.  This also matches historical behavior.
   958  		return nil
   959  	}
   960  	cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
   961  	if len(cs.binlogs) != 0 {
   962  		chc := &binarylog.ClientHalfClose{
   963  			OnClientSide: true,
   964  		}
   965  		for _, binlog := range cs.binlogs {
   966  			binlog.Log(cs.ctx, chc)
   967  		}
   968  	}
    969  	// We never return an error here; see the rationale in the op above.
   970  	return nil
   971  }
   972  
   973  func (cs *clientStream) finish(err error) {
   974  	if err == io.EOF {
   975  		// Ending a stream with EOF indicates a success.
   976  		err = nil
   977  	}
   978  	cs.mu.Lock()
   979  	if cs.finished {
   980  		cs.mu.Unlock()
   981  		return
   982  	}
   983  	cs.finished = true
   984  	for _, onFinish := range cs.callInfo.onFinish {
   985  		onFinish(err)
   986  	}
   987  	cs.commitAttemptLocked()
   988  	if cs.attempt != nil {
   989  		cs.attempt.finish(err)
   990  		// after functions all rely upon having a stream.
   991  		if cs.attempt.s != nil {
   992  			for _, o := range cs.opts {
   993  				o.after(cs.callInfo, cs.attempt)
   994  			}
   995  		}
   996  	}
   997  
   998  	cs.mu.Unlock()
   999  	// Only one of cancel or trailer needs to be logged.
  1000  	if len(cs.binlogs) != 0 {
  1001  		switch err {
  1002  		case errContextCanceled, errContextDeadline, ErrClientConnClosing:
  1003  			c := &binarylog.Cancel{
  1004  				OnClientSide: true,
  1005  			}
  1006  			for _, binlog := range cs.binlogs {
  1007  				binlog.Log(cs.ctx, c)
  1008  			}
  1009  		default:
  1010  			logEntry := &binarylog.ServerTrailer{
  1011  				OnClientSide: true,
  1012  				Trailer:      cs.Trailer(),
  1013  				Err:          err,
  1014  			}
  1015  			if peer, ok := peer.FromContext(cs.Context()); ok {
  1016  				logEntry.PeerAddr = peer.Addr
  1017  			}
  1018  			for _, binlog := range cs.binlogs {
  1019  				binlog.Log(cs.ctx, logEntry)
  1020  			}
  1021  		}
  1022  	}
  1023  	if err == nil {
  1024  		cs.retryThrottler.successfulRPC()
  1025  	}
  1026  	if channelz.IsOn() {
  1027  		if err != nil {
  1028  			cs.cc.incrCallsFailed()
  1029  		} else {
  1030  			cs.cc.incrCallsSucceeded()
  1031  		}
  1032  	}
  1033  	cs.cancel()
  1034  }
  1035  
  1036  func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
  1037  	cs := a.cs
  1038  	if a.trInfo != nil {
  1039  		a.mu.Lock()
  1040  		if a.trInfo.tr != nil {
  1041  			a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
  1042  		}
  1043  		a.mu.Unlock()
  1044  	}
  1045  	if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
  1046  		if !cs.desc.ClientStreams {
  1047  			// For non-client-streaming RPCs, we return nil instead of EOF on error
  1048  			// because the generated code requires it.  finish is not called; RecvMsg()
  1049  			// will call it with the stream's status independently.
  1050  			return nil
  1051  		}
  1052  		return io.EOF
  1053  	}
  1054  	for _, sh := range a.statsHandlers {
  1055  		sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
  1056  	}
  1057  	if channelz.IsOn() {
  1058  		a.t.IncrMsgSent()
  1059  	}
  1060  	return nil
  1061  }
  1062  
  1063  func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
  1064  	cs := a.cs
  1065  	if len(a.statsHandlers) != 0 && payInfo == nil {
  1066  		payInfo = &payloadInfo{}
  1067  	}
  1068  
  1069  	if !a.decompSet {
  1070  		// Block until we receive headers containing received message encoding.
  1071  		if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
  1072  			if a.dc == nil || a.dc.Type() != ct {
  1073  				// No configured decompressor, or it does not match the incoming
  1074  				// message encoding; attempt to find a registered compressor that does.
  1075  				a.dc = nil
  1076  				a.decomp = encoding.GetCompressor(ct)
  1077  			}
  1078  		} else {
  1079  			// No compression is used; disable our decompressor.
  1080  			a.dc = nil
  1081  		}
  1082  		// Only initialize this state once per stream.
  1083  		a.decompSet = true
  1084  	}
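	// The decompressor above is looked up by the stream's "grpc-encoding"
	// value; a custom codec becomes discoverable here by registering it with
	// the encoding package at init time (sketch; snappyCompressor is
	// hypothetical and must report the wire name, e.g. "snappy", from Name()):
	//
	//	func init() {
	//		encoding.RegisterCompressor(&snappyCompressor{})
	//	}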
  1085  	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
  1086  	if err != nil {
  1087  		if err == io.EOF {
  1088  			if statusErr := a.s.Status().Err(); statusErr != nil {
  1089  				return statusErr
  1090  			}
  1091  			return io.EOF // indicates successful end of stream.
  1092  		}
  1093  
  1094  		return toRPCErr(err)
  1095  	}
  1096  	if a.trInfo != nil {
  1097  		a.mu.Lock()
  1098  		if a.trInfo.tr != nil {
  1099  			a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
  1100  		}
  1101  		a.mu.Unlock()
  1102  	}
  1103  	for _, sh := range a.statsHandlers {
  1104  		sh.HandleRPC(a.ctx, &stats.InPayload{
  1105  			Client:   true,
  1106  			RecvTime: time.Now(),
  1107  			Payload:  m,
  1108  			// TODO truncate large payload.
  1109  			Data:             payInfo.uncompressedBytes,
  1110  			WireLength:       payInfo.compressedLength + headerLen,
  1111  			CompressedLength: payInfo.compressedLength,
  1112  			Length:           len(payInfo.uncompressedBytes),
  1113  		})
  1114  	}
  1115  	if channelz.IsOn() {
  1116  		a.t.IncrMsgRecv()
  1117  	}
  1118  	if cs.desc.ServerStreams {
  1119  		// Subsequent messages should be received by subsequent RecvMsg calls.
  1120  		return nil
  1121  	}
  1122  	// Special handling for non-server-stream rpcs.
  1123  	// This recv expects EOF or errors, so we don't collect inPayload.
  1124  	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
  1125  	if err == nil {
  1126  		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
  1127  	}
  1128  	if err == io.EOF {
  1129  		return a.s.Status().Err() // non-server streaming Recv returns nil on success
  1130  	}
  1131  	return toRPCErr(err)
  1132  }
  1133  
  1134  func (a *csAttempt) finish(err error) {
  1135  	a.mu.Lock()
  1136  	if a.finished {
  1137  		a.mu.Unlock()
  1138  		return
  1139  	}
  1140  	a.finished = true
  1141  	if err == io.EOF {
  1142  		// Ending a stream with EOF indicates a success.
  1143  		err = nil
  1144  	}
  1145  	var tr metadata.MD
  1146  	if a.s != nil {
  1147  		a.t.CloseStream(a.s, err)
  1148  		tr = a.s.Trailer()
  1149  	}
  1150  
  1151  	if a.pickResult.Done != nil {
  1152  		br := false
  1153  		if a.s != nil {
  1154  			br = a.s.BytesReceived()
  1155  		}
  1156  		a.pickResult.Done(balancer.DoneInfo{
  1157  			Err:           err,
  1158  			Trailer:       tr,
  1159  			BytesSent:     a.s != nil,
  1160  			BytesReceived: br,
  1161  			ServerLoad:    balancerload.Parse(tr),
  1162  		})
  1163  	}
  1164  	for _, sh := range a.statsHandlers {
  1165  		end := &stats.End{
  1166  			Client:    true,
  1167  			BeginTime: a.beginTime,
  1168  			EndTime:   time.Now(),
  1169  			Trailer:   tr,
  1170  			Error:     err,
  1171  		}
  1172  		sh.HandleRPC(a.ctx, end)
  1173  	}
  1174  	if a.trInfo != nil && a.trInfo.tr != nil {
  1175  		if err == nil {
  1176  			a.trInfo.tr.LazyPrintf("RPC: [OK]")
  1177  		} else {
  1178  			a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
  1179  			a.trInfo.tr.SetError()
  1180  		}
  1181  		a.trInfo.tr.Finish()
  1182  		a.trInfo.tr = nil
  1183  	}
  1184  	a.mu.Unlock()
  1185  }
  1186  
   1187  // newNonRetryClientStream creates a ClientStream with the specified transport, on the
  1188  // given addrConn.
  1189  //
  1190  // It's expected that the given transport is either the same one in addrConn, or
  1191  // is already closed. To avoid race, transport is specified separately, instead
   1192  // of using ac.transport.
  1193  //
  1194  // Main difference between this and ClientConn.NewStream:
  1195  // - no retry
  1196  // - no service config (or wait for service config)
  1197  // - no tracing or stats
  1198  func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
  1199  	if t == nil {
  1200  		// TODO: return RPC error here?
  1201  		return nil, errors.New("transport provided is nil")
  1202  	}
   1203  	// defaultCallInfo contains unnecessary info (e.g. failFast, maxRetryRPCBufferSize), so we just initialize an empty struct.
  1204  	c := &callInfo{}
  1205  
  1206  	// Possible context leak:
  1207  	// The cancel function for the child context we create will only be called
  1208  	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
  1209  	// an error is generated by SendMsg.
  1210  	// https://github.com/grpc/grpc-go/issues/1818.
  1211  	ctx, cancel := context.WithCancel(ctx)
  1212  	defer func() {
  1213  		if err != nil {
  1214  			cancel()
  1215  		}
  1216  	}()
  1217  
  1218  	for _, o := range opts {
  1219  		if err := o.before(c); err != nil {
  1220  			return nil, toRPCErr(err)
  1221  		}
  1222  	}
  1223  	c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
  1224  	c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
  1225  	if err := setCallInfoCodec(c); err != nil {
  1226  		return nil, err
  1227  	}
  1228  
  1229  	callHdr := &transport.CallHdr{
  1230  		Host:           ac.cc.authority,
  1231  		Method:         method,
  1232  		ContentSubtype: c.contentSubtype,
  1233  	}
  1234  
  1235  	// Set our outgoing compression according to the UseCompressor CallOption, if
  1236  	// set.  In that case, also find the compressor from the encoding package.
  1237  	// Otherwise, use the compressor configured by the WithCompressor DialOption,
  1238  	// if set.
  1239  	var cp Compressor
  1240  	var comp encoding.Compressor
  1241  	if ct := c.compressorType; ct != "" {
  1242  		callHdr.SendCompress = ct
  1243  		if ct != encoding.Identity {
  1244  			comp = encoding.GetCompressor(ct)
  1245  			if comp == nil {
  1246  				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
  1247  			}
  1248  		}
  1249  	} else if ac.cc.dopts.cp != nil {
  1250  		callHdr.SendCompress = ac.cc.dopts.cp.Type()
  1251  		cp = ac.cc.dopts.cp
  1252  	}
  1253  	if c.creds != nil {
  1254  		callHdr.Creds = c.creds
  1255  	}
  1256  
  1257  	// Use a special addrConnStream to avoid retry.
  1258  	as := &addrConnStream{
  1259  		callHdr:  callHdr,
  1260  		ac:       ac,
  1261  		ctx:      ctx,
  1262  		cancel:   cancel,
  1263  		opts:     opts,
  1264  		callInfo: c,
  1265  		desc:     desc,
  1266  		codec:    c.codec,
  1267  		cp:       cp,
  1268  		comp:     comp,
  1269  		t:        t,
  1270  	}
  1271  
  1272  	s, err := as.t.NewStream(as.ctx, as.callHdr)
  1273  	if err != nil {
  1274  		err = toRPCErr(err)
  1275  		return nil, err
  1276  	}
  1277  	as.s = s
  1278  	as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool}
  1279  	ac.incrCallsStarted()
  1280  	if desc != unaryStreamDesc {
   1281  		// Listen on stream context to clean up when the stream context is
  1282  		// canceled.  Also listen for the addrConn's context in case the
  1283  		// addrConn is closed or reconnects to a different address.  In all
  1284  		// other cases, an error should already be injected into the recv
  1285  		// buffer by the transport, which the client will eventually receive,
  1286  		// and then we will cancel the stream's context in
  1287  		// addrConnStream.finish.
  1288  		go func() {
  1289  			ac.mu.Lock()
  1290  			acCtx := ac.ctx
  1291  			ac.mu.Unlock()
  1292  			select {
  1293  			case <-acCtx.Done():
  1294  				as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing"))
  1295  			case <-ctx.Done():
  1296  				as.finish(toRPCErr(ctx.Err()))
  1297  			}
  1298  		}()
  1299  	}
  1300  	return as, nil
  1301  }
  1302  
  1303  type addrConnStream struct {
  1304  	s         *transport.Stream
  1305  	ac        *addrConn
  1306  	callHdr   *transport.CallHdr
  1307  	cancel    context.CancelFunc
  1308  	opts      []CallOption
  1309  	callInfo  *callInfo
  1310  	t         transport.ClientTransport
  1311  	ctx       context.Context
  1312  	sentLast  bool
  1313  	desc      *StreamDesc
  1314  	codec     baseCodec
  1315  	cp        Compressor
  1316  	comp      encoding.Compressor
  1317  	decompSet bool
  1318  	dc        Decompressor
  1319  	decomp    encoding.Compressor
  1320  	p         *parser
  1321  	mu        sync.Mutex
  1322  	finished  bool
  1323  }
  1324  
  1325  func (as *addrConnStream) Header() (metadata.MD, error) {
  1326  	m, err := as.s.Header()
  1327  	if err != nil {
  1328  		as.finish(toRPCErr(err))
  1329  	}
  1330  	return m, err
  1331  }
  1332  
  1333  func (as *addrConnStream) Trailer() metadata.MD {
  1334  	return as.s.Trailer()
  1335  }
  1336  
  1337  func (as *addrConnStream) CloseSend() error {
  1338  	if as.sentLast {
  1339  		// TODO: return an error and finish the stream instead, due to API misuse?
  1340  		return nil
  1341  	}
  1342  	as.sentLast = true
  1343  
  1344  	as.t.Write(as.s, nil, nil, &transport.Options{Last: true})
  1345  	// Always return nil; io.EOF is the only error that might make sense
  1346  	// instead, but there is no need to signal the client to call RecvMsg
  1347  	// as the only use left for the stream after CloseSend is to call
  1348  	// RecvMsg.  This also matches historical behavior.
  1349  	return nil
  1350  }
  1351  
  1352  func (as *addrConnStream) Context() context.Context {
  1353  	return as.s.Context()
  1354  }
  1355  
  1356  func (as *addrConnStream) SendMsg(m any) (err error) {
  1357  	defer func() {
  1358  		if err != nil && err != io.EOF {
  1359  			// Call finish on the client stream for errors generated by this SendMsg
  1360  			// call, as these indicate problems created by this client.  (Transport
  1361  			// errors are converted to an io.EOF error in csAttempt.sendMsg; the real
  1362  			// error will be returned from RecvMsg eventually in that case, or be
  1363  			// retried.)
  1364  			as.finish(err)
  1365  		}
  1366  	}()
  1367  	if as.sentLast {
  1368  		return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
  1369  	}
  1370  	if !as.desc.ClientStreams {
  1371  		as.sentLast = true
  1372  	}
  1373  
  1374  	// load hdr, payload, data
  1375  	hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
  1376  	if err != nil {
  1377  		return err
  1378  	}
  1379  
  1380  	// TODO(dfawley): should we be checking len(data) instead?
  1381  	if len(payld) > *as.callInfo.maxSendMessageSize {
  1382  		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
  1383  	}
  1384  
  1385  	if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
  1386  		if !as.desc.ClientStreams {
  1387  			// For non-client-streaming RPCs, we return nil instead of EOF on error
  1388  			// because the generated code requires it.  finish is not called; RecvMsg()
  1389  			// will call it with the stream's status independently.
  1390  			return nil
  1391  		}
  1392  		return io.EOF
  1393  	}
  1394  
  1395  	if channelz.IsOn() {
  1396  		as.t.IncrMsgSent()
  1397  	}
  1398  	return nil
  1399  }
  1400  
  1401  func (as *addrConnStream) RecvMsg(m any) (err error) {
  1402  	defer func() {
  1403  		if err != nil || !as.desc.ServerStreams {
  1404  			// err != nil or non-server-streaming indicates end of stream.
  1405  			as.finish(err)
  1406  		}
  1407  	}()
  1408  
  1409  	if !as.decompSet {
  1410  		// Block until we receive headers containing received message encoding.
  1411  		if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
  1412  			if as.dc == nil || as.dc.Type() != ct {
  1413  				// No configured decompressor, or it does not match the incoming
  1414  				// message encoding; attempt to find a registered compressor that does.
  1415  				as.dc = nil
  1416  				as.decomp = encoding.GetCompressor(ct)
  1417  			}
  1418  		} else {
  1419  			// No compression is used; disable our decompressor.
  1420  			as.dc = nil
  1421  		}
  1422  		// Only initialize this state once per stream.
  1423  		as.decompSet = true
  1424  	}
  1425  	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
  1426  	if err != nil {
  1427  		if err == io.EOF {
  1428  			if statusErr := as.s.Status().Err(); statusErr != nil {
  1429  				return statusErr
  1430  			}
  1431  			return io.EOF // indicates successful end of stream.
  1432  		}
  1433  		return toRPCErr(err)
  1434  	}
  1435  
  1436  	if channelz.IsOn() {
  1437  		as.t.IncrMsgRecv()
  1438  	}
  1439  	if as.desc.ServerStreams {
  1440  		// Subsequent messages should be received by subsequent RecvMsg calls.
  1441  		return nil
  1442  	}
  1443  
  1444  	// Special handling for non-server-stream rpcs.
  1445  	// This recv expects EOF or errors, so we don't collect inPayload.
  1446  	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
  1447  	if err == nil {
  1448  		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
  1449  	}
  1450  	if err == io.EOF {
  1451  		return as.s.Status().Err() // non-server streaming Recv returns nil on success
  1452  	}
  1453  	return toRPCErr(err)
  1454  }
  1455  
  1456  func (as *addrConnStream) finish(err error) {
  1457  	as.mu.Lock()
  1458  	if as.finished {
  1459  		as.mu.Unlock()
  1460  		return
  1461  	}
  1462  	as.finished = true
  1463  	if err == io.EOF {
  1464  		// Ending a stream with EOF indicates a success.
  1465  		err = nil
  1466  	}
  1467  	if as.s != nil {
  1468  		as.t.CloseStream(as.s, err)
  1469  	}
  1470  
  1471  	if err != nil {
  1472  		as.ac.incrCallsFailed()
  1473  	} else {
  1474  		as.ac.incrCallsSucceeded()
  1475  	}
  1476  	as.cancel()
  1477  	as.mu.Unlock()
  1478  }
  1479  
  1480  // ServerStream defines the server-side behavior of a streaming RPC.
  1481  //
  1482  // Errors returned from ServerStream methods are compatible with the status
  1483  // package.  However, the status code will often not match the RPC status as
  1484  // seen by the client application, and therefore, should not be relied upon for
  1485  // this purpose.
  1486  type ServerStream interface {
  1487  	// SetHeader sets the header metadata. It may be called multiple times.
  1488  	// When called multiple times, all the provided metadata will be merged.
  1489  	// All the metadata will be sent out when one of the following happens:
  1490  	//  - ServerStream.SendHeader() is called;
  1491  	//  - The first response is sent out;
  1492  	//  - An RPC status is sent out (error or success).
  1493  	SetHeader(metadata.MD) error
  1494  	// SendHeader sends the header metadata.
  1495  	// The provided md and headers set by SetHeader() will be sent.
  1496  	// It fails if called multiple times.
  1497  	SendHeader(metadata.MD) error
  1498  	// SetTrailer sets the trailer metadata which will be sent with the RPC status.
  1499  	// When called more than once, all the provided metadata will be merged.
  1500  	SetTrailer(metadata.MD)
  1501  	// Context returns the context for this stream.
  1502  	Context() context.Context
  1503  	// SendMsg sends a message. On error, SendMsg aborts the stream and the
  1504  	// error is returned directly.
  1505  	//
  1506  	// SendMsg blocks until:
  1507  	//   - There is sufficient flow control to schedule m with the transport, or
  1508  	//   - The stream is done, or
  1509  	//   - The stream breaks.
  1510  	//
  1511  	// SendMsg does not wait until the message is received by the client. An
  1512  	// untimely stream closure may result in lost messages.
  1513  	//
  1514  	// It is safe to have a goroutine calling SendMsg and another goroutine
  1515  	// calling RecvMsg on the same stream at the same time, but it is not safe
  1516  	// to call SendMsg on the same stream in different goroutines.
  1517  	//
  1518  	// It is not safe to modify the message after calling SendMsg. Tracing
  1519  	// libraries and stats handlers may use the message lazily.
  1520  	SendMsg(m any) error
  1521  	// RecvMsg blocks until it receives a message into m or the stream is
  1522  	// done. It returns io.EOF when the client has performed a CloseSend. On
  1523  	// any non-EOF error, the stream is aborted and the error contains the
  1524  	// RPC status.
  1525  	//
  1526  	// It is safe to have a goroutine calling SendMsg and another goroutine
  1527  	// calling RecvMsg on the same stream at the same time, but it is not
  1528  	// safe to call RecvMsg on the same stream in different goroutines.
  1529  	RecvMsg(m any) error
  1530  }
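
// A minimal usage sketch (editorial addition, not part of the original
// source): a bidirectional-streaming handler exercising the ServerStream
// methods above. The echoServer type and the pb package are hypothetical
// stand-ins for generated code, which would normally embed this interface.
//
//	func (s *echoServer) BidiEcho(stream pb.Echo_BidiEchoServer) error {
//		// Header metadata is buffered by SetHeader and flushed with the
//		// first response (or an explicit SendHeader call).
//		if err := stream.SetHeader(metadata.Pairs("handler", "bidi-echo")); err != nil {
//			return err
//		}
//		// Trailer metadata is sent together with the final RPC status.
//		defer stream.SetTrailer(metadata.Pairs("finished", "true"))
//		for {
//			req := new(pb.EchoRequest)
//			if err := stream.RecvMsg(req); err == io.EOF {
//				return nil // client half-closed; end the RPC with an OK status
//			} else if err != nil {
//				return err // already a status-compatible error
//			}
//			if err := stream.SendMsg(&pb.EchoResponse{Message: req.Message}); err != nil {
//				return err
//			}
//		}
//	}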
  1531  
  1532  // serverStream implements a server side Stream.
  1533  type serverStream struct {
  1534  	ctx   context.Context
  1535  	t     transport.ServerTransport
  1536  	s     *transport.Stream
  1537  	p     *parser
  1538  	codec baseCodec
  1539  
  1540  	cp     Compressor
  1541  	dc     Decompressor
  1542  	comp   encoding.Compressor
  1543  	decomp encoding.Compressor
  1544  
  1545  	sendCompressorName string
  1546  
  1547  	maxReceiveMessageSize int
  1548  	maxSendMessageSize    int
  1549  	trInfo                *traceInfo
  1550  
  1551  	statsHandler []stats.Handler
  1552  
  1553  	binlogs []binarylog.MethodLogger
  1554  	// serverHeaderBinlogged indicates whether the server header has been
  1555  	// logged. It is set when either stream.SendHeader() is called or the
  1556  	// first message is sent out.
  1557  	//
  1558  	// It is only checked in SendMsg and SendHeader, so it does not need to
  1559  	// be synchronized.
  1560  	serverHeaderBinlogged bool
  1561  
  1562  	mu sync.Mutex // protects trInfo.tr after the service handler runs.
  1563  }
  1564  
  1565  func (ss *serverStream) Context() context.Context {
  1566  	return ss.ctx
  1567  }
  1568  
  1569  func (ss *serverStream) SetHeader(md metadata.MD) error {
  1570  	if md.Len() == 0 {
  1571  		return nil
  1572  	}
  1573  	err := imetadata.Validate(md)
  1574  	if err != nil {
  1575  		return status.Error(codes.Internal, err.Error())
  1576  	}
  1577  	return ss.s.SetHeader(md)
  1578  }
  1579  
  1580  func (ss *serverStream) SendHeader(md metadata.MD) error {
  1581  	err := imetadata.Validate(md)
  1582  	if err != nil {
  1583  		return status.Error(codes.Internal, err.Error())
  1584  	}
  1585  
  1586  	err = ss.t.WriteHeader(ss.s, md)
  1587  	if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged {
  1588  		h, _ := ss.s.Header()
  1589  		sh := &binarylog.ServerHeader{
  1590  			Header: h,
  1591  		}
  1592  		ss.serverHeaderBinlogged = true
  1593  		for _, binlog := range ss.binlogs {
  1594  			binlog.Log(ss.ctx, sh)
  1595  		}
  1596  	}
  1597  	return err
  1598  }
  1599  
  1600  func (ss *serverStream) SetTrailer(md metadata.MD) {
  1601  	if md.Len() == 0 {
  1602  		return
  1603  	}
  1604  	if err := imetadata.Validate(md); err != nil {
  1605  		logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err)
  1606  	}
  1607  	ss.s.SetTrailer(md)
  1608  }
  1609  
  1610  func (ss *serverStream) SendMsg(m any) (err error) {
  1611  	defer func() {
  1612  		if ss.trInfo != nil {
  1613  			ss.mu.Lock()
  1614  			if ss.trInfo.tr != nil {
  1615  				if err == nil {
  1616  					ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
  1617  				} else {
  1618  					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
  1619  					ss.trInfo.tr.SetError()
  1620  				}
  1621  			}
  1622  			ss.mu.Unlock()
  1623  		}
  1624  		if err != nil && err != io.EOF {
  1625  			st, _ := status.FromError(toRPCErr(err))
  1626  			ss.t.WriteStatus(ss.s, st)
  1627  			// A non-user-specified status was sent out. This should only
  1628  			// happen in an error case (such as a server-side cancellation).
  1629  			//
  1630  			// This is not handled specially for now. The user will return a
  1631  			// final status from the service handler; we will log that error
  1632  			// instead. This behavior is similar to an interceptor.
  1633  		}
  1634  		if channelz.IsOn() && err == nil {
  1635  			ss.t.IncrMsgSent()
  1636  		}
  1637  	}()
  1638  
  1639  	// The server handler may have set a new compressor by calling
  1640  	// SetSendCompressor. If so, use it to compress the outbound message.
  1641  	if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName {
  1642  		ss.comp = encoding.GetCompressor(sendCompressorsName)
  1643  		ss.sendCompressorName = sendCompressorsName
  1644  	}
  1645  
  1646  	// load hdr, payload, data
  1647  	hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
  1648  	if err != nil {
  1649  		return err
  1650  	}
  1651  
  1652  	// TODO(dfawley): should we be checking len(data) instead?
  1653  	if len(payload) > ss.maxSendMessageSize {
  1654  		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
  1655  	}
  1656  	if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
  1657  		return toRPCErr(err)
  1658  	}
  1659  	if len(ss.binlogs) != 0 {
  1660  		if !ss.serverHeaderBinlogged {
  1661  			h, _ := ss.s.Header()
  1662  			sh := &binarylog.ServerHeader{
  1663  				Header: h,
  1664  			}
  1665  			ss.serverHeaderBinlogged = true
  1666  			for _, binlog := range ss.binlogs {
  1667  				binlog.Log(ss.ctx, sh)
  1668  			}
  1669  		}
  1670  		sm := &binarylog.ServerMessage{
  1671  			Message: data,
  1672  		}
  1673  		for _, binlog := range ss.binlogs {
  1674  			binlog.Log(ss.ctx, sm)
  1675  		}
  1676  	}
  1677  	if len(ss.statsHandler) != 0 {
  1678  		for _, sh := range ss.statsHandler {
  1679  			sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
  1680  		}
  1681  	}
  1682  	return nil
  1683  }
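
// Illustrative sketch (editorial addition, not part of the original source):
// the compressor refresh at the top of SendMsg exists because a handler may
// change the send compressor via grpc.SetSendCompressor, which must be called
// before any headers or messages have been sent. The server type and pb
// package below are hypothetical; the "gzip" name assumes the
// google.golang.org/grpc/encoding/gzip package has been imported for
// registration.
//
//	func (s *server) Stream(stream pb.Svc_StreamServer) error {
//		if err := grpc.SetSendCompressor(stream.Context(), "gzip"); err != nil {
//			return status.Errorf(codes.Internal, "setting send compressor: %v", err)
//		}
//		// Responses sent from here on are compressed with gzip.
//		return stream.SendMsg(&pb.Reply{})
//	}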
  1684  
  1685  func (ss *serverStream) RecvMsg(m any) (err error) {
  1686  	defer func() {
  1687  		if ss.trInfo != nil {
  1688  			ss.mu.Lock()
  1689  			if ss.trInfo.tr != nil {
  1690  				if err == nil {
  1691  					ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
  1692  				} else if err != io.EOF {
  1693  					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
  1694  					ss.trInfo.tr.SetError()
  1695  				}
  1696  			}
  1697  			ss.mu.Unlock()
  1698  		}
  1699  		if err != nil && err != io.EOF {
  1700  			st, _ := status.FromError(toRPCErr(err))
  1701  			ss.t.WriteStatus(ss.s, st)
  1702  			// A non-user-specified status was sent out. This should only
  1703  			// happen in an error case (such as a server-side cancellation).
  1704  			//
  1705  			// This is not handled specially for now. The user will return a
  1706  			// final status from the service handler; we will log that error
  1707  			// instead. This behavior is similar to an interceptor.
  1708  		}
  1709  		if channelz.IsOn() && err == nil {
  1710  			ss.t.IncrMsgRecv()
  1711  		}
  1712  	}()
  1713  	var payInfo *payloadInfo
  1714  	if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
  1715  		payInfo = &payloadInfo{}
  1716  	}
  1717  	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
  1718  		if err == io.EOF {
  1719  			if len(ss.binlogs) != 0 {
  1720  				chc := &binarylog.ClientHalfClose{}
  1721  				for _, binlog := range ss.binlogs {
  1722  					binlog.Log(ss.ctx, chc)
  1723  				}
  1724  			}
  1725  			return err
  1726  		}
  1727  		if err == io.ErrUnexpectedEOF {
  1728  			err = status.Error(codes.Internal, io.ErrUnexpectedEOF.Error())
  1729  		}
  1730  		return toRPCErr(err)
  1731  	}
  1732  	if len(ss.statsHandler) != 0 {
  1733  		for _, sh := range ss.statsHandler {
  1734  			sh.HandleRPC(ss.s.Context(), &stats.InPayload{
  1735  				RecvTime: time.Now(),
  1736  				Payload:  m,
  1737  				// TODO truncate large payload.
  1738  				Data:             payInfo.uncompressedBytes,
  1739  				Length:           len(payInfo.uncompressedBytes),
  1740  				WireLength:       payInfo.compressedLength + headerLen,
  1741  				CompressedLength: payInfo.compressedLength,
  1742  			})
  1743  		}
  1744  	}
  1745  	if len(ss.binlogs) != 0 {
  1746  		cm := &binarylog.ClientMessage{
  1747  			Message: payInfo.uncompressedBytes,
  1748  		}
  1749  		for _, binlog := range ss.binlogs {
  1750  			binlog.Log(ss.ctx, cm)
  1751  		}
  1752  	}
  1753  	return nil
  1754  }
  1755  
  1756  // MethodFromServerStream returns the method string for the input stream.
  1757  // The returned string is in the format of "/service/method".
  1758  func MethodFromServerStream(stream ServerStream) (string, bool) {
  1759  	return Method(stream.Context())
  1760  }
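
// A minimal usage sketch (editorial addition, not part of the original
// source): recovering the full method name inside a stream interceptor. The
// logInterceptor name is hypothetical.
//
//	func logInterceptor(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
//		if m, ok := grpc.MethodFromServerStream(ss); ok {
//			log.Printf("streaming RPC: %s", m) // e.g. "/echo.Echo/BidiEcho"
//		}
//		return handler(srv, ss)
//	}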
  1761  
  1762  // prepareMsg returns the hdr, payload and data for m, either by encoding and
  1763  // compressing it with the supplied codec and compressors, or, when m is a
  1764  // *PreparedMsg, by reusing the values already stored in it.
  1765  func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
  1766  	if preparedMsg, ok := m.(*PreparedMsg); ok {
  1767  		return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
  1768  	}
  1769  	// The input interface is not a prepared msg.
  1770  	// Marshal and compress the data at this point.
  1771  	data, err = encode(codec, m)
  1772  	if err != nil {
  1773  		return nil, nil, nil, err
  1774  	}
  1775  	compData, err := compress(data, cp, comp)
  1776  	if err != nil {
  1777  		return nil, nil, nil, err
  1778  	}
  1779  	hdr, payload = msgHeader(data, compData)
  1780  	return hdr, payload, data, nil
  1781  }
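
// Background sketch (editorial addition, not part of the original source):
// the hdr returned via msgHeader is the standard gRPC length-prefixed-message
// framing, i.e. a 1-byte compressed flag followed by the 4-byte big-endian
// length of the payload that follows. Roughly:
//
//	hdr := make([]byte, 5)
//	if compData != nil {
//		hdr[0] = 1 // payload bytes are compressed
//		payload = compData
//	} else {
//		payload = data
//	}
//	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))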