gitee.com/ks-custle/core-gm@v0.0.0-20230922171213-b83bdd97b62c/grpc/stream.go

     1  /*
     2   *
     3   * Copyright 2014 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   */
    18  
    19  package grpc
    20  
    21  import (
    22  	"context"
    23  	"errors"
    24  	"io"
    25  	"math"
    26  	"strconv"
    27  	"sync"
    28  	"time"
    29  
    30  	"gitee.com/ks-custle/core-gm/grpc/balancer"
    31  	"gitee.com/ks-custle/core-gm/grpc/codes"
    32  	"gitee.com/ks-custle/core-gm/grpc/encoding"
    33  	"gitee.com/ks-custle/core-gm/grpc/internal/balancerload"
    34  	"gitee.com/ks-custle/core-gm/grpc/internal/binarylog"
    35  	"gitee.com/ks-custle/core-gm/grpc/internal/channelz"
    36  	"gitee.com/ks-custle/core-gm/grpc/internal/grpcrand"
    37  	"gitee.com/ks-custle/core-gm/grpc/internal/grpcutil"
    38  	iresolver "gitee.com/ks-custle/core-gm/grpc/internal/resolver"
    39  	"gitee.com/ks-custle/core-gm/grpc/internal/serviceconfig"
    40  	"gitee.com/ks-custle/core-gm/grpc/internal/transport"
    41  	"gitee.com/ks-custle/core-gm/grpc/metadata"
    42  	"gitee.com/ks-custle/core-gm/grpc/peer"
    43  	"gitee.com/ks-custle/core-gm/grpc/stats"
    44  	"gitee.com/ks-custle/core-gm/grpc/status"
    45  	"gitee.com/ks-custle/core-gm/net/trace"
    46  )
    47  
    48  // StreamHandler defines the handler called by gRPC server to complete the
    49  // execution of a streaming RPC. If a StreamHandler returns an error, it
    50  // should be produced by the status package, or else gRPC will use
    51  // codes.Unknown as the status code and err.Error() as the status message
    52  // of the RPC.
    53  type StreamHandler func(srv interface{}, stream ServerStream) error
    54  
    55  // StreamDesc represents a streaming RPC service's method specification.  Used
    56  // on the server when registering services and on the client when initiating
    57  // new streams.
    58  type StreamDesc struct {
    59  	// StreamName and Handler are only used when registering handlers on a
    60  	// server.
    61  	StreamName string        // the name of the method excluding the service
    62  	Handler    StreamHandler // the handler called for the method
    63  
    64  	// ServerStreams and ClientStreams are used for registering handlers on a
    65  	// server as well as defining RPC behavior when passed to NewClientStream
    66  	// and ClientConn.NewStream.  At least one must be true.
    67  	ServerStreams bool // indicates the server can perform streaming sends
    68  	ClientStreams bool // indicates the client can perform streaming sends
    69  }
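
        // As an illustrative sketch (not part of this file), a bidirectional-streaming
        // method would typically be registered with a StreamDesc along these lines;
        // the method and handler names are hypothetical stand-ins for generated code:
        //
        //	var chatStreamDesc = StreamDesc{
        //		StreamName:    "Chat",                    // method name without the service prefix
        //		Handler:       _ChatService_Chat_Handler, // generated StreamHandler
        //		ServerStreams: true,                      // server may send a stream of messages
        //		ClientStreams: true,                      // client may send a stream of messages
        //	}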
    70  
    71  // Stream defines the common interface a client or server stream has to satisfy.
    72  //
    73  // Deprecated: See ClientStream and ServerStream documentation instead.
    74  type Stream interface {
    75  	// Deprecated: See ClientStream and ServerStream documentation instead.
    76  	Context() context.Context
    77  	// Deprecated: See ClientStream and ServerStream documentation instead.
    78  	SendMsg(m interface{}) error
    79  	// Deprecated: See ClientStream and ServerStream documentation instead.
    80  	RecvMsg(m interface{}) error
    81  }
    82  
    83  // ClientStream defines the client-side behavior of a streaming RPC.
    84  //
    85  // All errors returned from ClientStream methods are compatible with the
    86  // status package.
    87  type ClientStream interface {
    88  	// Header returns the header metadata received from the server if there
    89  	// is any. It blocks if the metadata is not ready to read.
    90  	Header() (metadata.MD, error)
    91  	// Trailer returns the trailer metadata from the server, if there is any.
    92  	// It must only be called after stream.CloseAndRecv has returned, or
    93  	// stream.Recv has returned a non-nil error (including io.EOF).
    94  	Trailer() metadata.MD
    95  	// CloseSend closes the send direction of the stream. It closes the stream
    96  	// when a non-nil error is encountered. It is also not safe to call CloseSend
    97  	// concurrently with SendMsg.
    98  	CloseSend() error
    99  	// Context returns the context for this stream.
   100  	//
   101  	// It should not be called until after Header or RecvMsg has returned. Once
   102  	// called, subsequent client-side retries are disabled.
   103  	Context() context.Context
   104  	// SendMsg is generally called by generated code. On error, SendMsg aborts
   105  	// the stream. If the error was generated by the client, the status is
   106  	// returned directly; otherwise, io.EOF is returned and the status of
   107  	// the stream may be discovered using RecvMsg.
   108  	//
   109  	// SendMsg blocks until:
   110  	//   - There is sufficient flow control to schedule m with the transport, or
   111  	//   - The stream is done, or
   112  	//   - The stream breaks.
   113  	//
   114  	// SendMsg does not wait until the message is received by the server. An
   115  	// untimely stream closure may result in lost messages. To ensure delivery,
   116  	// users should ensure the RPC completed successfully using RecvMsg.
   117  	//
   118  	// It is safe to have a goroutine calling SendMsg and another goroutine
   119  	// calling RecvMsg on the same stream at the same time, but it is not safe
   120  	// to call SendMsg on the same stream in different goroutines. It is also
   121  	// not safe to call CloseSend concurrently with SendMsg.
   122  	SendMsg(m interface{}) error
   123  	// RecvMsg blocks until it receives a message into m or the stream is
   124  	// done. It returns io.EOF when the stream completes successfully. On
   125  	// any other error, the stream is aborted and the error contains the RPC
   126  	// status.
   127  	//
   128  	// It is safe to have a goroutine calling SendMsg and another goroutine
   129  	// calling RecvMsg on the same stream at the same time, but it is not
   130  	// safe to call RecvMsg on the same stream in different goroutines.
   131  	RecvMsg(m interface{}) error
   132  }
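
        // A minimal sketch of the contract above for a bidirectional stream (the
        // message types are hypothetical, and the stream comes from generated code or
        // ClientConn.NewStream): at most one goroutine sends and one receives.
        //
        //	go func() {
        //		for _, req := range requests {
        //			if err := stream.SendMsg(req); err != nil {
        //				return // transport errors surface as the RPC status via RecvMsg
        //			}
        //		}
        //		stream.CloseSend() // same goroutine as SendMsg, so this is safe
        //	}()
        //	for {
        //		resp := new(Response)
        //		if err := stream.RecvMsg(resp); err == io.EOF {
        //			break // stream finished with an OK status
        //		} else if err != nil {
        //			return err // RPC failed; err carries the status
        //		}
        //	}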
   133  
   134  // NewStream creates a new Stream for the client side. This is typically
   135  // called by generated code. ctx is used for the lifetime of the stream.
   136  //
   137  // To ensure resources are not leaked due to the stream returned, one of the following
   138  // actions must be performed:
   139  //
   140  //  1. Call Close on the ClientConn.
   141  //  2. Cancel the context provided.
   142  //  3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
   143  //     client-streaming RPC, for instance, might use the helper function
   144  //     CloseAndRecv (note that CloseSend does not Recv, therefore is not
   145  //     guaranteed to release all resources).
   146  //  4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
   147  //
   148  // If none of the above happen, a goroutine and a context will be leaked, and grpc
   149  // will not call the optionally-configured stats handler with a stats.End message.
   150  func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
   151  	// allow interceptor to see all applicable call options, which means those
   152  	// configured as defaults from dial option as well as per-call options
   153  	opts = combine(cc.dopts.callOptions, opts)
   154  
   155  	if cc.dopts.streamInt != nil {
   156  		return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
   157  	}
   158  	return newClientStream(ctx, desc, cc, method, opts...)
   159  }
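
        // A hedged sketch of calling NewStream directly for a server-streaming method
        // (generated code normally does this); the method name and message types are
        // hypothetical. Receiving until a non-nil error satisfies rule 3 above, and a
        // single SendMsg on a non-client-streaming RPC also closes the send direction.
        //
        //	desc := &StreamDesc{StreamName: "ListItems", ServerStreams: true}
        //	stream, err := cc.NewStream(ctx, desc, "/inventory.Inventory/ListItems")
        //	if err != nil {
        //		return err
        //	}
        //	if err := stream.SendMsg(req); err != nil {
        //		return err
        //	}
        //	for {
        //		item := new(Item)
        //		if err := stream.RecvMsg(item); err == io.EOF {
        //			break // success; the stream's resources are released
        //		} else if err != nil {
        //			return err // rule 3: a non-nil error also releases resources
        //		}
        //	}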
   160  
   161  // NewClientStream is a wrapper for ClientConn.NewStream.
   162  func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
   163  	return cc.NewStream(ctx, desc, method, opts...)
   164  }
   165  
   166  func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
   167  	if channelz.IsOn() {
   168  		cc.incrCallsStarted()
   169  		defer func() {
   170  			if err != nil {
   171  				cc.incrCallsFailed()
   172  			}
   173  		}()
   174  	}
   175  	// Provide an opportunity for the first RPC to see the first service config
   176  	// provided by the resolver.
   177  	if err := cc.waitForResolvedAddrs(ctx); err != nil {
   178  		return nil, err
   179  	}
   180  
   181  	var mc serviceconfig.MethodConfig
   182  	var onCommit func()
   183  	var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
   184  		return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...)
   185  	}
   186  
   187  	rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method}
   188  	rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo)
   189  	if err != nil {
   190  		return nil, toRPCErr(err)
   191  	}
   192  
   193  	if rpcConfig != nil {
   194  		if rpcConfig.Context != nil {
   195  			ctx = rpcConfig.Context
   196  		}
   197  		mc = rpcConfig.MethodConfig
   198  		onCommit = rpcConfig.OnCommitted
   199  		if rpcConfig.Interceptor != nil {
   200  			rpcInfo.Context = nil
   201  			ns := newStream
   202  			newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
   203  				cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns)
   204  				if err != nil {
   205  					return nil, toRPCErr(err)
   206  				}
   207  				return cs, nil
   208  			}
   209  		}
   210  	}
   211  
   212  	return newStream(ctx, func() {})
   213  }
   214  
   215  func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) {
   216  	c := defaultCallInfo()
   217  	if mc.WaitForReady != nil {
   218  		c.failFast = !*mc.WaitForReady
   219  	}
   220  
   221  	// Possible context leak:
   222  	// The cancel function for the child context we create will only be called
   223  	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
   224  	// an error is generated by SendMsg.
   225  	// https://github.com/grpc/grpc-go/issues/1818.
   226  	var cancel context.CancelFunc
   227  	if mc.Timeout != nil && *mc.Timeout >= 0 {
   228  		ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
   229  	} else {
   230  		ctx, cancel = context.WithCancel(ctx)
   231  	}
   232  	defer func() {
   233  		if err != nil {
   234  			cancel()
   235  		}
   236  	}()
   237  
   238  	for _, o := range opts {
   239  		if err := o.before(c); err != nil {
   240  			return nil, toRPCErr(err)
   241  		}
   242  	}
   243  	c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
   244  	c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
   245  	if err := setCallInfoCodec(c); err != nil {
   246  		return nil, err
   247  	}
   248  
   249  	callHdr := &transport.CallHdr{
   250  		Host:           cc.authority,
   251  		Method:         method,
   252  		ContentSubtype: c.contentSubtype,
   253  		DoneFunc:       doneFunc,
   254  	}
   255  
   256  	// Set our outgoing compression according to the UseCompressor CallOption, if
   257  	// set.  In that case, also find the compressor from the encoding package.
   258  	// Otherwise, use the compressor configured by the WithCompressor DialOption,
   259  	// if set.
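        	//
        	// For illustration (assuming this fork mirrors upstream grpc-go's
        	// UseCompressor call option and its registered encoding/gzip subpackage),
        	// the per-call path is what a caller triggers with
        	//
        	//	cc.Invoke(ctx, method, req, reply, grpc.UseCompressor(gzip.Name))
        	//
        	// while the dial-option path corresponds to grpc.WithCompressor(...).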
   260  	var cp Compressor
   261  	var comp encoding.Compressor
   262  	if ct := c.compressorType; ct != "" {
   263  		callHdr.SendCompress = ct
   264  		if ct != encoding.Identity {
   265  			comp = encoding.GetCompressor(ct)
   266  			if comp == nil {
   267  				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
   268  			}
   269  		}
   270  	} else if cc.dopts.cp != nil {
   271  		callHdr.SendCompress = cc.dopts.cp.Type()
   272  		cp = cc.dopts.cp
   273  	}
   274  	if c.creds != nil {
   275  		callHdr.Creds = c.creds
   276  	}
   277  
   278  	cs := &clientStream{
   279  		callHdr:      callHdr,
   280  		ctx:          ctx,
   281  		methodConfig: &mc,
   282  		opts:         opts,
   283  		callInfo:     c,
   284  		cc:           cc,
   285  		desc:         desc,
   286  		codec:        c.codec,
   287  		cp:           cp,
   288  		comp:         comp,
   289  		cancel:       cancel,
   290  		firstAttempt: true,
   291  		onCommit:     onCommit,
   292  	}
   293  	if !cc.dopts.disableRetry {
   294  		cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
   295  	}
   296  	cs.binlog = binarylog.GetMethodLogger(method)
   297  
   298  	if err := cs.newAttemptLocked(false /* isTransparent */); err != nil {
   299  		cs.finish(err)
   300  		return nil, err
   301  	}
   302  
   303  	op := func(a *csAttempt) error { return a.newStream() }
   304  	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
   305  		cs.finish(err)
   306  		return nil, err
   307  	}
   308  
   309  	if cs.binlog != nil {
   310  		md, _ := metadata.FromOutgoingContext(ctx)
   311  		logEntry := &binarylog.ClientHeader{
   312  			OnClientSide: true,
   313  			Header:       md,
   314  			MethodName:   method,
   315  			Authority:    cs.cc.authority,
   316  		}
   317  		if deadline, ok := ctx.Deadline(); ok {
   318  			logEntry.Timeout = time.Until(deadline)
   319  			if logEntry.Timeout < 0 {
   320  				logEntry.Timeout = 0
   321  			}
   322  		}
   323  		cs.binlog.Log(logEntry)
   324  	}
   325  
   326  	if desc != unaryStreamDesc {
   327  		// Listen on cc and stream contexts to cleanup when the user closes the
   328  		// ClientConn or cancels the stream context.  In all other cases, an error
   329  		// should already be injected into the recv buffer by the transport, which
   330  		// the client will eventually receive, and then we will cancel the stream's
   331  		// context in clientStream.finish.
   332  		go func() {
   333  			select {
   334  			case <-cc.ctx.Done():
   335  				cs.finish(ErrClientConnClosing)
   336  			case <-ctx.Done():
   337  				cs.finish(toRPCErr(ctx.Err()))
   338  			}
   339  		}()
   340  	}
   341  	return cs, nil
   342  }
   343  
   344  // newAttemptLocked creates a new attempt with a transport.
   345  // If it succeeds, then it replaces clientStream's attempt with this new attempt.
   346  func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) {
   347  	ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp)
   348  	method := cs.callHdr.Method
   349  	sh := cs.cc.dopts.copts.StatsHandler
   350  	var beginTime time.Time
   351  	if sh != nil {
   352  		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast})
   353  		beginTime = time.Now()
   354  		begin := &stats.Begin{
   355  			Client:                    true,
   356  			BeginTime:                 beginTime,
   357  			FailFast:                  cs.callInfo.failFast,
   358  			IsClientStream:            cs.desc.ClientStreams,
   359  			IsServerStream:            cs.desc.ServerStreams,
   360  			IsTransparentRetryAttempt: isTransparent,
   361  		}
   362  		sh.HandleRPC(ctx, begin)
   363  	}
   364  
   365  	var trInfo *traceInfo
   366  	if EnableTracing {
   367  		trInfo = &traceInfo{
   368  			tr: trace.New("grpc.Sent."+methodFamily(method), method),
   369  			firstLine: firstLine{
   370  				client: true,
   371  			},
   372  		}
   373  		if deadline, ok := ctx.Deadline(); ok {
   374  			trInfo.firstLine.deadline = time.Until(deadline)
   375  		}
   376  		trInfo.tr.LazyLog(&trInfo.firstLine, false)
   377  		ctx = trace.NewContext(ctx, trInfo.tr)
   378  	}
   379  
   380  	newAttempt := &csAttempt{
   381  		ctx:          ctx,
   382  		beginTime:    beginTime,
   383  		cs:           cs,
   384  		dc:           cs.cc.dopts.dc,
   385  		statsHandler: sh,
   386  		trInfo:       trInfo,
   387  	}
   388  	defer func() {
   389  		if retErr != nil {
   390  			// This attempt is not set in the clientStream, so its finish won't
   391  			// be called. Call it here for stats and trace in case they are not
   392  			// nil.
   393  			newAttempt.finish(retErr)
   394  		}
   395  	}()
   396  
   397  	if err := ctx.Err(); err != nil {
   398  		return toRPCErr(err)
   399  	}
   400  
   401  	// Target.Scheme is deprecated, use Target.GetScheme() instead.
   402  	//if cs.cc.parsedTarget.Scheme == "xds" {
   403  	if cs.cc.parsedTarget.GetScheme() == "xds" {
   404  		// Add extra metadata (metadata that will be added by transport) to context
   405  		// so the balancer can see them.
   406  		ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
   407  			"content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
   408  		))
   409  	}
   410  	t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method)
   411  	if err != nil {
   412  		return err
   413  	}
   414  	if trInfo != nil {
   415  		trInfo.firstLine.SetRemoteAddr(t.RemoteAddr())
   416  	}
   417  	newAttempt.t = t
   418  	newAttempt.done = done
   419  	cs.attempt = newAttempt
   420  	return nil
   421  }
   422  
   423  func (a *csAttempt) newStream() error {
   424  	cs := a.cs
   425  	cs.callHdr.PreviousAttempts = cs.numRetries
   426  	s, err := a.t.NewStream(a.ctx, cs.callHdr)
   427  	if err != nil {
   428  		// Return without converting to an RPC error so retry code can
   429  		// inspect.
   430  		return err
   431  	}
   432  	cs.attempt.s = s
   433  	cs.attempt.p = &parser{r: s}
   434  	return nil
   435  }
   436  
   437  // clientStream implements a client side Stream.
   438  type clientStream struct {
   439  	callHdr  *transport.CallHdr
   440  	opts     []CallOption
   441  	callInfo *callInfo
   442  	cc       *ClientConn
   443  	desc     *StreamDesc
   444  
   445  	codec baseCodec
   446  	cp    Compressor
   447  	comp  encoding.Compressor
   448  
   449  	cancel context.CancelFunc // cancels all attempts
   450  
   451  	sentLast bool // sent an end stream
   452  
   453  	methodConfig *MethodConfig
   454  
   455  	ctx context.Context // the application's context, wrapped by stats/tracing
   456  
   457  	retryThrottler *retryThrottler // The throttler active when the RPC began.
   458  
   459  	binlog *binarylog.MethodLogger // Binary logger, can be nil.
   460  	// serverHeaderBinlogged is a boolean for whether server header has been
   461  	// logged. The server header will be logged the first time one of the
   462  	// following happens: stream.Header(), stream.Recv().
   463  	//
   464  	// It's only read and used by Recv() and Header(), so it doesn't need to be
   465  	// synchronized.
   466  	serverHeaderBinlogged bool
   467  
   468  	mu                      sync.Mutex
   469  	firstAttempt            bool // if true, transparent retry is valid
   470  	numRetries              int  // exclusive of transparent retry attempt(s)
   471  	numRetriesSincePushback int  // retries since pushback; to reset backoff
   472  	finished                bool // TODO: replace with atomic cmpxchg or sync.Once?
   473  	// attempt is the active client stream attempt.
   474  	// The only place it is written is the newAttemptLocked method, and that method never writes nil.
   475  	// So attempt can be nil only inside the newClientStream function, when the clientStream is first created.
   476  	// One of the first things done after the clientStream's creation is to call newAttemptLocked, which either
   477  	// assigns a non-nil value to attempt or returns an error. If an error is returned from newAttemptLocked,
   478  	// then newClientStream calls finish on the clientStream and returns. So the finish method is the only
   479  	// place where we need to check whether attempt is nil.
   480  	attempt *csAttempt
   481  	// TODO(hedging): hedging will have multiple attempts simultaneously.
   482  	committed  bool // active attempt committed for retry?
   483  	onCommit   func()
   484  	buffer     []func(a *csAttempt) error // operations to replay on retry
   485  	bufferSize int                        // current size of buffer
   486  }
   487  
   488  // csAttempt implements a single transport stream attempt within a
   489  // clientStream.
   490  type csAttempt struct {
   491  	ctx  context.Context
   492  	cs   *clientStream
   493  	t    transport.ClientTransport
   494  	s    *transport.Stream
   495  	p    *parser
   496  	done func(balancer.DoneInfo)
   497  
   498  	finished  bool
   499  	dc        Decompressor
   500  	decomp    encoding.Compressor
   501  	decompSet bool
   502  
   503  	mu sync.Mutex // guards trInfo.tr
   504  	// trInfo may be nil (if EnableTracing is false).
   505  	// trInfo.tr is set when created (if EnableTracing is true),
   506  	// and cleared when the finish method is called.
   507  	trInfo *traceInfo
   508  
   509  	statsHandler stats.Handler
   510  	beginTime    time.Time
   511  }
   512  
   513  func (cs *clientStream) commitAttemptLocked() {
   514  	if !cs.committed && cs.onCommit != nil {
   515  		cs.onCommit()
   516  	}
   517  	cs.committed = true
   518  	cs.buffer = nil
   519  }
   520  
   521  func (cs *clientStream) commitAttempt() {
   522  	cs.mu.Lock()
   523  	cs.commitAttemptLocked()
   524  	cs.mu.Unlock()
   525  }
   526  
   527  // shouldRetry returns a nil error if the RPC should be retried; otherwise it returns
   528  // the error that should be returned by the operation.  If the RPC should be
   529  // retried, the bool indicates whether it is being retried transparently.
   530  func (cs *clientStream) shouldRetry(err error) (bool, error) {
   531  	if cs.attempt.s == nil {
   532  		// Error from NewClientStream.
   533  		nse, ok := err.(*transport.NewStreamError)
   534  		if !ok {
   535  			// Unexpected, but assume no I/O was performed and the RPC is not
   536  			// fatal, so retry indefinitely.
   537  			return true, nil
   538  		}
   539  
   540  		// Unwrap and convert error.
   541  		err = toRPCErr(nse.Err)
   542  
   543  		// Never retry DoNotRetry errors, which indicate the RPC should not be
   544  		// retried due to max header list size violation, etc.
   545  		if nse.DoNotRetry {
   546  			return false, err
   547  		}
   548  
   549  		// In the event of a non-IO operation error from NewStream, we never
   550  		// attempted to write anything to the wire, so we can retry
   551  		// indefinitely.
   552  		if !nse.DoNotTransparentRetry {
   553  			return true, nil
   554  		}
   555  	}
   556  	if cs.finished || cs.committed {
   557  		// RPC is finished or committed; cannot retry.
   558  		return false, err
   559  	}
   560  	// Wait for the trailers.
   561  	unprocessed := false
   562  	if cs.attempt.s != nil {
   563  		<-cs.attempt.s.Done()
   564  		unprocessed = cs.attempt.s.Unprocessed()
   565  	}
   566  	if cs.firstAttempt && unprocessed {
   567  		// First attempt, stream unprocessed: transparently retry.
   568  		return true, nil
   569  	}
   570  	if cs.cc.dopts.disableRetry {
   571  		return false, err
   572  	}
   573  
   574  	pushback := 0
   575  	hasPushback := false
   576  	if cs.attempt.s != nil {
   577  		if !cs.attempt.s.TrailersOnly() {
   578  			return false, err
   579  		}
   580  
   581  		// TODO(retry): Move down if the spec changes to not check server pushback
   582  		// before considering this a failure for throttling.
   583  		sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"]
   584  		if len(sps) == 1 {
   585  			var e error
   586  			if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
   587  				channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0])
   588  				cs.retryThrottler.throttle() // This counts as a failure for throttling.
   589  				return false, err
   590  			}
   591  			hasPushback = true
   592  		} else if len(sps) > 1 {
   593  			channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps)
   594  			cs.retryThrottler.throttle() // This counts as a failure for throttling.
   595  			return false, err
   596  		}
   597  	}
   598  
   599  	var code codes.Code
   600  	if cs.attempt.s != nil {
   601  		code = cs.attempt.s.Status().Code()
   602  	} else {
   603  		code = status.Convert(err).Code()
   604  	}
   605  
   606  	rp := cs.methodConfig.RetryPolicy
   607  	if rp == nil || !rp.RetryableStatusCodes[code] {
   608  		return false, err
   609  	}
   610  
   611  	// Note: the ordering here is important; we count this as a failure
   612  	// only if the code matched a retryable code.
   613  	if cs.retryThrottler.throttle() {
   614  		return false, err
   615  	}
   616  	if cs.numRetries+1 >= rp.MaxAttempts {
   617  		return false, err
   618  	}
   619  
   620  	var dur time.Duration
   621  	if hasPushback {
   622  		dur = time.Millisecond * time.Duration(pushback)
   623  		cs.numRetriesSincePushback = 0
   624  	} else {
   625  		fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback))
   626  		cur := float64(rp.InitialBackoff) * fact
   627  		if max := float64(rp.MaxBackoff); cur > max {
   628  			cur = max
   629  		}
   630  		dur = time.Duration(grpcrand.Int63n(int64(cur)))
   631  		cs.numRetriesSincePushback++
   632  	}
   633  
   634  	// TODO(dfawley): we could eagerly fail here if dur puts us past the
   635  	// deadline, but unsure if it is worth doing.
   636  	t := time.NewTimer(dur)
   637  	select {
   638  	case <-t.C:
   639  		cs.numRetries++
   640  		return false, nil
   641  	case <-cs.ctx.Done():
   642  		t.Stop()
   643  		return false, status.FromContextError(cs.ctx.Err()).Err()
   644  	}
   645  }
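
        // To make the backoff arithmetic above concrete: with a hypothetical policy of
        // InitialBackoff=100ms, BackoffMultiplier=2 and MaxBackoff=1s, the n-th retry
        // since the last server pushback sleeps a uniformly random duration in
        // [0, min(100ms*2^n, 1s)), while a valid "grpc-retry-pushback-ms" trailer
        // replaces that interval with the exact server-specified delay.
        //
        // Such a policy normally arrives through the method's service config; a sketch
        // of the JSON form (field names assumed to follow the upstream gRPC retry spec):
        //
        //	"retryPolicy": {
        //		"maxAttempts": 4,
        //		"initialBackoff": "0.1s",
        //		"maxBackoff": "1s",
        //		"backoffMultiplier": 2,
        //		"retryableStatusCodes": ["UNAVAILABLE"]
        //	}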
   646  
   647  // Returns nil if a retry was performed and succeeded; error otherwise.
   648  func (cs *clientStream) retryLocked(lastErr error) error {
   649  	for {
   650  		cs.attempt.finish(toRPCErr(lastErr))
   651  		isTransparent, err := cs.shouldRetry(lastErr)
   652  		if err != nil {
   653  			cs.commitAttemptLocked()
   654  			return err
   655  		}
   656  		cs.firstAttempt = false
   657  		if err := cs.newAttemptLocked(isTransparent); err != nil {
   658  			return err
   659  		}
   660  		if lastErr = cs.replayBufferLocked(); lastErr == nil {
   661  			return nil
   662  		}
   663  	}
   664  }
   665  
   666  func (cs *clientStream) Context() context.Context {
   667  	cs.commitAttempt()
   668  	// No need to lock before using attempt, since we know it is committed and
   669  	// cannot change.
   670  	return cs.attempt.s.Context()
   671  }
   672  
   673  func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
   674  	cs.mu.Lock()
   675  	for {
   676  		if cs.committed {
   677  			cs.mu.Unlock()
   678  			// toRPCErr is used in case the error from the attempt comes from
   679  			// NewClientStream, which intentionally doesn't return a status
   680  			// error to allow for further inspection; all other errors should
   681  			// already be status errors.
   682  			return toRPCErr(op(cs.attempt))
   683  		}
   684  		a := cs.attempt
   685  		cs.mu.Unlock()
   686  		err := op(a)
   687  		cs.mu.Lock()
   688  		if a != cs.attempt {
   689  			// We started another attempt already.
   690  			continue
   691  		}
   692  		if err == io.EOF {
   693  			<-a.s.Done()
   694  		}
   695  		if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
   696  			onSuccess()
   697  			cs.mu.Unlock()
   698  			return err
   699  		}
   700  		if err := cs.retryLocked(err); err != nil {
   701  			cs.mu.Unlock()
   702  			return err
   703  		}
   704  	}
   705  }
   706  
   707  func (cs *clientStream) Header() (metadata.MD, error) {
   708  	var m metadata.MD
   709  	err := cs.withRetry(func(a *csAttempt) error {
   710  		var err error
   711  		m, err = a.s.Header()
   712  		return toRPCErr(err)
   713  	}, cs.commitAttemptLocked)
   714  	if err != nil {
   715  		cs.finish(err)
   716  		return nil, err
   717  	}
   718  	if cs.binlog != nil && !cs.serverHeaderBinlogged {
   719  		// Only log if binary log is on and header has not been logged.
   720  		logEntry := &binarylog.ServerHeader{
   721  			OnClientSide: true,
   722  			Header:       m,
   723  			PeerAddr:     nil,
   724  		}
   725  		if peer, ok := peer.FromContext(cs.Context()); ok {
   726  			logEntry.PeerAddr = peer.Addr
   727  		}
   728  		cs.binlog.Log(logEntry)
   729  		cs.serverHeaderBinlogged = true
   730  	}
   731  	return m, err
   732  }
   733  
   734  func (cs *clientStream) Trailer() metadata.MD {
   735  	// On RPC failure, we never need to retry, because valid usage requires that
   736  	// RecvMsg() has returned a non-nil error before this function is called.
   737  	// We would have retried earlier if necessary.
   738  	//
   739  	// Commit the attempt anyway, just in case users are not following those
   740  	// directions -- it will prevent races and should not meaningfully impact
   741  	// performance.
   742  	cs.commitAttempt()
   743  	if cs.attempt.s == nil {
   744  		return nil
   745  	}
   746  	return cs.attempt.s.Trailer()
   747  }
   748  
   749  func (cs *clientStream) replayBufferLocked() error {
   750  	a := cs.attempt
   751  	for _, f := range cs.buffer {
   752  		if err := f(a); err != nil {
   753  			return err
   754  		}
   755  	}
   756  	return nil
   757  }
   758  
   759  func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
   760  	// Note: we still will buffer if retry is disabled (for transparent retries).
   761  	if cs.committed {
   762  		return
   763  	}
   764  	cs.bufferSize += sz
   765  	if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
   766  		cs.commitAttemptLocked()
   767  		return
   768  	}
   769  	cs.buffer = append(cs.buffer, op)
   770  }
   771  
   772  func (cs *clientStream) SendMsg(m interface{}) (err error) {
   773  	defer func() {
   774  		if err != nil && err != io.EOF {
   775  			// Call finish on the client stream for errors generated by this SendMsg
   776  			// call, as these indicate problems created by this client.  (Transport
   777  			// errors are converted to an io.EOF error in csAttempt.sendMsg; the real
   778  			// error will be returned from RecvMsg eventually in that case, or be
   779  			// retried.)
   780  			cs.finish(err)
   781  		}
   782  	}()
   783  	if cs.sentLast {
   784  		return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
   785  	}
   786  	if !cs.desc.ClientStreams {
   787  		cs.sentLast = true
   788  	}
   789  
   790  	// load hdr, payload, data
   791  	hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
   792  	if err != nil {
   793  		return err
   794  	}
   795  
   796  	// TODO(dfawley): should we be checking len(data) instead?
   797  	if len(payload) > *cs.callInfo.maxSendMessageSize {
   798  		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
   799  	}
   800  	msgBytes := data // Store the pointer before setting to nil. For binary logging.
   801  	op := func(a *csAttempt) error {
   802  		err := a.sendMsg(m, hdr, payload, data)
   803  		// nil out the message and data when replaying; they are only needed for
   804  		// stats, which are disabled for subsequent attempts.
   805  		m, data = nil, nil
   806  		return err
   807  	}
   808  	err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
   809  	if cs.binlog != nil && err == nil {
   810  		cs.binlog.Log(&binarylog.ClientMessage{
   811  			OnClientSide: true,
   812  			Message:      msgBytes,
   813  		})
   814  	}
   815  	return
   816  }
   817  
   818  func (cs *clientStream) RecvMsg(m interface{}) error {
   819  	if cs.binlog != nil && !cs.serverHeaderBinlogged {
   820  		// Call Header() to binary log header if it's not already logged.
   821  		cs.Header()
   822  	}
   823  	var recvInfo *payloadInfo
   824  	if cs.binlog != nil {
   825  		recvInfo = &payloadInfo{}
   826  	}
   827  	err := cs.withRetry(func(a *csAttempt) error {
   828  		return a.recvMsg(m, recvInfo)
   829  	}, cs.commitAttemptLocked)
   830  	if cs.binlog != nil && err == nil {
   831  		cs.binlog.Log(&binarylog.ServerMessage{
   832  			OnClientSide: true,
   833  			Message:      recvInfo.uncompressedBytes,
   834  		})
   835  	}
   836  	if err != nil || !cs.desc.ServerStreams {
   837  		// err != nil or non-server-streaming indicates end of stream.
   838  		cs.finish(err)
   839  
   840  		if cs.binlog != nil {
   841  			// finish will not log Trailer. Log Trailer here.
   842  			logEntry := &binarylog.ServerTrailer{
   843  				OnClientSide: true,
   844  				Trailer:      cs.Trailer(),
   845  				Err:          err,
   846  			}
   847  			if logEntry.Err == io.EOF {
   848  				logEntry.Err = nil
   849  			}
   850  			if peer, ok := peer.FromContext(cs.Context()); ok {
   851  				logEntry.PeerAddr = peer.Addr
   852  			}
   853  			cs.binlog.Log(logEntry)
   854  		}
   855  	}
   856  	return err
   857  }
   858  
   859  func (cs *clientStream) CloseSend() error {
   860  	if cs.sentLast {
   861  		// TODO: return an error and finish the stream instead, due to API misuse?
   862  		return nil
   863  	}
   864  	cs.sentLast = true
   865  	op := func(a *csAttempt) error {
   866  		a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
   867  		// Always return nil; io.EOF is the only error that might make sense
   868  		// instead, but there is no need to signal the client to call RecvMsg
   869  		// as the only use left for the stream after CloseSend is to call
   870  		// RecvMsg.  This also matches historical behavior.
   871  		return nil
   872  	}
   873  	cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
   874  	if cs.binlog != nil {
   875  		cs.binlog.Log(&binarylog.ClientHalfClose{
   876  			OnClientSide: true,
   877  		})
   878  	}
   879  	// CloseSend intentionally never returns an error; see the comment in op above.
   880  	return nil
   881  }
   882  
   883  func (cs *clientStream) finish(err error) {
   884  	if err == io.EOF {
   885  		// Ending a stream with EOF indicates a success.
   886  		err = nil
   887  	}
   888  	cs.mu.Lock()
   889  	if cs.finished {
   890  		cs.mu.Unlock()
   891  		return
   892  	}
   893  	cs.finished = true
   894  	cs.commitAttemptLocked()
   895  	if cs.attempt != nil {
   896  		cs.attempt.finish(err)
   897  		// The CallOption after functions all rely upon having a stream.
   898  		if cs.attempt.s != nil {
   899  			for _, o := range cs.opts {
   900  				o.after(cs.callInfo, cs.attempt)
   901  			}
   902  		}
   903  	}
   904  	cs.mu.Unlock()
   905  	// For binary logging: only log cancel in finish (could be caused by RPC ctx
   906  	// canceled or ClientConn closed). Trailer will be logged in RecvMsg.
   907  	//
   908  	// Only one of cancel or trailer needs to be logged. In the cases where
   909  	// users don't call RecvMsg, users must have already canceled the RPC.
   910  	if cs.binlog != nil && status.Code(err) == codes.Canceled {
   911  		cs.binlog.Log(&binarylog.Cancel{
   912  			OnClientSide: true,
   913  		})
   914  	}
   915  	if err == nil {
   916  		cs.retryThrottler.successfulRPC()
   917  	}
   918  	if channelz.IsOn() {
   919  		if err != nil {
   920  			cs.cc.incrCallsFailed()
   921  		} else {
   922  			cs.cc.incrCallsSucceeded()
   923  		}
   924  	}
   925  	cs.cancel()
   926  }
   927  
   928  func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
   929  	cs := a.cs
   930  	if a.trInfo != nil {
   931  		a.mu.Lock()
   932  		if a.trInfo.tr != nil {
   933  			a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
   934  		}
   935  		a.mu.Unlock()
   936  	}
   937  	if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
   938  		if !cs.desc.ClientStreams {
   939  			// For non-client-streaming RPCs, we return nil instead of EOF on error
   940  			// because the generated code requires it.  finish is not called; RecvMsg()
   941  			// will call it with the stream's status independently.
   942  			return nil
   943  		}
   944  		return io.EOF
   945  	}
   946  	if a.statsHandler != nil {
   947  		a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
   948  	}
   949  	if channelz.IsOn() {
   950  		a.t.IncrMsgSent()
   951  	}
   952  	return nil
   953  }
   954  
   955  func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
   956  	cs := a.cs
   957  	if a.statsHandler != nil && payInfo == nil {
   958  		payInfo = &payloadInfo{}
   959  	}
   960  
   961  	if !a.decompSet {
   962  		// Block until we receive headers containing received message encoding.
   963  		if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
   964  			if a.dc == nil || a.dc.Type() != ct {
   965  				// No configured decompressor, or it does not match the incoming
   966  				// message encoding; attempt to find a registered compressor that does.
   967  				a.dc = nil
   968  				a.decomp = encoding.GetCompressor(ct)
   969  			}
   970  		} else {
   971  			// No compression is used; disable our decompressor.
   972  			a.dc = nil
   973  		}
   974  		// Only initialize this state once per stream.
   975  		a.decompSet = true
   976  	}
   977  	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
   978  	if err != nil {
   979  		if err == io.EOF {
   980  			if statusErr := a.s.Status().Err(); statusErr != nil {
   981  				return statusErr
   982  			}
   983  			return io.EOF // indicates successful end of stream.
   984  		}
   985  		return toRPCErr(err)
   986  	}
   987  	if a.trInfo != nil {
   988  		a.mu.Lock()
   989  		if a.trInfo.tr != nil {
   990  			a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
   991  		}
   992  		a.mu.Unlock()
   993  	}
   994  	if a.statsHandler != nil {
   995  		a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{
   996  			Client:   true,
   997  			RecvTime: time.Now(),
   998  			Payload:  m,
   999  			// TODO truncate large payload.
  1000  			Data:       payInfo.uncompressedBytes,
  1001  			WireLength: payInfo.wireLength + headerLen,
  1002  			Length:     len(payInfo.uncompressedBytes),
  1003  		})
  1004  	}
  1005  	if channelz.IsOn() {
  1006  		a.t.IncrMsgRecv()
  1007  	}
  1008  	if cs.desc.ServerStreams {
  1009  		// Subsequent messages should be received by subsequent RecvMsg calls.
  1010  		return nil
  1011  	}
  1012  	// Special handling for non-server-stream rpcs.
  1013  	// This recv expects EOF or errors, so we don't collect inPayload.
  1014  	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
  1015  	if err == nil {
  1016  		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
  1017  	}
  1018  	if err == io.EOF {
  1019  		return a.s.Status().Err() // non-server streaming Recv returns nil on success
  1020  	}
  1021  	return toRPCErr(err)
  1022  }
  1023  
  1024  func (a *csAttempt) finish(err error) {
  1025  	a.mu.Lock()
  1026  	if a.finished {
  1027  		a.mu.Unlock()
  1028  		return
  1029  	}
  1030  	a.finished = true
  1031  	if err == io.EOF {
  1032  		// Ending a stream with EOF indicates a success.
  1033  		err = nil
  1034  	}
  1035  	var tr metadata.MD
  1036  	if a.s != nil {
  1037  		a.t.CloseStream(a.s, err)
  1038  		tr = a.s.Trailer()
  1039  	}
  1040  
  1041  	if a.done != nil {
  1042  		br := false
  1043  		if a.s != nil {
  1044  			br = a.s.BytesReceived()
  1045  		}
  1046  		a.done(balancer.DoneInfo{
  1047  			Err:           err,
  1048  			Trailer:       tr,
  1049  			BytesSent:     a.s != nil,
  1050  			BytesReceived: br,
  1051  			ServerLoad:    balancerload.Parse(tr),
  1052  		})
  1053  	}
  1054  	if a.statsHandler != nil {
  1055  		end := &stats.End{
  1056  			Client:    true,
  1057  			BeginTime: a.beginTime,
  1058  			EndTime:   time.Now(),
  1059  			Trailer:   tr,
  1060  			Error:     err,
  1061  		}
  1062  		a.statsHandler.HandleRPC(a.ctx, end)
  1063  	}
  1064  	if a.trInfo != nil && a.trInfo.tr != nil {
  1065  		if err == nil {
  1066  			a.trInfo.tr.LazyPrintf("RPC: [OK]")
  1067  		} else {
  1068  			a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
  1069  			a.trInfo.tr.SetError()
  1070  		}
  1071  		a.trInfo.tr.Finish()
  1072  		a.trInfo.tr = nil
  1073  	}
  1074  	a.mu.Unlock()
  1075  }
  1076  
  1077  // newNonRetryClientStream creates a ClientStream with the specified transport, on the
  1078  // given addrConn.
  1079  //
  1080  // It's expected that the given transport is either the same one in addrConn, or
  1081  // is already closed. To avoid race, transport is specified separately, instead
  1082  // of using ac.transport.
  1083  //
  1084  // Main differences between this and ClientConn.NewStream:
  1085  // - no retry
  1086  // - no service config (or wait for service config)
  1087  // - no tracing or stats
  1088  func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
  1089  	if t == nil {
  1090  		// TODO: return RPC error here?
  1091  		return nil, errors.New("transport provided is nil")
  1092  	}
  1093  	// defaultCallInfo contains unnecessary info (e.g. failFast, maxRetryRPCBufferSize), so we just initialize an empty struct.
  1094  	c := &callInfo{}
  1095  
  1096  	// Possible context leak:
  1097  	// The cancel function for the child context we create will only be called
  1098  	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
  1099  	// an error is generated by SendMsg.
  1100  	// https://github.com/grpc/grpc-go/issues/1818.
  1101  	ctx, cancel := context.WithCancel(ctx)
  1102  	defer func() {
  1103  		if err != nil {
  1104  			cancel()
  1105  		}
  1106  	}()
  1107  
  1108  	for _, o := range opts {
  1109  		if err := o.before(c); err != nil {
  1110  			return nil, toRPCErr(err)
  1111  		}
  1112  	}
  1113  	c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
  1114  	c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
  1115  	if err := setCallInfoCodec(c); err != nil {
  1116  		return nil, err
  1117  	}
  1118  
  1119  	callHdr := &transport.CallHdr{
  1120  		Host:           ac.cc.authority,
  1121  		Method:         method,
  1122  		ContentSubtype: c.contentSubtype,
  1123  	}
  1124  
  1125  	// Set our outgoing compression according to the UseCompressor CallOption, if
  1126  	// set.  In that case, also find the compressor from the encoding package.
  1127  	// Otherwise, use the compressor configured by the WithCompressor DialOption,
  1128  	// if set.
  1129  	var cp Compressor
  1130  	var comp encoding.Compressor
  1131  	if ct := c.compressorType; ct != "" {
  1132  		callHdr.SendCompress = ct
  1133  		if ct != encoding.Identity {
  1134  			comp = encoding.GetCompressor(ct)
  1135  			if comp == nil {
  1136  				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
  1137  			}
  1138  		}
  1139  	} else if ac.cc.dopts.cp != nil {
  1140  		callHdr.SendCompress = ac.cc.dopts.cp.Type()
  1141  		cp = ac.cc.dopts.cp
  1142  	}
  1143  	if c.creds != nil {
  1144  		callHdr.Creds = c.creds
  1145  	}
  1146  
  1147  	// Use a special addrConnStream to avoid retry.
  1148  	as := &addrConnStream{
  1149  		callHdr:  callHdr,
  1150  		ac:       ac,
  1151  		ctx:      ctx,
  1152  		cancel:   cancel,
  1153  		opts:     opts,
  1154  		callInfo: c,
  1155  		desc:     desc,
  1156  		codec:    c.codec,
  1157  		cp:       cp,
  1158  		comp:     comp,
  1159  		t:        t,
  1160  	}
  1161  
  1162  	s, err := as.t.NewStream(as.ctx, as.callHdr)
  1163  	if err != nil {
  1164  		err = toRPCErr(err)
  1165  		return nil, err
  1166  	}
  1167  	as.s = s
  1168  	as.p = &parser{r: s}
  1169  	ac.incrCallsStarted()
  1170  	if desc != unaryStreamDesc {
  1171  		// Listen on cc and stream contexts to cleanup when the user closes the
  1172  		// ClientConn or cancels the stream context.  In all other cases, an error
  1173  		// should already be injected into the recv buffer by the transport, which
  1174  		// the client will eventually receive, and then we will cancel the stream's
  1175  		// context in clientStream.finish.
  1176  		go func() {
  1177  			select {
  1178  			case <-ac.ctx.Done():
  1179  				as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing"))
  1180  			case <-ctx.Done():
  1181  				as.finish(toRPCErr(ctx.Err()))
  1182  			}
  1183  		}()
  1184  	}
  1185  	return as, nil
  1186  }
  1187  
  1188  type addrConnStream struct {
  1189  	s         *transport.Stream
  1190  	ac        *addrConn
  1191  	callHdr   *transport.CallHdr
  1192  	cancel    context.CancelFunc
  1193  	opts      []CallOption
  1194  	callInfo  *callInfo
  1195  	t         transport.ClientTransport
  1196  	ctx       context.Context
  1197  	sentLast  bool
  1198  	desc      *StreamDesc
  1199  	codec     baseCodec
  1200  	cp        Compressor
  1201  	comp      encoding.Compressor
  1202  	decompSet bool
  1203  	dc        Decompressor
  1204  	decomp    encoding.Compressor
  1205  	p         *parser
  1206  	mu        sync.Mutex
  1207  	finished  bool
  1208  }
  1209  
  1210  func (as *addrConnStream) Header() (metadata.MD, error) {
  1211  	m, err := as.s.Header()
  1212  	if err != nil {
  1213  		as.finish(toRPCErr(err))
  1214  	}
  1215  	return m, err
  1216  }
  1217  
  1218  func (as *addrConnStream) Trailer() metadata.MD {
  1219  	return as.s.Trailer()
  1220  }
  1221  
  1222  func (as *addrConnStream) CloseSend() error {
  1223  	if as.sentLast {
  1224  		// TODO: return an error and finish the stream instead, due to API misuse?
  1225  		return nil
  1226  	}
  1227  	as.sentLast = true
  1228  
  1229  	as.t.Write(as.s, nil, nil, &transport.Options{Last: true})
  1230  	// Always return nil; io.EOF is the only error that might make sense
  1231  	// instead, but there is no need to signal the client to call RecvMsg
  1232  	// as the only use left for the stream after CloseSend is to call
  1233  	// RecvMsg.  This also matches historical behavior.
  1234  	return nil
  1235  }
  1236  
  1237  func (as *addrConnStream) Context() context.Context {
  1238  	return as.s.Context()
  1239  }
  1240  
  1241  func (as *addrConnStream) SendMsg(m interface{}) (err error) {
  1242  	defer func() {
  1243  		if err != nil && err != io.EOF {
  1244  			// Call finish on the client stream for errors generated by this SendMsg
  1245  			// call, as these indicate problems created by this client.  (Transport
  1246  			// errors are converted to an io.EOF error in csAttempt.sendMsg; the real
  1247  			// error will be returned from RecvMsg eventually in that case, or be
  1248  			// retried.)
  1249  			as.finish(err)
  1250  		}
  1251  	}()
  1252  	if as.sentLast {
  1253  		return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
  1254  	}
  1255  	if !as.desc.ClientStreams {
  1256  		as.sentLast = true
  1257  	}
  1258  
  1259  	// load hdr, payload, data
  1260  	hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
  1261  	if err != nil {
  1262  		return err
  1263  	}
  1264  
  1265  	// TODO(dfawley): should we be checking len(data) instead?
  1266  	if len(payld) > *as.callInfo.maxSendMessageSize {
  1267  		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
  1268  	}
  1269  
  1270  	if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
  1271  		if !as.desc.ClientStreams {
  1272  			// For non-client-streaming RPCs, we return nil instead of EOF on error
  1273  			// because the generated code requires it.  finish is not called; RecvMsg()
  1274  			// will call it with the stream's status independently.
  1275  			return nil
  1276  		}
  1277  		return io.EOF
  1278  	}
  1279  
  1280  	if channelz.IsOn() {
  1281  		as.t.IncrMsgSent()
  1282  	}
  1283  	return nil
  1284  }
  1285  
  1286  func (as *addrConnStream) RecvMsg(m interface{}) (err error) {
  1287  	defer func() {
  1288  		if err != nil || !as.desc.ServerStreams {
  1289  			// err != nil or non-server-streaming indicates end of stream.
  1290  			as.finish(err)
  1291  		}
  1292  	}()
  1293  
  1294  	if !as.decompSet {
  1295  		// Block until we receive headers containing received message encoding.
  1296  		if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
  1297  			if as.dc == nil || as.dc.Type() != ct {
  1298  				// No configured decompressor, or it does not match the incoming
  1299  				// message encoding; attempt to find a registered compressor that does.
  1300  				as.dc = nil
  1301  				as.decomp = encoding.GetCompressor(ct)
  1302  			}
  1303  		} else {
  1304  			// No compression is used; disable our decompressor.
  1305  			as.dc = nil
  1306  		}
  1307  		// Only initialize this state once per stream.
  1308  		as.decompSet = true
  1309  	}
  1310  	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
  1311  	if err != nil {
  1312  		if err == io.EOF {
  1313  			if statusErr := as.s.Status().Err(); statusErr != nil {
  1314  				return statusErr
  1315  			}
  1316  			return io.EOF // indicates successful end of stream.
  1317  		}
  1318  		return toRPCErr(err)
  1319  	}
  1320  
  1321  	if channelz.IsOn() {
  1322  		as.t.IncrMsgRecv()
  1323  	}
  1324  	if as.desc.ServerStreams {
  1325  		// Subsequent messages should be received by subsequent RecvMsg calls.
  1326  		return nil
  1327  	}
  1328  
  1329  	// Special handling for non-server-stream rpcs.
  1330  	// This recv expects EOF or errors, so we don't collect inPayload.
  1331  	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
  1332  	if err == nil {
  1333  		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
  1334  	}
  1335  	if err == io.EOF {
  1336  		return as.s.Status().Err() // non-server streaming Recv returns nil on success
  1337  	}
  1338  	return toRPCErr(err)
  1339  }
  1340  
  1341  func (as *addrConnStream) finish(err error) {
  1342  	as.mu.Lock()
  1343  	if as.finished {
  1344  		as.mu.Unlock()
  1345  		return
  1346  	}
  1347  	as.finished = true
  1348  	if err == io.EOF {
  1349  		// Ending a stream with EOF indicates a success.
  1350  		err = nil
  1351  	}
  1352  	if as.s != nil {
  1353  		as.t.CloseStream(as.s, err)
  1354  	}
  1355  
  1356  	if err != nil {
  1357  		as.ac.incrCallsFailed()
  1358  	} else {
  1359  		as.ac.incrCallsSucceeded()
  1360  	}
  1361  	as.cancel()
  1362  	as.mu.Unlock()
  1363  }
  1364  
  1365  // ServerStream defines the server-side behavior of a streaming RPC.
  1366  //
  1367  // All errors returned from ServerStream methods are compatible with the
  1368  // status package.
  1369  type ServerStream interface {
  1370  	// SetHeader sets the header metadata. It may be called multiple times.
  1371  	// When called multiple times, all the provided metadata will be merged.
  1372  	// All the metadata will be sent out when one of the following happens:
  1373  	//  - ServerStream.SendHeader() is called;
  1374  	//  - The first response is sent out;
  1375  	//  - An RPC status is sent out (error or success).
  1376  	SetHeader(metadata.MD) error
  1377  	// SendHeader sends the header metadata.
  1378  	// The provided md and headers set by SetHeader() will be sent.
  1379  	// It fails if called multiple times.
  1380  	SendHeader(metadata.MD) error
  1381  	// SetTrailer sets the trailer metadata which will be sent with the RPC status.
  1382  	// When called more than once, all the provided metadata will be merged.
  1383  	SetTrailer(metadata.MD)
  1384  	// Context returns the context for this stream.
  1385  	Context() context.Context
  1386  	// SendMsg sends a message. On error, SendMsg aborts the stream and the
  1387  	// error is returned directly.
  1388  	//
  1389  	// SendMsg blocks until:
  1390  	//   - There is sufficient flow control to schedule m with the transport, or
  1391  	//   - The stream is done, or
  1392  	//   - The stream breaks.
  1393  	//
  1394  	// SendMsg does not wait until the message is received by the client. An
  1395  	// untimely stream closure may result in lost messages.
  1396  	//
  1397  	// It is safe to have a goroutine calling SendMsg and another goroutine
  1398  	// calling RecvMsg on the same stream at the same time, but it is not safe
  1399  	// to call SendMsg on the same stream in different goroutines.
  1400  	SendMsg(m interface{}) error
  1401  	// RecvMsg blocks until it receives a message into m or the stream is
  1402  	// done. It returns io.EOF when the client has performed a CloseSend. On
  1403  	// any non-EOF error, the stream is aborted and the error contains the
  1404  	// RPC status.
  1405  	//
  1406  	// It is safe to have a goroutine calling SendMsg and another goroutine
  1407  	// calling RecvMsg on the same stream at the same time, but it is not
  1408  	// safe to call RecvMsg on the same stream in different goroutines.
  1409  	RecvMsg(m interface{}) error
  1410  }
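
        // A minimal sketch of a handler written directly against this interface
        // (generated code normally wraps ServerStream in a typed stream; the message
        // types here are hypothetical):
        //
        //	func echoHandler(srv interface{}, stream ServerStream) error {
        //		for {
        //			req := new(EchoRequest)
        //			if err := stream.RecvMsg(req); err == io.EOF {
        //				return nil // client called CloseSend; return an OK status
        //			} else if err != nil {
        //				return err
        //			}
        //			if err := stream.SendMsg(&EchoResponse{Msg: req.Msg}); err != nil {
        //				return err
        //			}
        //		}
        //	}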
  1411  
  1412  // serverStream implements a server side Stream.
  1413  type serverStream struct {
  1414  	ctx   context.Context
  1415  	t     transport.ServerTransport
  1416  	s     *transport.Stream
  1417  	p     *parser
  1418  	codec baseCodec
  1419  
  1420  	cp     Compressor
  1421  	dc     Decompressor
  1422  	comp   encoding.Compressor
  1423  	decomp encoding.Compressor
  1424  
  1425  	maxReceiveMessageSize int
  1426  	maxSendMessageSize    int
  1427  	trInfo                *traceInfo
  1428  
  1429  	statsHandler stats.Handler
  1430  
  1431  	binlog *binarylog.MethodLogger
  1432  	// serverHeaderBinlogged indicates whether the server header has been logged.
  1433  	// It is set the first time one of the following happens: stream.SendHeader(),
  1434  	// stream.Send().
  1435  	//
  1436  	// It's only checked in send and sendHeader, so it doesn't need to be
  1437  	// synchronized.
  1438  	serverHeaderBinlogged bool
  1439  
  1440  	mu sync.Mutex // protects trInfo.tr after the service handler runs.
  1441  }
  1442  
  1443  func (ss *serverStream) Context() context.Context {
  1444  	return ss.ctx
  1445  }
  1446  
  1447  func (ss *serverStream) SetHeader(md metadata.MD) error {
  1448  	if md.Len() == 0 {
  1449  		return nil
  1450  	}
  1451  	return ss.s.SetHeader(md)
  1452  }
  1453  
  1454  func (ss *serverStream) SendHeader(md metadata.MD) error {
  1455  	err := ss.t.WriteHeader(ss.s, md)
  1456  	if ss.binlog != nil && !ss.serverHeaderBinlogged {
  1457  		h, _ := ss.s.Header()
  1458  		ss.binlog.Log(&binarylog.ServerHeader{
  1459  			Header: h,
  1460  		})
  1461  		ss.serverHeaderBinlogged = true
  1462  	}
  1463  	return err
  1464  }
  1465  
  1466  func (ss *serverStream) SetTrailer(md metadata.MD) {
  1467  	if md.Len() == 0 {
  1468  		return
  1469  	}
  1470  	ss.s.SetTrailer(md)
  1471  }
  1472  
  1473  func (ss *serverStream) SendMsg(m interface{}) (err error) {
  1474  	defer func() {
  1475  		if ss.trInfo != nil {
  1476  			ss.mu.Lock()
  1477  			if ss.trInfo.tr != nil {
  1478  				if err == nil {
  1479  					ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
  1480  				} else {
  1481  					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
  1482  					ss.trInfo.tr.SetError()
  1483  				}
  1484  			}
  1485  			ss.mu.Unlock()
  1486  		}
  1487  		if err != nil && err != io.EOF {
  1488  			st, _ := status.FromError(toRPCErr(err))
  1489  			ss.t.WriteStatus(ss.s, st)
  1490  			// Non-user specified status was sent out. This should be an error
  1491  			// case (as a server side Cancel maybe).
  1492  			//
  1493  			// This is not handled specifically now. User will return a final
  1494  			// status from the service handler, we will log that error instead.
  1495  			// This behavior is similar to an interceptor.
  1496  		}
  1497  		if channelz.IsOn() && err == nil {
  1498  			ss.t.IncrMsgSent()
  1499  		}
  1500  	}()
  1501  
  1502  	// load hdr, payload, data
  1503  	hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
  1504  	if err != nil {
  1505  		return err
  1506  	}
  1507  
  1508  	// TODO(dfawley): should we be checking len(data) instead?
  1509  	if len(payload) > ss.maxSendMessageSize {
  1510  		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
  1511  	}
  1512  	if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
  1513  		return toRPCErr(err)
  1514  	}
  1515  	if ss.binlog != nil {
  1516  		if !ss.serverHeaderBinlogged {
  1517  			h, _ := ss.s.Header()
  1518  			ss.binlog.Log(&binarylog.ServerHeader{
  1519  				Header: h,
  1520  			})
  1521  			ss.serverHeaderBinlogged = true
  1522  		}
  1523  		ss.binlog.Log(&binarylog.ServerMessage{
  1524  			Message: data,
  1525  		})
  1526  	}
  1527  	if ss.statsHandler != nil {
  1528  		ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
  1529  	}
  1530  	return nil
  1531  }
  1532  
  1533  func (ss *serverStream) RecvMsg(m interface{}) (err error) {
  1534  	defer func() {
  1535  		if ss.trInfo != nil {
  1536  			ss.mu.Lock()
  1537  			if ss.trInfo.tr != nil {
  1538  				if err == nil {
  1539  					ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
  1540  				} else if err != io.EOF {
  1541  					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
  1542  					ss.trInfo.tr.SetError()
  1543  				}
  1544  			}
  1545  			ss.mu.Unlock()
  1546  		}
  1547  		if err != nil && err != io.EOF {
  1548  			st, _ := status.FromError(toRPCErr(err))
  1549  			ss.t.WriteStatus(ss.s, st)
  1550  			// Non-user specified status was sent out. This should be an error
  1551  			// case (as a server side Cancel maybe).
  1552  			//
  1553  			// This is not handled specifically now. User will return a final
  1554  			// status from the service handler, we will log that error instead.
  1555  			// This behavior is similar to an interceptor.
  1556  		}
  1557  		if channelz.IsOn() && err == nil {
  1558  			ss.t.IncrMsgRecv()
  1559  		}
  1560  	}()
  1561  	var payInfo *payloadInfo
  1562  	if ss.statsHandler != nil || ss.binlog != nil {
  1563  		payInfo = &payloadInfo{}
  1564  	}
  1565  	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
  1566  		if err == io.EOF {
  1567  			if ss.binlog != nil {
  1568  				ss.binlog.Log(&binarylog.ClientHalfClose{})
  1569  			}
  1570  			return err
  1571  		}
  1572  		if err == io.ErrUnexpectedEOF {
  1573  			err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
  1574  		}
  1575  		return toRPCErr(err)
  1576  	}
  1577  	if ss.statsHandler != nil {
  1578  		ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
  1579  			RecvTime: time.Now(),
  1580  			Payload:  m,
  1581  			// TODO truncate large payload.
  1582  			Data:       payInfo.uncompressedBytes,
  1583  			WireLength: payInfo.wireLength + headerLen,
  1584  			Length:     len(payInfo.uncompressedBytes),
  1585  		})
  1586  	}
  1587  	if ss.binlog != nil {
  1588  		ss.binlog.Log(&binarylog.ClientMessage{
  1589  			Message: payInfo.uncompressedBytes,
  1590  		})
  1591  	}
  1592  	return nil
  1593  }
  1594  
  1595  // MethodFromServerStream returns the method string for the input stream.
  1596  // The returned string is in the format of "/service/method".
  1597  func MethodFromServerStream(stream ServerStream) (string, bool) {
  1598  	return Method(stream.Context())
  1599  }
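
        // For example, inside a handler for a hypothetical /chat.ChatService/Chat
        // method, MethodFromServerStream(stream) returns ("/chat.ChatService/Chat", true);
        // the bool is false when the stream's context carries no method information.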
  1600  
  1601  // prepareMsg returns the hdr, payload and data, either by encoding and
  1602  // compressing m with the passed codec and compressors, or by reusing the
  1603  // fields of a passed *PreparedMsg.
  1604  func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
  1605  	if preparedMsg, ok := m.(*PreparedMsg); ok {
  1606  		return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
  1607  	}
  1608  	// The input interface is not a prepared msg.
  1609  	// Marshal and compress the data at this point.
  1610  	data, err = encode(codec, m)
  1611  	if err != nil {
  1612  		return nil, nil, nil, err
  1613  	}
  1614  	compData, err := compress(data, cp, comp)
  1615  	if err != nil {
  1616  		return nil, nil, nil, err
  1617  	}
  1618  	hdr, payload = msgHeader(data, compData)
  1619  	return hdr, payload, data, nil
  1620  }
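
        // A sketch of the prepared-message fast path above (assuming this fork mirrors
        // upstream grpc-go's PreparedMsg.Encode API): marshalling and compression happen
        // once up front, and a later SendMsg reuses the stored hdr/payload/encodedData
        // instead of re-running the work in prepareMsg.
        //
        //	prepared := &PreparedMsg{}
        //	if err := prepared.Encode(stream, msg); err != nil { // marshal + compress once
        //		return err
        //	}
        //	if err := stream.SendMsg(prepared); err != nil { // hits the *PreparedMsg branch
        //		return err
        //	}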