google.golang.org/grpc@v1.74.2/server.go (about)

     1  /*
     2   *
     3   * Copyright 2014 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   */
    18  
    19  package grpc
    20  
    21  import (
    22  	"context"
    23  	"errors"
    24  	"fmt"
    25  	"io"
    26  	"math"
    27  	"net"
    28  	"net/http"
    29  	"reflect"
    30  	"runtime"
    31  	"strings"
    32  	"sync"
    33  	"sync/atomic"
    34  	"time"
    35  
    36  	"google.golang.org/grpc/codes"
    37  	"google.golang.org/grpc/credentials"
    38  	"google.golang.org/grpc/encoding"
    39  	"google.golang.org/grpc/encoding/proto"
    40  	estats "google.golang.org/grpc/experimental/stats"
    41  	"google.golang.org/grpc/grpclog"
    42  	"google.golang.org/grpc/internal"
    43  	"google.golang.org/grpc/internal/binarylog"
    44  	"google.golang.org/grpc/internal/channelz"
    45  	"google.golang.org/grpc/internal/grpcsync"
    46  	"google.golang.org/grpc/internal/grpcutil"
    47  	istats "google.golang.org/grpc/internal/stats"
    48  	"google.golang.org/grpc/internal/transport"
    49  	"google.golang.org/grpc/keepalive"
    50  	"google.golang.org/grpc/mem"
    51  	"google.golang.org/grpc/metadata"
    52  	"google.golang.org/grpc/peer"
    53  	"google.golang.org/grpc/stats"
    54  	"google.golang.org/grpc/status"
    55  	"google.golang.org/grpc/tap"
    56  )
    57  
const (
	// defaultServerMaxReceiveMessageSize caps inbound messages at 4MB unless
	// overridden via MaxRecvMsgSize.
	defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4
	// defaultServerMaxSendMessageSize effectively leaves outbound message
	// size unlimited unless overridden via MaxSendMsgSize.
	defaultServerMaxSendMessageSize = math.MaxInt32

	// Server transports are tracked in a map which is keyed on listener
	// address. For regular gRPC traffic, connections are accepted in Serve()
	// through a call to Accept(), and we use the actual listener address as key
	// when we add it to the map. But for connections received through
	// ServeHTTP(), we do not have a listener and hence use this dummy value.
	listenerAddressForServeHTTP = "listenerAddressForServeHTTP"
)
    69  
// init installs function hooks on the internal package so that other grpc
// subpackages can reach server internals without creating import cycles.
func init() {
	internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials {
		return srv.opts.creds
	}
	internal.IsRegisteredMethod = func(srv *Server, method string) bool {
		return srv.isRegisteredMethod(method)
	}
	internal.ServerFromContext = serverFromContext
	// Global options are applied to every server created afterwards, before
	// per-server options (see NewServer).
	internal.AddGlobalServerOptions = func(opt ...ServerOption) {
		globalServerOptions = append(globalServerOptions, opt...)
	}
	internal.ClearGlobalServerOptions = func() {
		globalServerOptions = nil
	}
	internal.BinaryLogger = binaryLogger
	internal.JoinServerOptions = newJoinServerOption
	internal.BufferPool = bufferPool
	// The metrics recorder fans out to every configured stats handler.
	internal.MetricsRecorderForServer = func(srv *Server) estats.MetricsRecorder {
		return istats.NewMetricsRecorderList(srv.opts.statsHandlers)
	}
}
    91  
    92  var statusOK = status.New(codes.OK, "")
    93  var logger = grpclog.Component("core")
    94  
// MethodHandler is a function type that processes a unary RPC method call.
// srv is the registered service implementation, dec decodes the request
// message into a caller-supplied value, and interceptor, if non-nil, wraps
// the invocation of the handler.
type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error)
    97  
// MethodDesc represents an RPC service's method specification.
type MethodDesc struct {
	// MethodName is the method name only, without the service or package name.
	MethodName string
	// Handler processes calls to this method.
	Handler MethodHandler
}
   103  
// ServiceDesc represents an RPC service's specification.
type ServiceDesc struct {
	// ServiceName is the full service name, including the package name.
	ServiceName string
	// The pointer to the service interface. Used to check whether the user
	// provided implementation satisfies the interface requirements.
	HandlerType any
	// Methods lists the unary methods of the service.
	Methods []MethodDesc
	// Streams lists the streaming methods of the service.
	Streams []StreamDesc
	// Metadata carries service metadata from the generated code.
	Metadata any
}
   114  
// serviceInfo wraps information about a service. It is very similar to
// ServiceDesc and is constructed from it for internal purposes.
type serviceInfo struct {
	// Contains the implementation for the methods in this service.
	serviceImpl any
	// methods maps method name -> descriptor for unary methods.
	methods map[string]*MethodDesc
	// streams maps stream name -> descriptor for streaming methods.
	streams map[string]*StreamDesc
	// mdata is the ServiceDesc.Metadata value, passed through untouched.
	mdata any
}
   124  
// Server is a gRPC server to serve RPC requests.
type Server struct {
	opts serverOptions

	mu  sync.Mutex // guards following
	lis map[net.Listener]bool
	// conns contains all active server transports. It is a map keyed on a
	// listener address with the value being the set of active transports
	// belonging to that listener.
	conns map[string]map[transport.ServerTransport]bool
	serve bool // set once Serve has been called; RegisterService fatals afterwards
	drain bool // set when draining has begun
	cv    *sync.Cond              // signaled when connections close for GracefulStop
	services map[string]*serviceInfo // service name -> service info
	events   traceEventLog

	quit               *grpcsync.Event
	done               *grpcsync.Event
	channelzRemoveOnce sync.Once
	serveWG            sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop
	handlersWG         sync.WaitGroup // counts active method handler goroutines

	channelz *channelz.Server

	// serverWorkerChannel feeds stream-handling closures to the worker pool
	// when numServerWorkers > 0; serverWorkerChannelClose closes it exactly
	// once.
	serverWorkerChannel      chan func()
	serverWorkerChannelClose func()
}
   152  
// serverOptions is the aggregate configuration for a Server. It is populated
// from defaultServerOptions, then global options, then the options passed to
// NewServer, in that order.
type serverOptions struct {
	creds                 credentials.TransportCredentials
	codec                 baseCodec
	cp                    Compressor   // deprecated forced compressor (RPCCompressor)
	dc                    Decompressor // deprecated forced decompressor (RPCDecompressor)
	unaryInt              UnaryServerInterceptor
	streamInt             StreamServerInterceptor
	chainUnaryInts        []UnaryServerInterceptor
	chainStreamInts       []StreamServerInterceptor
	binaryLogger          binarylog.Logger
	inTapHandle           tap.ServerInHandle
	statsHandlers         []stats.Handler
	maxConcurrentStreams  uint32
	maxReceiveMessageSize int
	maxSendMessageSize    int
	unknownStreamDesc     *StreamDesc
	keepaliveParams       keepalive.ServerParameters
	keepalivePolicy       keepalive.EnforcementPolicy
	initialWindowSize     int32
	initialConnWindowSize int32
	writeBufferSize       int
	readBufferSize        int
	sharedWriteBuffer     bool
	connectionTimeout     time.Duration
	maxHeaderListSize     *uint32 // nil means "not set"
	headerTableSize       *uint32 // nil means "not set"
	numServerWorkers      uint32
	bufferPool            mem.BufferPool
	waitForHandlers       bool
	staticWindowSize      bool // set by the window-size options to disable dynamic flow control
}
   184  
// defaultServerOptions is the baseline configuration copied into every new
// server before any ServerOptions are applied.
var defaultServerOptions = serverOptions{
	maxConcurrentStreams:  math.MaxUint32,
	maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
	maxSendMessageSize:    defaultServerMaxSendMessageSize,
	connectionTimeout:     120 * time.Second,
	writeBufferSize:       defaultWriteBufSize,
	readBufferSize:        defaultReadBufSize,
	bufferPool:            mem.DefaultBufferPool(),
}

// globalServerOptions are applied to every server, before per-server options;
// managed via the internal Add/ClearGlobalServerOptions hooks.
var globalServerOptions []ServerOption
   195  
// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
type ServerOption interface {
	// apply mutates the given serverOptions in place.
	apply(*serverOptions)
}
   200  
// EmptyServerOption does not alter the server configuration. It can be embedded
// in another structure to build custom server options.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type EmptyServerOption struct{}

// apply is a no-op, satisfying the ServerOption interface.
func (EmptyServerOption) apply(*serverOptions) {}
   211  
// funcServerOption wraps a function that modifies serverOptions into an
// implementation of the ServerOption interface.
type funcServerOption struct {
	f func(*serverOptions)
}

// apply invokes the wrapped function on the given serverOptions.
func (fdo *funcServerOption) apply(do *serverOptions) {
	fdo.f(do)
}

// newFuncServerOption returns a ServerOption that runs f when applied.
func newFuncServerOption(f func(*serverOptions)) *funcServerOption {
	return &funcServerOption{
		f: f,
	}
}
   227  
// joinServerOption provides a way to combine arbitrary number of server
// options into one.
type joinServerOption struct {
	opts []ServerOption
}

// apply applies each contained option in order.
func (mdo *joinServerOption) apply(do *serverOptions) {
	for _, opt := range mdo.opts {
		opt.apply(do)
	}
}

// newJoinServerOption combines opts into a single ServerOption; exposed via
// internal.JoinServerOptions.
func newJoinServerOption(opts ...ServerOption) ServerOption {
	return &joinServerOption{opts: opts}
}
   243  
   244  // SharedWriteBuffer allows reusing per-connection transport write buffer.
   245  // If this option is set to true every connection will release the buffer after
   246  // flushing the data on the wire.
   247  //
   248  // # Experimental
   249  //
   250  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
   251  // later release.
   252  func SharedWriteBuffer(val bool) ServerOption {
   253  	return newFuncServerOption(func(o *serverOptions) {
   254  		o.sharedWriteBuffer = val
   255  	})
   256  }
   257  
   258  // WriteBufferSize determines how much data can be batched before doing a write
   259  // on the wire. The default value for this buffer is 32KB. Zero or negative
   260  // values will disable the write buffer such that each write will be on underlying
   261  // connection. Note: A Send call may not directly translate to a write.
   262  func WriteBufferSize(s int) ServerOption {
   263  	return newFuncServerOption(func(o *serverOptions) {
   264  		o.writeBufferSize = s
   265  	})
   266  }
   267  
   268  // ReadBufferSize lets you set the size of read buffer, this determines how much
   269  // data can be read at most for one read syscall. The default value for this
   270  // buffer is 32KB. Zero or negative values will disable read buffer for a
   271  // connection so data framer can access the underlying conn directly.
   272  func ReadBufferSize(s int) ServerOption {
   273  	return newFuncServerOption(func(o *serverOptions) {
   274  		o.readBufferSize = s
   275  	})
   276  }
   277  
   278  // InitialWindowSize returns a ServerOption that sets window size for stream.
   279  // The lower bound for window size is 64K and any value smaller than that will be ignored.
   280  func InitialWindowSize(s int32) ServerOption {
   281  	return newFuncServerOption(func(o *serverOptions) {
   282  		o.initialWindowSize = s
   283  		o.staticWindowSize = true
   284  	})
   285  }
   286  
   287  // InitialConnWindowSize returns a ServerOption that sets window size for a connection.
   288  // The lower bound for window size is 64K and any value smaller than that will be ignored.
   289  func InitialConnWindowSize(s int32) ServerOption {
   290  	return newFuncServerOption(func(o *serverOptions) {
   291  		o.initialConnWindowSize = s
   292  		o.staticWindowSize = true
   293  	})
   294  }
   295  
   296  // StaticStreamWindowSize returns a ServerOption to set the initial stream
   297  // window size to the value provided and disables dynamic flow control.
   298  // The lower bound for window size is 64K and any value smaller than that
   299  // will be ignored.
   300  func StaticStreamWindowSize(s int32) ServerOption {
   301  	return newFuncServerOption(func(o *serverOptions) {
   302  		o.initialWindowSize = s
   303  		o.staticWindowSize = true
   304  	})
   305  }
   306  
   307  // StaticConnWindowSize returns a ServerOption to set the initial connection
   308  // window size to the value provided and disables dynamic flow control.
   309  // The lower bound for window size is 64K and any value smaller than that
   310  // will be ignored.
   311  func StaticConnWindowSize(s int32) ServerOption {
   312  	return newFuncServerOption(func(o *serverOptions) {
   313  		o.initialConnWindowSize = s
   314  		o.staticWindowSize = true
   315  	})
   316  }
   317  
   318  // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
   319  func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
   320  	if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime {
   321  		logger.Warning("Adjusting keepalive ping interval to minimum period of 1s")
   322  		kp.Time = internal.KeepaliveMinServerPingTime
   323  	}
   324  
   325  	return newFuncServerOption(func(o *serverOptions) {
   326  		o.keepaliveParams = kp
   327  	})
   328  }
   329  
   330  // KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
   331  func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
   332  	return newFuncServerOption(func(o *serverOptions) {
   333  		o.keepalivePolicy = kep
   334  	})
   335  }
   336  
   337  // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
   338  //
   339  // This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
   340  //
   341  // Deprecated: register codecs using encoding.RegisterCodec. The server will
   342  // automatically use registered codecs based on the incoming requests' headers.
   343  // See also
   344  // https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
   345  // Will be supported throughout 1.x.
   346  func CustomCodec(codec Codec) ServerOption {
   347  	return newFuncServerOption(func(o *serverOptions) {
   348  		o.codec = newCodecV0Bridge(codec)
   349  	})
   350  }
   351  
   352  // ForceServerCodec returns a ServerOption that sets a codec for message
   353  // marshaling and unmarshaling.
   354  //
   355  // This will override any lookups by content-subtype for Codecs registered
   356  // with RegisterCodec.
   357  //
   358  // See Content-Type on
   359  // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
   360  // more details. Also see the documentation on RegisterCodec and
   361  // CallContentSubtype for more details on the interaction between encoding.Codec
   362  // and content-subtype.
   363  //
   364  // This function is provided for advanced users; prefer to register codecs
   365  // using encoding.RegisterCodec.
   366  // The server will automatically use registered codecs based on the incoming
   367  // requests' headers. See also
   368  // https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
   369  // Will be supported throughout 1.x.
   370  //
   371  // # Experimental
   372  //
   373  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
   374  // later release.
   375  func ForceServerCodec(codec encoding.Codec) ServerOption {
   376  	return newFuncServerOption(func(o *serverOptions) {
   377  		o.codec = newCodecV1Bridge(codec)
   378  	})
   379  }
   380  
   381  // ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new
   382  // CodecV2 interface.
   383  //
   384  // Will be supported throughout 1.x.
   385  //
   386  // # Experimental
   387  //
   388  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
   389  // later release.
   390  func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption {
   391  	return newFuncServerOption(func(o *serverOptions) {
   392  		o.codec = codecV2
   393  	})
   394  }
   395  
   396  // RPCCompressor returns a ServerOption that sets a compressor for outbound
   397  // messages.  For backward compatibility, all outbound messages will be sent
   398  // using this compressor, regardless of incoming message compression.  By
   399  // default, server messages will be sent using the same compressor with which
   400  // request messages were sent.
   401  //
   402  // Deprecated: use encoding.RegisterCompressor instead. Will be supported
   403  // throughout 1.x.
   404  func RPCCompressor(cp Compressor) ServerOption {
   405  	return newFuncServerOption(func(o *serverOptions) {
   406  		o.cp = cp
   407  	})
   408  }
   409  
   410  // RPCDecompressor returns a ServerOption that sets a decompressor for inbound
   411  // messages.  It has higher priority than decompressors registered via
   412  // encoding.RegisterCompressor.
   413  //
   414  // Deprecated: use encoding.RegisterCompressor instead. Will be supported
   415  // throughout 1.x.
   416  func RPCDecompressor(dc Decompressor) ServerOption {
   417  	return newFuncServerOption(func(o *serverOptions) {
   418  		o.dc = dc
   419  	})
   420  }
   421  
   422  // MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
   423  // If this is not set, gRPC uses the default limit.
   424  //
   425  // Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x.
   426  func MaxMsgSize(m int) ServerOption {
   427  	return MaxRecvMsgSize(m)
   428  }
   429  
   430  // MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
   431  // If this is not set, gRPC uses the default 4MB.
   432  func MaxRecvMsgSize(m int) ServerOption {
   433  	return newFuncServerOption(func(o *serverOptions) {
   434  		o.maxReceiveMessageSize = m
   435  	})
   436  }
   437  
   438  // MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
   439  // If this is not set, gRPC uses the default `math.MaxInt32`.
   440  func MaxSendMsgSize(m int) ServerOption {
   441  	return newFuncServerOption(func(o *serverOptions) {
   442  		o.maxSendMessageSize = m
   443  	})
   444  }
   445  
   446  // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
   447  // of concurrent streams to each ServerTransport.
   448  func MaxConcurrentStreams(n uint32) ServerOption {
   449  	if n == 0 {
   450  		n = math.MaxUint32
   451  	}
   452  	return newFuncServerOption(func(o *serverOptions) {
   453  		o.maxConcurrentStreams = n
   454  	})
   455  }
   456  
   457  // Creds returns a ServerOption that sets credentials for server connections.
   458  func Creds(c credentials.TransportCredentials) ServerOption {
   459  	return newFuncServerOption(func(o *serverOptions) {
   460  		o.creds = c
   461  	})
   462  }
   463  
   464  // UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the
   465  // server. Only one unary interceptor can be installed. The construction of multiple
   466  // interceptors (e.g., chaining) can be implemented at the caller.
   467  func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
   468  	return newFuncServerOption(func(o *serverOptions) {
   469  		if o.unaryInt != nil {
   470  			panic("The unary server interceptor was already set and may not be reset.")
   471  		}
   472  		o.unaryInt = i
   473  	})
   474  }
   475  
   476  // ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor
   477  // for unary RPCs. The first interceptor will be the outer most,
   478  // while the last interceptor will be the inner most wrapper around the real call.
   479  // All unary interceptors added by this method will be chained.
   480  func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption {
   481  	return newFuncServerOption(func(o *serverOptions) {
   482  		o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
   483  	})
   484  }
   485  
   486  // StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
   487  // server. Only one stream interceptor can be installed.
   488  func StreamInterceptor(i StreamServerInterceptor) ServerOption {
   489  	return newFuncServerOption(func(o *serverOptions) {
   490  		if o.streamInt != nil {
   491  			panic("The stream server interceptor was already set and may not be reset.")
   492  		}
   493  		o.streamInt = i
   494  	})
   495  }
   496  
   497  // ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor
   498  // for streaming RPCs. The first interceptor will be the outer most,
   499  // while the last interceptor will be the inner most wrapper around the real call.
   500  // All stream interceptors added by this method will be chained.
   501  func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption {
   502  	return newFuncServerOption(func(o *serverOptions) {
   503  		o.chainStreamInts = append(o.chainStreamInts, interceptors...)
   504  	})
   505  }
   506  
   507  // InTapHandle returns a ServerOption that sets the tap handle for all the server
   508  // transport to be created. Only one can be installed.
   509  //
   510  // # Experimental
   511  //
   512  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
   513  // later release.
   514  func InTapHandle(h tap.ServerInHandle) ServerOption {
   515  	return newFuncServerOption(func(o *serverOptions) {
   516  		if o.inTapHandle != nil {
   517  			panic("The tap handle was already set and may not be reset.")
   518  		}
   519  		o.inTapHandle = h
   520  	})
   521  }
   522  
   523  // StatsHandler returns a ServerOption that sets the stats handler for the server.
   524  func StatsHandler(h stats.Handler) ServerOption {
   525  	return newFuncServerOption(func(o *serverOptions) {
   526  		if h == nil {
   527  			logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption")
   528  			// Do not allow a nil stats handler, which would otherwise cause
   529  			// panics.
   530  			return
   531  		}
   532  		o.statsHandlers = append(o.statsHandlers, h)
   533  	})
   534  }
   535  
   536  // binaryLogger returns a ServerOption that can set the binary logger for the
   537  // server.
   538  func binaryLogger(bl binarylog.Logger) ServerOption {
   539  	return newFuncServerOption(func(o *serverOptions) {
   540  		o.binaryLogger = bl
   541  	})
   542  }
   543  
   544  // UnknownServiceHandler returns a ServerOption that allows for adding a custom
   545  // unknown service handler. The provided method is a bidi-streaming RPC service
   546  // handler that will be invoked instead of returning the "unimplemented" gRPC
   547  // error whenever a request is received for an unregistered service or method.
   548  // The handling function and stream interceptor (if set) have full access to
   549  // the ServerStream, including its Context.
   550  func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
   551  	return newFuncServerOption(func(o *serverOptions) {
   552  		o.unknownStreamDesc = &StreamDesc{
   553  			StreamName: "unknown_service_handler",
   554  			Handler:    streamHandler,
   555  			// We need to assume that the users of the streamHandler will want to use both.
   556  			ClientStreams: true,
   557  			ServerStreams: true,
   558  		}
   559  	})
   560  }
   561  
   562  // ConnectionTimeout returns a ServerOption that sets the timeout for
   563  // connection establishment (up to and including HTTP/2 handshaking) for all
   564  // new connections.  If this is not set, the default is 120 seconds.  A zero or
   565  // negative value will result in an immediate timeout.
   566  //
   567  // # Experimental
   568  //
   569  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
   570  // later release.
   571  func ConnectionTimeout(d time.Duration) ServerOption {
   572  	return newFuncServerOption(func(o *serverOptions) {
   573  		o.connectionTimeout = d
   574  	})
   575  }
   576  
// MaxHeaderListSizeServerOption is a ServerOption that sets the max
// (uncompressed) size of header list that the server is prepared to accept.
type MaxHeaderListSizeServerOption struct {
	MaxHeaderListSize uint32
}

// apply stores a pointer to this (value-receiver) copy's field, so each
// application yields an independent *uint32; nil in serverOptions means the
// limit was never set.
func (o MaxHeaderListSizeServerOption) apply(so *serverOptions) {
	so.maxHeaderListSize = &o.MaxHeaderListSize
}

// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
// of header list that the server is prepared to accept.
func MaxHeaderListSize(s uint32) ServerOption {
	return MaxHeaderListSizeServerOption{
		MaxHeaderListSize: s,
	}
}
   594  
   595  // HeaderTableSize returns a ServerOption that sets the size of dynamic
   596  // header table for stream.
   597  //
   598  // # Experimental
   599  //
   600  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
   601  // later release.
   602  func HeaderTableSize(s uint32) ServerOption {
   603  	return newFuncServerOption(func(o *serverOptions) {
   604  		o.headerTableSize = &s
   605  	})
   606  }
   607  
   608  // NumStreamWorkers returns a ServerOption that sets the number of worker
   609  // goroutines that should be used to process incoming streams. Setting this to
   610  // zero (default) will disable workers and spawn a new goroutine for each
   611  // stream.
   612  //
   613  // # Experimental
   614  //
   615  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
   616  // later release.
   617  func NumStreamWorkers(numServerWorkers uint32) ServerOption {
   618  	// TODO: If/when this API gets stabilized (i.e. stream workers become the
   619  	// only way streams are processed), change the behavior of the zero value to
   620  	// a sane default. Preliminary experiments suggest that a value equal to the
   621  	// number of CPUs available is most performant; requires thorough testing.
   622  	return newFuncServerOption(func(o *serverOptions) {
   623  		o.numServerWorkers = numServerWorkers
   624  	})
   625  }
   626  
   627  // WaitForHandlers cause Stop to wait until all outstanding method handlers have
   628  // exited before returning.  If false, Stop will return as soon as all
   629  // connections have closed, but method handlers may still be running. By
   630  // default, Stop does not wait for method handlers to return.
   631  //
   632  // # Experimental
   633  //
   634  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
   635  // later release.
   636  func WaitForHandlers(w bool) ServerOption {
   637  	return newFuncServerOption(func(o *serverOptions) {
   638  		o.waitForHandlers = w
   639  	})
   640  }
   641  
   642  func bufferPool(bufferPool mem.BufferPool) ServerOption {
   643  	return newFuncServerOption(func(o *serverOptions) {
   644  		o.bufferPool = bufferPool
   645  	})
   646  }
   647  
// serverWorkerResetThreshold defines how often the stack must be reset. Every
// N requests, by spawning a new goroutine in its place, a worker can reset its
// stack so that large stacks don't live in memory forever. 2^16 should allow
// each goroutine stack to live for at least a few seconds in a typical
// workload (assuming a QPS of a few thousand requests/sec).
const serverWorkerResetThreshold = 1 << 16
   654  
   655  // serverWorker blocks on a *transport.ServerStream channel forever and waits
   656  // for data to be fed by serveStreams. This allows multiple requests to be
   657  // processed by the same goroutine, removing the need for expensive stack
   658  // re-allocations (see the runtime.morestack problem [1]).
   659  //
   660  // [1] https://github.com/golang/go/issues/18138
   661  func (s *Server) serverWorker() {
   662  	for completed := 0; completed < serverWorkerResetThreshold; completed++ {
   663  		f, ok := <-s.serverWorkerChannel
   664  		if !ok {
   665  			return
   666  		}
   667  		f()
   668  	}
   669  	go s.serverWorker()
   670  }
   671  
   672  // initServerWorkers creates worker goroutines and a channel to process incoming
   673  // connections to reduce the time spent overall on runtime.morestack.
   674  func (s *Server) initServerWorkers() {
   675  	s.serverWorkerChannel = make(chan func())
   676  	s.serverWorkerChannelClose = sync.OnceFunc(func() {
   677  		close(s.serverWorkerChannel)
   678  	})
   679  	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
   680  		go s.serverWorker()
   681  	}
   682  }
   683  
   684  // NewServer creates a gRPC server which has no service registered and has not
   685  // started to accept requests yet.
   686  func NewServer(opt ...ServerOption) *Server {
   687  	opts := defaultServerOptions
   688  	for _, o := range globalServerOptions {
   689  		o.apply(&opts)
   690  	}
   691  	for _, o := range opt {
   692  		o.apply(&opts)
   693  	}
   694  	s := &Server{
   695  		lis:      make(map[net.Listener]bool),
   696  		opts:     opts,
   697  		conns:    make(map[string]map[transport.ServerTransport]bool),
   698  		services: make(map[string]*serviceInfo),
   699  		quit:     grpcsync.NewEvent(),
   700  		done:     grpcsync.NewEvent(),
   701  		channelz: channelz.RegisterServer(""),
   702  	}
   703  	chainUnaryServerInterceptors(s)
   704  	chainStreamServerInterceptors(s)
   705  	s.cv = sync.NewCond(&s.mu)
   706  	if EnableTracing {
   707  		_, file, line, _ := runtime.Caller(1)
   708  		s.events = newTraceEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
   709  	}
   710  
   711  	if s.opts.numServerWorkers > 0 {
   712  		s.initServerWorkers()
   713  	}
   714  
   715  	channelz.Info(logger, s.channelz, "Server created")
   716  	return s
   717  }
   718  
   719  // printf records an event in s's event log, unless s has been stopped.
   720  // REQUIRES s.mu is held.
   721  func (s *Server) printf(format string, a ...any) {
   722  	if s.events != nil {
   723  		s.events.Printf(format, a...)
   724  	}
   725  }
   726  
   727  // errorf records an error in s's event log, unless s has been stopped.
   728  // REQUIRES s.mu is held.
   729  func (s *Server) errorf(format string, a ...any) {
   730  	if s.events != nil {
   731  		s.events.Errorf(format, a...)
   732  	}
   733  }
   734  
// ServiceRegistrar wraps a single method that supports service registration. It
// enables users to pass concrete types other than grpc.Server to the service
// registration methods exported by the IDL generated code. *Server implements
// this interface via its RegisterService method.
type ServiceRegistrar interface {
	// RegisterService registers a service and its implementation to the
	// concrete type implementing this interface.  It may not be called
	// once the server has started serving.
	// desc describes the service and its methods and handlers. impl is the
	// service implementation which is passed to the method handlers.
	RegisterService(desc *ServiceDesc, impl any)
}
   746  
   747  // RegisterService registers a service and its implementation to the gRPC
   748  // server. It is called from the IDL generated code. This must be called before
   749  // invoking Serve. If ss is non-nil (for legacy code), its type is checked to
   750  // ensure it implements sd.HandlerType.
   751  func (s *Server) RegisterService(sd *ServiceDesc, ss any) {
   752  	if ss != nil {
   753  		ht := reflect.TypeOf(sd.HandlerType).Elem()
   754  		st := reflect.TypeOf(ss)
   755  		if !st.Implements(ht) {
   756  			logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
   757  		}
   758  	}
   759  	s.register(sd, ss)
   760  }
   761  
   762  func (s *Server) register(sd *ServiceDesc, ss any) {
   763  	s.mu.Lock()
   764  	defer s.mu.Unlock()
   765  	s.printf("RegisterService(%q)", sd.ServiceName)
   766  	if s.serve {
   767  		logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
   768  	}
   769  	if _, ok := s.services[sd.ServiceName]; ok {
   770  		logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
   771  	}
   772  	info := &serviceInfo{
   773  		serviceImpl: ss,
   774  		methods:     make(map[string]*MethodDesc),
   775  		streams:     make(map[string]*StreamDesc),
   776  		mdata:       sd.Metadata,
   777  	}
   778  	for i := range sd.Methods {
   779  		d := &sd.Methods[i]
   780  		info.methods[d.MethodName] = d
   781  	}
   782  	for i := range sd.Streams {
   783  		d := &sd.Streams[i]
   784  		info.streams[d.StreamName] = d
   785  	}
   786  	s.services[sd.ServiceName] = info
   787  }
   788  
// MethodInfo contains the information of an RPC including its method name and type.
type MethodInfo struct {
	// Name is the method name only, without the service name or package name.
	Name string
	// IsClientStream indicates whether the RPC is a client streaming RPC.
	// It is false for unary RPCs.
	IsClientStream bool
	// IsServerStream indicates whether the RPC is a server streaming RPC.
	// It is false for unary RPCs.
	IsServerStream bool
}
   798  
// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service.
// It is the per-service value in the map returned by Server.GetServiceInfo.
type ServiceInfo struct {
	// Methods lists every registered method (unary and streaming) of the service.
	Methods []MethodInfo
	// Metadata is the metadata specified in ServiceDesc when registering service.
	Metadata any
}
   805  
   806  // GetServiceInfo returns a map from service names to ServiceInfo.
   807  // Service names include the package names, in the form of <package>.<service>.
   808  func (s *Server) GetServiceInfo() map[string]ServiceInfo {
   809  	ret := make(map[string]ServiceInfo)
   810  	for n, srv := range s.services {
   811  		methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams))
   812  		for m := range srv.methods {
   813  			methods = append(methods, MethodInfo{
   814  				Name:           m,
   815  				IsClientStream: false,
   816  				IsServerStream: false,
   817  			})
   818  		}
   819  		for m, d := range srv.streams {
   820  			methods = append(methods, MethodInfo{
   821  				Name:           m,
   822  				IsClientStream: d.ClientStreams,
   823  				IsServerStream: d.ServerStreams,
   824  			})
   825  		}
   826  
   827  		ret[n] = ServiceInfo{
   828  			Methods:  methods,
   829  			Metadata: srv.mdata,
   830  		}
   831  	}
   832  	return ret
   833  }
   834  
// ErrServerStopped indicates that the operation is now illegal because of
// the server being stopped.
//
// Serve returns ErrServerStopped when it is called after Stop or GracefulStop.
var ErrServerStopped = errors.New("grpc: the server has been stopped")
   838  
// listenSocket pairs a net.Listener with its channelz socket entry so the two
// can be cleaned up together.
type listenSocket struct {
	net.Listener
	channelz *channelz.Socket
}

// Close closes the wrapped listener, removes its channelz entry, and returns
// the listener's close error (if any).
func (l *listenSocket) Close() error {
	err := l.Listener.Close()
	channelz.RemoveEntry(l.channelz.ID)
	channelz.Info(logger, l.channelz, "ListenSocket deleted")
	return err
}
   850  
   851  // Serve accepts incoming connections on the listener lis, creating a new
   852  // ServerTransport and service goroutine for each. The service goroutines
   853  // read gRPC requests and then call the registered handlers to reply to them.
   854  // Serve returns when lis.Accept fails with fatal errors.  lis will be closed when
   855  // this method returns.
   856  // Serve will return a non-nil error unless Stop or GracefulStop is called.
   857  //
   858  // Note: All supported releases of Go (as of December 2023) override the OS
   859  // defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
   860  // with OS defaults for keepalive time and interval, callers need to do the
   861  // following two things:
   862  //   - pass a net.Listener created by calling the Listen method on a
   863  //     net.ListenConfig with the `KeepAlive` field set to a negative value. This
   864  //     will result in the Go standard library not overriding OS defaults for TCP
   865  //     keepalive interval and time. But this will also result in the Go standard
   866  //     library not enabling TCP keepalives by default.
   867  //   - override the Accept method on the passed in net.Listener and set the
   868  //     SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults.
   869  func (s *Server) Serve(lis net.Listener) error {
   870  	s.mu.Lock()
   871  	s.printf("serving")
   872  	s.serve = true
   873  	if s.lis == nil {
   874  		// Serve called after Stop or GracefulStop.
   875  		s.mu.Unlock()
   876  		lis.Close()
   877  		return ErrServerStopped
   878  	}
   879  
   880  	s.serveWG.Add(1)
   881  	defer func() {
   882  		s.serveWG.Done()
   883  		if s.quit.HasFired() {
   884  			// Stop or GracefulStop called; block until done and return nil.
   885  			<-s.done.Done()
   886  		}
   887  	}()
   888  
   889  	ls := &listenSocket{
   890  		Listener: lis,
   891  		channelz: channelz.RegisterSocket(&channelz.Socket{
   892  			SocketType:    channelz.SocketTypeListen,
   893  			Parent:        s.channelz,
   894  			RefName:       lis.Addr().String(),
   895  			LocalAddr:     lis.Addr(),
   896  			SocketOptions: channelz.GetSocketOption(lis)},
   897  		),
   898  	}
   899  	s.lis[ls] = true
   900  
   901  	defer func() {
   902  		s.mu.Lock()
   903  		if s.lis != nil && s.lis[ls] {
   904  			ls.Close()
   905  			delete(s.lis, ls)
   906  		}
   907  		s.mu.Unlock()
   908  	}()
   909  
   910  	s.mu.Unlock()
   911  	channelz.Info(logger, ls.channelz, "ListenSocket created")
   912  
   913  	var tempDelay time.Duration // how long to sleep on accept failure
   914  	for {
   915  		rawConn, err := lis.Accept()
   916  		if err != nil {
   917  			if ne, ok := err.(interface {
   918  				Temporary() bool
   919  			}); ok && ne.Temporary() {
   920  				if tempDelay == 0 {
   921  					tempDelay = 5 * time.Millisecond
   922  				} else {
   923  					tempDelay *= 2
   924  				}
   925  				if max := 1 * time.Second; tempDelay > max {
   926  					tempDelay = max
   927  				}
   928  				s.mu.Lock()
   929  				s.printf("Accept error: %v; retrying in %v", err, tempDelay)
   930  				s.mu.Unlock()
   931  				timer := time.NewTimer(tempDelay)
   932  				select {
   933  				case <-timer.C:
   934  				case <-s.quit.Done():
   935  					timer.Stop()
   936  					return nil
   937  				}
   938  				continue
   939  			}
   940  			s.mu.Lock()
   941  			s.printf("done serving; Accept = %v", err)
   942  			s.mu.Unlock()
   943  
   944  			if s.quit.HasFired() {
   945  				return nil
   946  			}
   947  			return err
   948  		}
   949  		tempDelay = 0
   950  		// Start a new goroutine to deal with rawConn so we don't stall this Accept
   951  		// loop goroutine.
   952  		//
   953  		// Make sure we account for the goroutine so GracefulStop doesn't nil out
   954  		// s.conns before this conn can be added.
   955  		s.serveWG.Add(1)
   956  		go func() {
   957  			s.handleRawConn(lis.Addr().String(), rawConn)
   958  			s.serveWG.Done()
   959  		}()
   960  	}
   961  }
   962  
   963  // handleRawConn forks a goroutine to handle a just-accepted connection that
   964  // has not had any I/O performed on it yet.
   965  func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
   966  	if s.quit.HasFired() {
   967  		rawConn.Close()
   968  		return
   969  	}
   970  	rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
   971  
   972  	// Finish handshaking (HTTP2)
   973  	st := s.newHTTP2Transport(rawConn)
   974  	rawConn.SetDeadline(time.Time{})
   975  	if st == nil {
   976  		return
   977  	}
   978  
   979  	if cc, ok := rawConn.(interface {
   980  		PassServerTransport(transport.ServerTransport)
   981  	}); ok {
   982  		cc.PassServerTransport(st)
   983  	}
   984  
   985  	if !s.addConn(lisAddr, st) {
   986  		return
   987  	}
   988  	go func() {
   989  		s.serveStreams(context.Background(), st, rawConn)
   990  		s.removeConn(lisAddr, st)
   991  	}()
   992  }
   993  
// newHTTP2Transport sets up a http/2 transport (using the
// gRPC http2 server transport in transport/http2_server.go).
// It returns nil on failure, in which case the connection has been closed
// unless the credentials dispatched it away from gRPC (ErrConnDispatched).
func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
	// Plumb every transport-relevant server option into the transport config.
	config := &transport.ServerConfig{
		MaxStreams:            s.opts.maxConcurrentStreams,
		ConnectionTimeout:     s.opts.connectionTimeout,
		Credentials:           s.opts.creds,
		InTapHandle:           s.opts.inTapHandle,
		StatsHandlers:         s.opts.statsHandlers,
		KeepaliveParams:       s.opts.keepaliveParams,
		KeepalivePolicy:       s.opts.keepalivePolicy,
		InitialWindowSize:     s.opts.initialWindowSize,
		InitialConnWindowSize: s.opts.initialConnWindowSize,
		WriteBufferSize:       s.opts.writeBufferSize,
		ReadBufferSize:        s.opts.readBufferSize,
		SharedWriteBuffer:     s.opts.sharedWriteBuffer,
		ChannelzParent:        s.channelz,
		MaxHeaderListSize:     s.opts.maxHeaderListSize,
		HeaderTableSize:       s.opts.headerTableSize,
		BufferPool:            s.opts.bufferPool,
		StaticWindowSize:      s.opts.staticWindowSize,
	}
	st, err := transport.NewServerTransport(c, config)
	if err != nil {
		s.mu.Lock()
		s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
		s.mu.Unlock()
		// ErrConnDispatched means that the connection was dispatched away from
		// gRPC; those connections should be left open.
		if err != credentials.ErrConnDispatched {
			// Don't log on ErrConnDispatched and io.EOF to prevent log spam.
			if err != io.EOF {
				channelz.Info(logger, s.channelz, "grpc: Server.Serve failed to create ServerTransport: ", err)
			}
			c.Close()
		}
		return nil
	}

	return st
}
  1035  
// serveStreams serves incoming streams on st until the transport terminates.
// It tags the connection for every configured stats handler, limits the
// number of concurrently running stream handlers with a quota derived from
// maxConcurrentStreams, and runs each handler either on a server worker (when
// numServerWorkers > 0 and one is free) or on a fresh goroutine.
// rawConn may be nil (e.g. in the ServeHTTP path).
func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) {
	ctx = transport.SetConnection(ctx, rawConn)
	ctx = peer.NewContext(ctx, st.Peer())
	for _, sh := range s.opts.statsHandlers {
		ctx = sh.TagConn(ctx, &stats.ConnTagInfo{
			RemoteAddr: st.Peer().Addr,
			LocalAddr:  st.Peer().LocalAddr,
		})
		sh.HandleConn(ctx, &stats.ConnBegin{})
	}

	// Close the transport and notify stats handlers once HandleStreams
	// returns (i.e. the connection is done).
	defer func() {
		st.Close(errors.New("finished serving streams for the server transport"))
		for _, sh := range s.opts.statsHandlers {
			sh.HandleConn(ctx, &stats.ConnEnd{})
		}
	}()

	streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
	st.HandleStreams(ctx, func(stream *transport.ServerStream) {
		// Register the handler with handlersWG and acquire a quota slot
		// before scheduling it, so concurrency accounting is in place even
		// if the handler runs later on a worker goroutine.
		s.handlersWG.Add(1)
		streamQuota.acquire()
		f := func() {
			defer streamQuota.release()
			defer s.handlersWG.Done()
			s.handleStream(st, stream)
		}

		if s.opts.numServerWorkers > 0 {
			select {
			case s.serverWorkerChannel <- f:
				return
			default:
				// If all stream workers are busy, fallback to the default code path.
			}
		}
		go f()
	})
}
  1075  
// Compile-time check that *Server implements http.Handler.
var _ http.Handler = (*Server)(nil)

// ServeHTTP implements the Go standard library's http.Handler
// interface by responding to the gRPC request r, by looking up
// the requested gRPC method in the gRPC server s.
//
// The provided HTTP request must have arrived on an HTTP/2
// connection. When using the Go standard library's server,
// practically this means that the Request must also have arrived
// over TLS.
//
// To share one port (such as 443 for https) between gRPC and an
// existing http.Handler, use a root http.Handler such as:
//
//	if r.ProtoMajor == 2 && strings.HasPrefix(
//		r.Header.Get("Content-Type"), "application/grpc") {
//		grpcServer.ServeHTTP(w, r)
//	} else {
//		yourMux.ServeHTTP(w, r)
//	}
//
// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally
// separate from grpc-go's HTTP/2 server. Performance and features may vary
// between the two paths. ServeHTTP does not support some gRPC features
// available through grpc-go's HTTP/2 server.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
	if err != nil {
		// Errors returned from transport.NewServerHandlerTransport have
		// already been written to w.
		return
	}
	if !s.addConn(listenerAddressForServeHTTP, st) {
		// Server already stopped; addConn closed the transport.
		return
	}
	defer s.removeConn(listenerAddressForServeHTTP, st)
	// No raw net.Conn is available in this mode, so pass nil.
	s.serveStreams(r.Context(), st, nil)
}
  1119  
  1120  func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
  1121  	s.mu.Lock()
  1122  	defer s.mu.Unlock()
  1123  	if s.conns == nil {
  1124  		st.Close(errors.New("Server.addConn called when server has already been stopped"))
  1125  		return false
  1126  	}
  1127  	if s.drain {
  1128  		// Transport added after we drained our existing conns: drain it
  1129  		// immediately.
  1130  		st.Drain("")
  1131  	}
  1132  
  1133  	if s.conns[addr] == nil {
  1134  		// Create a map entry if this is the first connection on this listener.
  1135  		s.conns[addr] = make(map[transport.ServerTransport]bool)
  1136  	}
  1137  	s.conns[addr][st] = true
  1138  	return true
  1139  }
  1140  
  1141  func (s *Server) removeConn(addr string, st transport.ServerTransport) {
  1142  	s.mu.Lock()
  1143  	defer s.mu.Unlock()
  1144  
  1145  	conns := s.conns[addr]
  1146  	if conns != nil {
  1147  		delete(conns, st)
  1148  		if len(conns) == 0 {
  1149  			// If the last connection for this address is being removed, also
  1150  			// remove the map entry corresponding to the address. This is used
  1151  			// in GracefulStop() when waiting for all connections to be closed.
  1152  			delete(s.conns, addr)
  1153  		}
  1154  		s.cv.Broadcast()
  1155  	}
  1156  }
  1157  
// incrCallsStarted bumps the channelz started-calls counter and records the
// start time of the most recent call.
func (s *Server) incrCallsStarted() {
	s.channelz.ServerMetrics.CallsStarted.Add(1)
	s.channelz.ServerMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
}
  1162  
// incrCallsSucceeded bumps the channelz succeeded-calls counter.
func (s *Server) incrCallsSucceeded() {
	s.channelz.ServerMetrics.CallsSucceeded.Add(1)
}
  1166  
// incrCallsFailed bumps the channelz failed-calls counter.
func (s *Server) incrCallsFailed() {
	s.channelz.ServerMetrics.CallsFailed.Add(1)
}
  1170  
  1171  func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.WriteOptions, comp encoding.Compressor) error {
  1172  	data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
  1173  	if err != nil {
  1174  		channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err)
  1175  		return err
  1176  	}
  1177  
  1178  	compData, pf, err := compress(data, cp, comp, s.opts.bufferPool)
  1179  	if err != nil {
  1180  		data.Free()
  1181  		channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err)
  1182  		return err
  1183  	}
  1184  
  1185  	hdr, payload := msgHeader(data, compData, pf)
  1186  
  1187  	defer func() {
  1188  		compData.Free()
  1189  		data.Free()
  1190  		// payload does not need to be freed here, it is either data or compData, both of
  1191  		// which are already freed.
  1192  	}()
  1193  
  1194  	dataLen := data.Len()
  1195  	payloadLen := payload.Len()
  1196  	// TODO(dfawley): should we be checking len(data) instead?
  1197  	if payloadLen > s.opts.maxSendMessageSize {
  1198  		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
  1199  	}
  1200  	err = stream.Write(hdr, payload, opts)
  1201  	if err == nil {
  1202  		if len(s.opts.statsHandlers) != 0 {
  1203  			for _, sh := range s.opts.statsHandlers {
  1204  				sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
  1205  			}
  1206  		}
  1207  	}
  1208  	return err
  1209  }
  1210  
  1211  // chainUnaryServerInterceptors chains all unary server interceptors into one.
  1212  func chainUnaryServerInterceptors(s *Server) {
  1213  	// Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will
  1214  	// be executed before any other chained interceptors.
  1215  	interceptors := s.opts.chainUnaryInts
  1216  	if s.opts.unaryInt != nil {
  1217  		interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...)
  1218  	}
  1219  
  1220  	var chainedInt UnaryServerInterceptor
  1221  	if len(interceptors) == 0 {
  1222  		chainedInt = nil
  1223  	} else if len(interceptors) == 1 {
  1224  		chainedInt = interceptors[0]
  1225  	} else {
  1226  		chainedInt = chainUnaryInterceptors(interceptors)
  1227  	}
  1228  
  1229  	s.opts.unaryInt = chainedInt
  1230  }
  1231  
  1232  func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor {
  1233  	return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) {
  1234  		return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
  1235  	}
  1236  }
  1237  
  1238  func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler {
  1239  	if curr == len(interceptors)-1 {
  1240  		return finalHandler
  1241  	}
  1242  	return func(ctx context.Context, req any) (any, error) {
  1243  		return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
  1244  	}
  1245  }
  1246  
// processUnaryRPC receives the single request message for a unary RPC on
// stream, invokes the method handler (through the configured unary
// interceptor, if any), and writes the response message and final status back
// to the client. Along the way it drives tracing, stats handlers, binary
// logging, channelz call counters, and request/response compression.
func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
	shs := s.opts.statsHandlers
	if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
		if channelz.IsOn() {
			s.incrCallsStarted()
		}
		var statsBegin *stats.Begin
		for _, sh := range shs {
			beginTime := time.Now()
			statsBegin = &stats.Begin{
				BeginTime:      beginTime,
				IsClientStream: false,
				IsServerStream: false,
			}
			sh.HandleRPC(ctx, statsBegin)
		}
		if trInfo != nil {
			trInfo.tr.LazyLog(&trInfo.firstLine, false)
		}
		// The deferred error handling for tracing, stats handler and channelz are
		// combined into one function to reduce stack usage -- a defer takes ~56-64
		// bytes on the stack, so overflowing the stack will require a stack
		// re-allocation, which is expensive.
		//
		// To maintain behavior similar to separate deferred statements, statements
		// should be executed in the reverse order. That is, tracing first, stats
		// handler second, and channelz last. Note that panics *within* defers will
		// lead to different behavior, but that's an acceptable compromise; that
		// would be undefined behavior territory anyway.
		defer func() {
			if trInfo != nil {
				if err != nil && err != io.EOF {
					trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
					trInfo.tr.SetError()
				}
				trInfo.tr.Finish()
			}

			for _, sh := range shs {
				end := &stats.End{
					BeginTime: statsBegin.BeginTime,
					EndTime:   time.Now(),
				}
				if err != nil && err != io.EOF {
					end.Error = toRPCErr(err)
				}
				sh.HandleRPC(ctx, end)
			}

			if channelz.IsOn() {
				if err != nil && err != io.EOF {
					s.incrCallsFailed()
				} else {
					s.incrCallsSucceeded()
				}
			}
		}()
	}
	// Collect the binary loggers that apply to this method: the globally
	// registered one plus the server-option logger, if configured.
	var binlogs []binarylog.MethodLogger
	if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil {
		binlogs = append(binlogs, ml)
	}
	if s.opts.binaryLogger != nil {
		if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil {
			binlogs = append(binlogs, ml)
		}
	}
	if len(binlogs) != 0 {
		// Log the client header (metadata, method, timeout, peer) as the
		// first binary-log entry for this RPC.
		md, _ := metadata.FromIncomingContext(ctx)
		logEntry := &binarylog.ClientHeader{
			Header:     md,
			MethodName: stream.Method(),
			PeerAddr:   nil,
		}
		if deadline, ok := ctx.Deadline(); ok {
			logEntry.Timeout = time.Until(deadline)
			if logEntry.Timeout < 0 {
				logEntry.Timeout = 0
			}
		}
		if a := md[":authority"]; len(a) > 0 {
			logEntry.Authority = a[0]
		}
		if peer, ok := peer.FromContext(ctx); ok {
			logEntry.PeerAddr = peer.Addr
		}
		for _, binlog := range binlogs {
			binlog.Log(ctx, logEntry)
		}
	}

	// comp and cp are used for compression.  decomp and dc are used for
	// decompression.  If comp and decomp are both set, they are the same;
	// however they are kept separate to ensure that at most one of the
	// compressor/decompressor variable pairs are set for use later.
	var comp, decomp encoding.Compressor
	var cp Compressor
	var dc Decompressor
	var sendCompressorName string

	// If dc is set and matches the stream's compression, use it.  Otherwise, try
	// to find a matching registered compressor for decomp.
	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
		dc = s.opts.dc
	} else if rc != "" && rc != encoding.Identity {
		decomp = encoding.GetCompressor(rc)
		if decomp == nil {
			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
			stream.WriteStatus(st)
			return st.Err()
		}
	}

	// If cp is set, use it.  Otherwise, attempt to compress the response using
	// the incoming message compression method.
	//
	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
	if s.opts.cp != nil {
		cp = s.opts.cp
		sendCompressorName = cp.Type()
	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
		// Legacy compressor not specified; attempt to respond with same encoding.
		comp = encoding.GetCompressor(rc)
		if comp != nil {
			sendCompressorName = comp.Name()
		}
	}

	if sendCompressorName != "" {
		if err := stream.SetSendCompress(sendCompressorName); err != nil {
			return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err)
		}
	}

	// payInfo is only needed when a stats handler or a binary logger will
	// consume the payload details.
	var payInfo *payloadInfo
	if len(shs) != 0 || len(binlogs) != 0 {
		payInfo = &payloadInfo{}
		defer payInfo.free()
	}

	// Receive and decompress the single request message.
	d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true)
	if err != nil {
		if e := stream.WriteStatus(status.Convert(err)); e != nil {
			channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
		}
		return err
	}
	// dataFree releases the request buffer at most once, whether or not the
	// decode function below is ever invoked.
	freed := false
	dataFree := func() {
		if !freed {
			d.Free()
			freed = true
		}
	}
	defer dataFree()
	// df decodes the received request into v; it is handed to the generated
	// method handler, which calls it to obtain the request message.
	df := func(v any) error {
		defer dataFree()
		if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
			return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
		}

		for _, sh := range shs {
			sh.HandleRPC(ctx, &stats.InPayload{
				RecvTime:         time.Now(),
				Payload:          v,
				Length:           d.Len(),
				WireLength:       payInfo.compressedLength + headerLen,
				CompressedLength: payInfo.compressedLength,
			})
		}
		if len(binlogs) != 0 {
			cm := &binarylog.ClientMessage{
				Message: d.Materialize(),
			}
			for _, binlog := range binlogs {
				binlog.Log(ctx, cm)
			}
		}
		if trInfo != nil {
			trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
		}
		return nil
	}
	ctx = NewContextWithServerTransportStream(ctx, stream)
	reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt)
	if appErr != nil {
		// The handler (or an interceptor) returned an error: write its
		// status to the client and binary-log the trailer (and header, if
		// one was sent).
		appStatus, ok := status.FromError(appErr)
		if !ok {
			// Convert non-status application error to a status error with code
			// Unknown, but handle context errors specifically.
			appStatus = status.FromContextError(appErr)
			appErr = appStatus.Err()
		}
		if trInfo != nil {
			trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
			trInfo.tr.SetError()
		}
		if e := stream.WriteStatus(appStatus); e != nil {
			channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
		}
		if len(binlogs) != 0 {
			if h, _ := stream.Header(); h.Len() > 0 {
				// Only log serverHeader if there was header. Otherwise it can
				// be trailer only.
				sh := &binarylog.ServerHeader{
					Header: h,
				}
				for _, binlog := range binlogs {
					binlog.Log(ctx, sh)
				}
			}
			st := &binarylog.ServerTrailer{
				Trailer: stream.Trailer(),
				Err:     appErr,
			}
			for _, binlog := range binlogs {
				binlog.Log(ctx, st)
			}
		}
		return appErr
	}
	if trInfo != nil {
		trInfo.tr.LazyLog(stringer("OK"), false)
	}
	opts := &transport.WriteOptions{Last: true}

	// Server handler could have set new compressor by calling SetSendCompressor.
	// In case it is set, we need to use it for compressing outbound message.
	if stream.SendCompress() != sendCompressorName {
		comp = encoding.GetCompressor(stream.SendCompress())
	}
	if err := s.sendResponse(ctx, stream, reply, cp, opts, comp); err != nil {
		if err == io.EOF {
			// The entire stream is done (for unary RPC only).
			return err
		}
		if sts, ok := status.FromError(err); ok {
			if e := stream.WriteStatus(sts); e != nil {
				channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
			}
		} else {
			switch st := err.(type) {
			case transport.ConnectionError:
				// Nothing to do here.
			default:
				panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
			}
		}
		if len(binlogs) != 0 {
			h, _ := stream.Header()
			sh := &binarylog.ServerHeader{
				Header: h,
			}
			st := &binarylog.ServerTrailer{
				Trailer: stream.Trailer(),
				Err:     appErr,
			}
			for _, binlog := range binlogs {
				binlog.Log(ctx, sh)
				binlog.Log(ctx, st)
			}
		}
		return err
	}
	// Success path: binary-log the server header and the response message.
	if len(binlogs) != 0 {
		h, _ := stream.Header()
		sh := &binarylog.ServerHeader{
			Header: h,
		}
		sm := &binarylog.ServerMessage{
			Message: reply,
		}
		for _, binlog := range binlogs {
			binlog.Log(ctx, sh)
			binlog.Log(ctx, sm)
		}
	}
	if trInfo != nil {
		trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
	}
	// TODO: Should we be logging if writing status failed here, like above?
	// Should the logging be in WriteStatus?  Should we ignore the WriteStatus
	// error or allow the stats handler to see it?
	if len(binlogs) != 0 {
		st := &binarylog.ServerTrailer{
			Trailer: stream.Trailer(),
			Err:     appErr,
		}
		for _, binlog := range binlogs {
			binlog.Log(ctx, st)
		}
	}
	return stream.WriteStatus(statusOK)
}
  1541  
  1542  // chainStreamServerInterceptors chains all stream server interceptors into one.
  1543  func chainStreamServerInterceptors(s *Server) {
  1544  	// Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will
  1545  	// be executed before any other chained interceptors.
  1546  	interceptors := s.opts.chainStreamInts
  1547  	if s.opts.streamInt != nil {
  1548  		interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...)
  1549  	}
  1550  
  1551  	var chainedInt StreamServerInterceptor
  1552  	if len(interceptors) == 0 {
  1553  		chainedInt = nil
  1554  	} else if len(interceptors) == 1 {
  1555  		chainedInt = interceptors[0]
  1556  	} else {
  1557  		chainedInt = chainStreamInterceptors(interceptors)
  1558  	}
  1559  
  1560  	s.opts.streamInt = chainedInt
  1561  }
  1562  
  1563  func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor {
  1564  	return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
  1565  		return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
  1566  	}
  1567  }
  1568  
  1569  func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler {
  1570  	if curr == len(interceptors)-1 {
  1571  		return finalHandler
  1572  	}
  1573  	return func(srv any, stream ServerStream) error {
  1574  		return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
  1575  	}
  1576  }
  1577  
// processStreamingRPC handles one streaming RPC on stream: it reports stats
// and channelz call counts, builds the serverStream (codec, compression,
// binary logging), invokes the registered handler — through the stream
// interceptor chain if one is configured — and writes the final RPC status.
func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
	if channelz.IsOn() {
		s.incrCallsStarted()
	}
	shs := s.opts.statsHandlers
	var statsBegin *stats.Begin
	if len(shs) != 0 {
		beginTime := time.Now()
		statsBegin = &stats.Begin{
			BeginTime:      beginTime,
			IsClientStream: sd.ClientStreams,
			IsServerStream: sd.ServerStreams,
		}
		for _, sh := range shs {
			sh.HandleRPC(ctx, statsBegin)
		}
	}
	ctx = NewContextWithServerTransportStream(ctx, stream)
	ss := &serverStream{
		ctx:                   ctx,
		s:                     stream,
		p:                     &parser{r: stream, bufferPool: s.opts.bufferPool},
		codec:                 s.getCodec(stream.ContentSubtype()),
		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
		maxSendMessageSize:    s.opts.maxSendMessageSize,
		trInfo:                trInfo,
		statsHandler:          shs,
	}

	if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
		// See comment in processUnaryRPC on defers.
		defer func() {
			if trInfo != nil {
				ss.mu.Lock()
				if err != nil && err != io.EOF {
					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
					ss.trInfo.tr.SetError()
				}
				ss.trInfo.tr.Finish()
				ss.trInfo.tr = nil
				ss.mu.Unlock()
			}

			if len(shs) != 0 {
				end := &stats.End{
					BeginTime: statsBegin.BeginTime,
					EndTime:   time.Now(),
				}
				// io.EOF marks a normal end of the stream, not a failure.
				if err != nil && err != io.EOF {
					end.Error = toRPCErr(err)
				}
				for _, sh := range shs {
					sh.HandleRPC(ctx, end)
				}
			}

			if channelz.IsOn() {
				if err != nil && err != io.EOF {
					s.incrCallsFailed()
				} else {
					s.incrCallsSucceeded()
				}
			}
		}()
	}

	// Collect binary-log sinks: the per-method logger and, if configured, the
	// server-wide logger.
	if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil {
		ss.binlogs = append(ss.binlogs, ml)
	}
	if s.opts.binaryLogger != nil {
		if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil {
			ss.binlogs = append(ss.binlogs, ml)
		}
	}
	if len(ss.binlogs) != 0 {
		// Log the client header entry for the RPC before any messages flow.
		md, _ := metadata.FromIncomingContext(ctx)
		logEntry := &binarylog.ClientHeader{
			Header:     md,
			MethodName: stream.Method(),
			PeerAddr:   nil,
		}
		if deadline, ok := ctx.Deadline(); ok {
			logEntry.Timeout = time.Until(deadline)
			if logEntry.Timeout < 0 {
				logEntry.Timeout = 0
			}
		}
		if a := md[":authority"]; len(a) > 0 {
			logEntry.Authority = a[0]
		}
		if peer, ok := peer.FromContext(ss.Context()); ok {
			logEntry.PeerAddr = peer.Addr
		}
		for _, binlog := range ss.binlogs {
			binlog.Log(ctx, logEntry)
		}
	}

	// If dc is set and matches the stream's compression, use it.  Otherwise, try
	// to find a matching registered compressor for decomp.
	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
		ss.decompressorV0 = s.opts.dc
	} else if rc != "" && rc != encoding.Identity {
		ss.decompressorV1 = encoding.GetCompressor(rc)
		if ss.decompressorV1 == nil {
			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
			ss.s.WriteStatus(st)
			return st.Err()
		}
	}

	// If cp is set, use it.  Otherwise, attempt to compress the response using
	// the incoming message compression method.
	//
	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
	if s.opts.cp != nil {
		ss.compressorV0 = s.opts.cp
		ss.sendCompressorName = s.opts.cp.Type()
	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
		// Legacy compressor not specified; attempt to respond with same encoding.
		ss.compressorV1 = encoding.GetCompressor(rc)
		if ss.compressorV1 != nil {
			ss.sendCompressorName = rc
		}
	}

	if ss.sendCompressorName != "" {
		if err := stream.SetSendCompress(ss.sendCompressorName); err != nil {
			return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err)
		}
	}

	ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.compressorV0, ss.compressorV1)

	if trInfo != nil {
		trInfo.tr.LazyLog(&trInfo.firstLine, false)
	}
	// Invoke the application handler, wrapped by the configured stream
	// interceptor when one is set.
	var appErr error
	var server any
	if info != nil {
		server = info.serviceImpl
	}
	if s.opts.streamInt == nil {
		appErr = sd.Handler(server, ss)
	} else {
		info := &StreamServerInfo{
			FullMethod:     stream.Method(),
			IsClientStream: sd.ClientStreams,
			IsServerStream: sd.ServerStreams,
		}
		appErr = s.opts.streamInt(server, ss, info, sd.Handler)
	}
	if appErr != nil {
		appStatus, ok := status.FromError(appErr)
		if !ok {
			// Convert non-status application error to a status error with code
			// Unknown, but handle context errors specifically.
			appStatus = status.FromContextError(appErr)
			appErr = appStatus.Err()
		}
		if trInfo != nil {
			ss.mu.Lock()
			ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
			ss.trInfo.tr.SetError()
			ss.mu.Unlock()
		}
		if len(ss.binlogs) != 0 {
			st := &binarylog.ServerTrailer{
				Trailer: ss.s.Trailer(),
				Err:     appErr,
			}
			for _, binlog := range ss.binlogs {
				binlog.Log(ctx, st)
			}
		}
		ss.s.WriteStatus(appStatus)
		// TODO: Should we log an error from WriteStatus here and below?
		return appErr
	}
	if trInfo != nil {
		ss.mu.Lock()
		ss.trInfo.tr.LazyLog(stringer("OK"), false)
		ss.mu.Unlock()
	}
	if len(ss.binlogs) != 0 {
		st := &binarylog.ServerTrailer{
			Trailer: ss.s.Trailer(),
			Err:     appErr,
		}
		for _, binlog := range ss.binlogs {
			binlog.Log(ctx, st)
		}
	}
	return ss.s.WriteStatus(statusOK)
}
  1773  
// handleStream routes an accepted stream to the matching registered unary or
// streaming handler based on its "/service/method" name. Unresolvable
// methods go to the unknown-stream handler when configured, otherwise the
// RPC is terminated with an Unimplemented status.
func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) {
	ctx := stream.Context()
	ctx = contextWithServer(ctx, s)
	var ti *traceInfo
	if EnableTracing {
		tr := newTrace("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
		ctx = newTraceContext(ctx, tr)
		ti = &traceInfo{
			tr: tr,
			firstLine: firstLine{
				client:     false,
				remoteAddr: t.Peer().Addr,
			},
		}
		if dl, ok := ctx.Deadline(); ok {
			ti.firstLine.deadline = time.Until(dl)
		}
	}

	// Split the method name ("/service/method") into service and method parts.
	sm := stream.Method()
	if sm != "" && sm[0] == '/' {
		sm = sm[1:]
	}
	pos := strings.LastIndex(sm, "/")
	if pos == -1 {
		// No separator: the method name is malformed; reject the RPC.
		if ti != nil {
			ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true)
			ti.tr.SetError()
		}
		errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
		if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil {
			if ti != nil {
				ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
				ti.tr.SetError()
			}
			channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err)
		}
		if ti != nil {
			ti.tr.Finish()
		}
		return
	}
	service := sm[:pos]
	method := sm[pos+1:]

	// FromIncomingContext is expensive: skip if there are no statsHandlers
	if len(s.opts.statsHandlers) > 0 {
		md, _ := metadata.FromIncomingContext(ctx)
		for _, sh := range s.opts.statsHandlers {
			ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
			sh.HandleRPC(ctx, &stats.InHeader{
				FullMethod:  stream.Method(),
				RemoteAddr:  t.Peer().Addr,
				LocalAddr:   t.Peer().LocalAddr,
				Compression: stream.RecvCompress(),
				WireLength:  stream.HeaderWireLength(),
				Header:      md,
			})
		}
	}
	// To have calls in stream callouts work. Will delete once all stats handler
	// calls come from the gRPC layer.
	stream.SetContext(ctx)

	// Dispatch to the registered unary or streaming handler, if any.
	srv, knownService := s.services[service]
	if knownService {
		if md, ok := srv.methods[method]; ok {
			s.processUnaryRPC(ctx, stream, srv, md, ti)
			return
		}
		if sd, ok := srv.streams[method]; ok {
			s.processStreamingRPC(ctx, stream, srv, sd, ti)
			return
		}
	}
	// Unknown service, or known server unknown method.
	if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
		s.processStreamingRPC(ctx, stream, nil, unknownDesc, ti)
		return
	}
	var errDesc string
	if !knownService {
		errDesc = fmt.Sprintf("unknown service %v", service)
	} else {
		errDesc = fmt.Sprintf("unknown method %v for service %v", method, service)
	}
	if ti != nil {
		ti.tr.LazyPrintf("%s", errDesc)
		ti.tr.SetError()
	}
	if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil {
		if ti != nil {
			ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
			ti.tr.SetError()
		}
		channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err)
	}
	if ti != nil {
		ti.tr.Finish()
	}
}
  1875  
// streamKey is the context key under which the ServerTransportStream is
// saved; see NewContextWithServerTransportStream.
type streamKey struct{}
  1878  
// NewContextWithServerTransportStream creates a new context from ctx and
// attaches stream to it.  The stream can be retrieved later with
// ServerTransportStreamFromContext.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
	return context.WithValue(ctx, streamKey{}, stream)
}
  1889  
// ServerTransportStream is a minimal interface that a transport stream must
// implement. This can be used to mock an actual transport stream for tests of
// handler code that use, for example, grpc.SetHeader (which requires some
// stream to be in context).
//
// See also NewContextWithServerTransportStream.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type ServerTransportStream interface {
	// Method returns the RPC method string, in the "/service/method" format.
	Method() string
	// SetHeader sets header metadata to be sent later; repeated calls merge
	// the provided metadata.
	SetHeader(md metadata.MD) error
	// SendHeader sends the header metadata, including md and anything set
	// previously via SetHeader.
	SendHeader(md metadata.MD) error
	// SetTrailer sets trailer metadata sent when the RPC returns; repeated
	// calls merge the provided metadata.
	SetTrailer(md metadata.MD) error
}
  1907  
  1908  // ServerTransportStreamFromContext returns the ServerTransportStream saved in
  1909  // ctx. Returns nil if the given context has no stream associated with it
  1910  // (which implies it is not an RPC invocation context).
  1911  //
  1912  // # Experimental
  1913  //
  1914  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
  1915  // later release.
  1916  func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream {
  1917  	s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
  1918  	return s
  1919  }
  1920  
// Stop stops the gRPC server. It immediately closes all open
// connections and listeners.
// It cancels all active RPCs on the server side and the corresponding
// pending RPCs on the client side will get notified by connection
// errors.
//
// For a shutdown that waits for pending RPCs, use GracefulStop instead.
func (s *Server) Stop() {
	s.stop(false)
}
  1929  
// GracefulStop stops the gRPC server gracefully. It stops the server from
// accepting new connections and RPCs and blocks until all the pending RPCs are
// finished.
//
// Unlike Stop, open transports are drained rather than closed immediately.
func (s *Server) GracefulStop() {
	s.stop(true)
}
  1936  
// stop implements both Stop (graceful=false) and GracefulStop
// (graceful=true): it closes the listeners, waits for the serving goroutines
// to exit, then closes (or, when graceful, drains) all server transports and
// waits for every connection to be removed before tearing down server state.
func (s *Server) stop(graceful bool) {
	s.quit.Fire()
	defer s.done.Fire()

	s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelz.ID) })
	s.mu.Lock()
	s.closeListenersLocked()
	// Wait for serving threads to be ready to exit.  Only then can we be sure no
	// new conns will be created.
	s.mu.Unlock()
	s.serveWG.Wait()

	s.mu.Lock()
	defer s.mu.Unlock()

	if graceful {
		s.drainAllServerTransportsLocked()
	} else {
		s.closeServerTransportsLocked()
	}

	// Block (on the condition variable) until s.conns has been emptied.
	for len(s.conns) != 0 {
		s.cv.Wait()
	}
	s.conns = nil

	if s.opts.numServerWorkers > 0 {
		// Closing the channel (only once, via sync.OnceFunc) after all the
		// connections have been closed above ensures that there are no
		// goroutines executing the callback passed to st.HandleStreams (where
		// the channel is written to).
		s.serverWorkerChannelClose()
	}

	if graceful || s.opts.waitForHandlers {
		s.handlersWG.Wait()
	}

	if s.events != nil {
		s.events.Finish()
		s.events = nil
	}
}
  1980  
  1981  // s.mu must be held by the caller.
  1982  func (s *Server) closeServerTransportsLocked() {
  1983  	for _, conns := range s.conns {
  1984  		for st := range conns {
  1985  			st.Close(errors.New("Server.Stop called"))
  1986  		}
  1987  	}
  1988  }
  1989  
  1990  // s.mu must be held by the caller.
  1991  func (s *Server) drainAllServerTransportsLocked() {
  1992  	if !s.drain {
  1993  		for _, conns := range s.conns {
  1994  			for st := range conns {
  1995  				st.Drain("graceful_stop")
  1996  			}
  1997  		}
  1998  		s.drain = true
  1999  	}
  2000  }
  2001  
  2002  // s.mu must be held by the caller.
  2003  func (s *Server) closeListenersLocked() {
  2004  	for lis := range s.lis {
  2005  		lis.Close()
  2006  	}
  2007  	s.lis = nil
  2008  }
  2009  
  2010  // contentSubtype must be lowercase
  2011  // cannot return nil
  2012  func (s *Server) getCodec(contentSubtype string) baseCodec {
  2013  	if s.opts.codec != nil {
  2014  		return s.opts.codec
  2015  	}
  2016  	if contentSubtype == "" {
  2017  		return getCodec(proto.Name)
  2018  	}
  2019  	codec := getCodec(contentSubtype)
  2020  	if codec == nil {
  2021  		logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name)
  2022  		return getCodec(proto.Name)
  2023  	}
  2024  	return codec
  2025  }
  2026  
// serverKey is the context key under which the *Server is stored by
// contextWithServer and read back by serverFromContext.
type serverKey struct{}
  2028  
  2029  // serverFromContext gets the Server from the context.
  2030  func serverFromContext(ctx context.Context) *Server {
  2031  	s, _ := ctx.Value(serverKey{}).(*Server)
  2032  	return s
  2033  }
  2034  
// contextWithServer stores the Server in the context for later retrieval by
// serverFromContext.
func contextWithServer(ctx context.Context, server *Server) context.Context {
	return context.WithValue(ctx, serverKey{}, server)
}
  2039  
  2040  // isRegisteredMethod returns whether the passed in method is registered as a
  2041  // method on the server. /service/method and service/method will match if the
  2042  // service and method are registered on the server.
  2043  func (s *Server) isRegisteredMethod(serviceMethod string) bool {
  2044  	if serviceMethod != "" && serviceMethod[0] == '/' {
  2045  		serviceMethod = serviceMethod[1:]
  2046  	}
  2047  	pos := strings.LastIndex(serviceMethod, "/")
  2048  	if pos == -1 { // Invalid method name syntax.
  2049  		return false
  2050  	}
  2051  	service := serviceMethod[:pos]
  2052  	method := serviceMethod[pos+1:]
  2053  	srv, knownService := s.services[service]
  2054  	if knownService {
  2055  		if _, ok := srv.methods[method]; ok {
  2056  			return true
  2057  		}
  2058  		if _, ok := srv.streams[method]; ok {
  2059  			return true
  2060  		}
  2061  	}
  2062  	return false
  2063  }
  2064  
  2065  // SetHeader sets the header metadata to be sent from the server to the client.
  2066  // The context provided must be the context passed to the server's handler.
  2067  //
  2068  // Streaming RPCs should prefer the SetHeader method of the ServerStream.
  2069  //
  2070  // When called multiple times, all the provided metadata will be merged.  All
  2071  // the metadata will be sent out when one of the following happens:
  2072  //
  2073  //   - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader.
  2074  //   - The first response message is sent.  For unary handlers, this occurs when
  2075  //     the handler returns; for streaming handlers, this can happen when stream's
  2076  //     SendMsg method is called.
  2077  //   - An RPC status is sent out (error or success).  This occurs when the handler
  2078  //     returns.
  2079  //
  2080  // SetHeader will fail if called after any of the events above.
  2081  //
  2082  // The error returned is compatible with the status package.  However, the
  2083  // status code will often not match the RPC status as seen by the client
  2084  // application, and therefore, should not be relied upon for this purpose.
  2085  func SetHeader(ctx context.Context, md metadata.MD) error {
  2086  	if md.Len() == 0 {
  2087  		return nil
  2088  	}
  2089  	stream := ServerTransportStreamFromContext(ctx)
  2090  	if stream == nil {
  2091  		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
  2092  	}
  2093  	return stream.SetHeader(md)
  2094  }
  2095  
  2096  // SendHeader sends header metadata. It may be called at most once, and may not
  2097  // be called after any event that causes headers to be sent (see SetHeader for
  2098  // a complete list).  The provided md and headers set by SetHeader() will be
  2099  // sent.
  2100  //
  2101  // The error returned is compatible with the status package.  However, the
  2102  // status code will often not match the RPC status as seen by the client
  2103  // application, and therefore, should not be relied upon for this purpose.
  2104  func SendHeader(ctx context.Context, md metadata.MD) error {
  2105  	stream := ServerTransportStreamFromContext(ctx)
  2106  	if stream == nil {
  2107  		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
  2108  	}
  2109  	if err := stream.SendHeader(md); err != nil {
  2110  		return toRPCErr(err)
  2111  	}
  2112  	return nil
  2113  }
  2114  
  2115  // SetSendCompressor sets a compressor for outbound messages from the server.
  2116  // It must not be called after any event that causes headers to be sent
  2117  // (see ServerStream.SetHeader for the complete list). Provided compressor is
  2118  // used when below conditions are met:
  2119  //
  2120  //   - compressor is registered via encoding.RegisterCompressor
  2121  //   - compressor name must exist in the client advertised compressor names
  2122  //     sent in grpc-accept-encoding header. Use ClientSupportedCompressors to
  2123  //     get client supported compressor names.
  2124  //
  2125  // The context provided must be the context passed to the server's handler.
  2126  // It must be noted that compressor name encoding.Identity disables the
  2127  // outbound compression.
  2128  // By default, server messages will be sent using the same compressor with
  2129  // which request messages were sent.
  2130  //
  2131  // It is not safe to call SetSendCompressor concurrently with SendHeader and
  2132  // SendMsg.
  2133  //
  2134  // # Experimental
  2135  //
  2136  // Notice: This function is EXPERIMENTAL and may be changed or removed in a
  2137  // later release.
  2138  func SetSendCompressor(ctx context.Context, name string) error {
  2139  	stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream)
  2140  	if !ok || stream == nil {
  2141  		return fmt.Errorf("failed to fetch the stream from the given context")
  2142  	}
  2143  
  2144  	if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil {
  2145  		return fmt.Errorf("unable to set send compressor: %w", err)
  2146  	}
  2147  
  2148  	return stream.SetSendCompress(name)
  2149  }
  2150  
  2151  // ClientSupportedCompressors returns compressor names advertised by the client
  2152  // via grpc-accept-encoding header.
  2153  //
  2154  // The context provided must be the context passed to the server's handler.
  2155  //
  2156  // # Experimental
  2157  //
  2158  // Notice: This function is EXPERIMENTAL and may be changed or removed in a
  2159  // later release.
  2160  func ClientSupportedCompressors(ctx context.Context) ([]string, error) {
  2161  	stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream)
  2162  	if !ok || stream == nil {
  2163  		return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx)
  2164  	}
  2165  
  2166  	return stream.ClientAdvertisedCompressors(), nil
  2167  }
  2168  
  2169  // SetTrailer sets the trailer metadata that will be sent when an RPC returns.
  2170  // When called more than once, all the provided metadata will be merged.
  2171  //
  2172  // The error returned is compatible with the status package.  However, the
  2173  // status code will often not match the RPC status as seen by the client
  2174  // application, and therefore, should not be relied upon for this purpose.
  2175  func SetTrailer(ctx context.Context, md metadata.MD) error {
  2176  	if md.Len() == 0 {
  2177  		return nil
  2178  	}
  2179  	stream := ServerTransportStreamFromContext(ctx)
  2180  	if stream == nil {
  2181  		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
  2182  	}
  2183  	return stream.SetTrailer(md)
  2184  }
  2185  
  2186  // Method returns the method string for the server context.  The returned
  2187  // string is in the format of "/service/method".
  2188  func Method(ctx context.Context) (string, bool) {
  2189  	s := ServerTransportStreamFromContext(ctx)
  2190  	if s == nil {
  2191  		return "", false
  2192  	}
  2193  	return s.Method(), true
  2194  }
  2195  
  2196  // validateSendCompressor returns an error when given compressor name cannot be
  2197  // handled by the server or the client based on the advertised compressors.
  2198  func validateSendCompressor(name string, clientCompressors []string) error {
  2199  	if name == encoding.Identity {
  2200  		return nil
  2201  	}
  2202  
  2203  	if !grpcutil.IsCompressorNameRegistered(name) {
  2204  		return fmt.Errorf("compressor not registered %q", name)
  2205  	}
  2206  
  2207  	for _, c := range clientCompressors {
  2208  		if c == name {
  2209  			return nil // found match
  2210  		}
  2211  	}
  2212  	return fmt.Errorf("client does not support compressor %q", name)
  2213  }
  2214  
// atomicSemaphore implements a blocking, counting semaphore. acquire should be
// called synchronously; release may be called asynchronously.
type atomicSemaphore struct {
	// n is the remaining quota; it goes negative while an acquirer is blocked.
	n    atomic.Int64
	// wait unblocks an out-of-quota acquirer; buffered with capacity 1 (see
	// newHandlerQuota).
	wait chan struct{}
}
  2221  
// acquire takes one unit of quota, blocking on the wait channel when none
// remains.  Per the type comment, acquire must be called synchronously.
func (q *atomicSemaphore) acquire() {
	if q.n.Add(-1) < 0 {
		// We ran out of quota.  Block until a release happens.
		<-q.wait
	}
}
  2228  
// release returns one unit of quota, unblocking a waiting acquire if one
// exists.  Per the type comment, release may be called asynchronously.
func (q *atomicSemaphore) release() {
	// N.B. the "<= 0" check below should allow for this to work with multiple
	// concurrent calls to acquire, but also note that with synchronous calls to
	// acquire, as our system does, n will never be less than -1.  There are
	// fairness issues (queuing) to consider if this was to be generalized.
	if q.n.Add(1) <= 0 {
		// An acquire was waiting on us.  Unblock it.
		q.wait <- struct{}{}
	}
}
  2239  
  2240  func newHandlerQuota(n uint32) *atomicSemaphore {
  2241  	a := &atomicSemaphore{wait: make(chan struct{}, 1)}
  2242  	a.n.Store(int64(n))
  2243  	return a
  2244  }