gitee.com/zhaochuninhefei/gmgo@v0.0.31-0.20240209061119-069254a02979/grpc/server.go (about)

     1  /*
     2   *
     3   * Copyright 2014 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   */
    18  
    19  package grpc
    20  
    21  import (
    22  	"context"
    23  	"errors"
    24  	"fmt"
    25  	"io"
    26  	"math"
    27  	"net"
    28  	"reflect"
    29  	"runtime"
    30  	"strings"
    31  	"sync"
    32  	"sync/atomic"
    33  	"time"
    34  
    35  	http "gitee.com/zhaochuninhefei/gmgo/gmhttp"
    36  
    37  	"gitee.com/zhaochuninhefei/gmgo/net/trace"
    38  
    39  	"gitee.com/zhaochuninhefei/gmgo/grpc/codes"
    40  	"gitee.com/zhaochuninhefei/gmgo/grpc/credentials"
    41  	"gitee.com/zhaochuninhefei/gmgo/grpc/encoding"
    42  	"gitee.com/zhaochuninhefei/gmgo/grpc/encoding/proto"
    43  	"gitee.com/zhaochuninhefei/gmgo/grpc/grpclog"
    44  	"gitee.com/zhaochuninhefei/gmgo/grpc/internal"
    45  	"gitee.com/zhaochuninhefei/gmgo/grpc/internal/binarylog"
    46  	"gitee.com/zhaochuninhefei/gmgo/grpc/internal/channelz"
    47  	"gitee.com/zhaochuninhefei/gmgo/grpc/internal/grpcrand"
    48  	"gitee.com/zhaochuninhefei/gmgo/grpc/internal/grpcsync"
    49  	"gitee.com/zhaochuninhefei/gmgo/grpc/internal/transport"
    50  	"gitee.com/zhaochuninhefei/gmgo/grpc/keepalive"
    51  	"gitee.com/zhaochuninhefei/gmgo/grpc/metadata"
    52  	"gitee.com/zhaochuninhefei/gmgo/grpc/peer"
    53  	"gitee.com/zhaochuninhefei/gmgo/grpc/stats"
    54  	"gitee.com/zhaochuninhefei/gmgo/grpc/status"
    55  	"gitee.com/zhaochuninhefei/gmgo/grpc/tap"
    56  )
    57  
const (
	// defaultServerMaxReceiveMessageSize caps inbound messages at 4 MiB
	// unless overridden via MaxRecvMsgSize.
	defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4
	// defaultServerMaxSendMessageSize leaves outbound messages effectively
	// unlimited unless overridden via MaxSendMsgSize.
	defaultServerMaxSendMessageSize = math.MaxInt32

	// Server transports are tracked in a map which is keyed on listener
	// address. For regular gRPC traffic, connections are accepted in Serve()
	// through a call to Accept(), and we use the actual listener address as key
	// when we add it to the map. But for connections received through
	// ServeHTTP(), we do not have a listener and hence use this dummy value.
	listenerAddressForServeHTTP = "listenerAddressForServeHTTP"
)
    69  
// init publishes accessors for unexported server state through the internal
// package, presumably so sibling packages can reach them without an import
// cycle — confirm against the internal package's consumers.
func init() {
	// Expose the server's configured transport credentials.
	internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials {
		return srv.opts.creds
	}
	// Expose draining of the transports associated with a listener address.
	internal.DrainServerTransports = func(srv *Server, addr string) {
		srv.drainServerTransports(addr)
	}
}
    78  
// statusOK is the shared OK status, reused so a successful RPC does not
// allocate a fresh status value.
var statusOK = status.New(codes.OK, "")

// logger is the package-wide logger for the "core" component.
var logger = grpclog.Component("core")

// methodHandler is the generated-code entry point for a unary method: it
// decodes the request with dec, optionally routes through interceptor, and
// invokes the method on srv.
type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)

// MethodDesc represents an RPC service's method specification.
type MethodDesc struct {
	MethodName string
	Handler    methodHandler
}

// ServiceDesc represents an RPC service's specification.
type ServiceDesc struct {
	ServiceName string
	// The pointer to the service interface. Used to check whether the user
	// provided implementation satisfies the interface requirements.
	HandlerType interface{}
	Methods     []MethodDesc
	Streams     []StreamDesc
	Metadata    interface{}
}

// serviceInfo wraps information about a service. It is very similar to
// ServiceDesc and is constructed from it for internal purposes.
type serviceInfo struct {
	// Contains the implementation for the methods in this service.
	serviceImpl interface{}
	methods     map[string]*MethodDesc // unary methods, keyed by method name
	streams     map[string]*StreamDesc // streaming methods, keyed by stream name
	mdata       interface{}
}

// serverWorkerData is one unit of work handed to a serverWorker goroutine:
// the transport and stream to handle, plus the WaitGroup to signal when done.
type serverWorkerData struct {
	st     transport.ServerTransport
	wg     *sync.WaitGroup
	stream *transport.Stream
}
   116  
// Server is a gRPC server to serve RPC requests.
type Server struct {
	opts serverOptions // immutable after NewServer; resolved from ServerOptions

	mu sync.Mutex // guards following
	// lis tracks listeners handed to Serve; set to nil by Stop/GracefulStop
	// so a later Serve call can detect the stopped state.
	lis map[net.Listener]bool
	// conns contains all active server transports. It is a map keyed on a
	// listener address with the value being the set of active transports
	// belonging to that listener.
	conns    map[string]map[transport.ServerTransport]bool
	serve    bool // true once Serve has been called
	drain    bool // true once GracefulStop has started draining
	cv       *sync.Cond              // signaled when connections close for GracefulStop
	services map[string]*serviceInfo // service name -> service info
	events   trace.EventLog          // optional event log, set when EnableTracing

	quit               *grpcsync.Event // fired when Stop/GracefulStop begins
	done               *grpcsync.Event // fired when shutdown has completed
	channelzRemoveOnce sync.Once
	serveWG            sync.WaitGroup // counts active Serve goroutines for GracefulStop

	channelzID int64 // channelz unique identification number
	czData     *channelzData

	// serverWorkerChannels feeds the optional worker pool; non-empty only
	// when opts.numServerWorkers > 0.
	serverWorkerChannels []chan *serverWorkerData
}
   143  
// serverOptions is the resolved server configuration, built by applying
// ServerOption values on top of defaultServerOptions in NewServer.
type serverOptions struct {
	creds                 credentials.TransportCredentials
	codec                 baseCodec
	cp                    Compressor   // forced outbound compressor (legacy RPCCompressor)
	dc                    Decompressor // forced inbound decompressor (legacy RPCDecompressor)
	unaryInt              UnaryServerInterceptor
	streamInt             StreamServerInterceptor
	chainUnaryInts        []UnaryServerInterceptor
	chainStreamInts       []StreamServerInterceptor
	inTapHandle           tap.ServerInHandle
	statsHandler          stats.Handler
	maxConcurrentStreams  uint32
	maxReceiveMessageSize int
	maxSendMessageSize    int
	unknownStreamDesc     *StreamDesc // fallback handler for unregistered methods
	keepaliveParams       keepalive.ServerParameters
	keepalivePolicy       keepalive.EnforcementPolicy
	initialWindowSize     int32
	initialConnWindowSize int32
	writeBufferSize       int
	readBufferSize        int
	connectionTimeout     time.Duration
	maxHeaderListSize     *uint32
	headerTableSize       *uint32
	numServerWorkers      uint32
}

// defaultServerOptions supplies the baseline configuration: 4 MiB receive
// limit, unlimited send, 120s connection-establishment timeout, and the
// package default read/write buffer sizes.
var defaultServerOptions = serverOptions{
	maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
	maxSendMessageSize:    defaultServerMaxSendMessageSize,
	connectionTimeout:     120 * time.Second,
	writeBufferSize:       defaultWriteBufSize,
	readBufferSize:        defaultReadBufSize,
}
   178  
// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
type ServerOption interface {
	apply(*serverOptions)
}

// EmptyServerOption does not alter the server configuration. It can be embedded
// in another structure to build custom server options.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type EmptyServerOption struct{}

// apply is a no-op, satisfying ServerOption.
func (EmptyServerOption) apply(*serverOptions) {}

// funcServerOption wraps a function that modifies serverOptions into an
// implementation of the ServerOption interface.
type funcServerOption struct {
	f func(*serverOptions)
}

// apply runs the wrapped mutation against do.
func (fdo *funcServerOption) apply(do *serverOptions) {
	fdo.f(do)
}

// newFuncServerOption adapts the mutation f into a ServerOption.
func newFuncServerOption(f func(*serverOptions)) *funcServerOption {
	return &funcServerOption{
		f: f,
	}
}
   210  
   211  // WriteBufferSize determines how much data can be batched before doing a write on the wire.
   212  // The corresponding memory allocation for this buffer will be twice the size to keep syscalls low.
   213  // The default value for this buffer is 32KB.
   214  // Zero will disable the write buffer such that each write will be on underlying connection.
   215  // Note: A Send call may not directly translate to a write.
   216  func WriteBufferSize(s int) ServerOption {
   217  	return newFuncServerOption(func(o *serverOptions) {
   218  		o.writeBufferSize = s
   219  	})
   220  }
   221  
   222  // ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most
   223  // for one read syscall.
   224  // The default value for this buffer is 32KB.
   225  // Zero will disable read buffer for a connection so data framer can access the underlying
   226  // conn directly.
   227  func ReadBufferSize(s int) ServerOption {
   228  	return newFuncServerOption(func(o *serverOptions) {
   229  		o.readBufferSize = s
   230  	})
   231  }
   232  
   233  // InitialWindowSize returns a ServerOption that sets window size for stream.
   234  // The lower bound for window size is 64K and any value smaller than that will be ignored.
   235  func InitialWindowSize(s int32) ServerOption {
   236  	return newFuncServerOption(func(o *serverOptions) {
   237  		o.initialWindowSize = s
   238  	})
   239  }
   240  
   241  // InitialConnWindowSize returns a ServerOption that sets window size for a connection.
   242  // The lower bound for window size is 64K and any value smaller than that will be ignored.
   243  func InitialConnWindowSize(s int32) ServerOption {
   244  	return newFuncServerOption(func(o *serverOptions) {
   245  		o.initialConnWindowSize = s
   246  	})
   247  }
   248  
   249  // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
   250  func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
   251  	if kp.Time > 0 && kp.Time < time.Second {
   252  		logger.Warning("Adjusting keepalive ping interval to minimum period of 1s")
   253  		kp.Time = time.Second
   254  	}
   255  
   256  	return newFuncServerOption(func(o *serverOptions) {
   257  		o.keepaliveParams = kp
   258  	})
   259  }
   260  
   261  // KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
   262  func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
   263  	return newFuncServerOption(func(o *serverOptions) {
   264  		o.keepalivePolicy = kep
   265  	})
   266  }
   267  
// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
//
// This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
//
// Deprecated: register codecs using encoding.RegisterCodec. The server will
// automatically use registered codecs based on the incoming requests' headers.
// See also
// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
// Will be supported throughout 1.x.
func CustomCodec(codec encoding.Codec) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		o.codec = codec
	})
}
   282  
   283  // ForceServerCodec returns a ServerOption that sets a codec for message
   284  // marshaling and unmarshaling.
   285  //
   286  // This will override any lookups by content-subtype for Codecs registered
   287  // with RegisterCodec.
   288  //
   289  // See Content-Type on
   290  // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
   291  // more details. Also see the documentation on RegisterCodec and
   292  // CallContentSubtype for more details on the interaction between encoding.Codec
   293  // and content-subtype.
   294  //
   295  // This function is provided for advanced users; prefer to register codecs
   296  // using encoding.RegisterCodec.
   297  // The server will automatically use registered codecs based on the incoming
   298  // requests' headers. See also
   299  // https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
   300  // Will be supported throughout 1.x.
   301  //
   302  // Experimental
   303  //
   304  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
   305  // later release.
   306  func ForceServerCodec(codec encoding.Codec) ServerOption {
   307  	return newFuncServerOption(func(o *serverOptions) {
   308  		o.codec = codec
   309  	})
   310  }
   311  
// RPCCompressor returns a ServerOption that sets a compressor for outbound
// messages.  For backward compatibility, all outbound messages will be sent
// using this compressor, regardless of incoming message compression.  By
// default, server messages will be sent using the same compressor with which
// request messages were sent.
//
// Deprecated: use encoding.RegisterCompressor instead. Will be supported
// throughout 1.x.
func RPCCompressor(cp Compressor) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		o.cp = cp
	})
}

// RPCDecompressor returns a ServerOption that sets a decompressor for inbound
// messages.  It has higher priority than decompressors registered via
// encoding.RegisterCompressor.
//
// Deprecated: use encoding.RegisterCompressor instead. Will be supported
// throughout 1.x.
func RPCDecompressor(dc Decompressor) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		o.dc = dc
	})
}
   337  
// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
// If this is not set, gRPC uses the default limit.
//
// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x.
func MaxMsgSize(m int) ServerOption {
	return MaxRecvMsgSize(m)
}
   345  
   346  // MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
   347  // If this is not set, gRPC uses the default 4MB.
   348  func MaxRecvMsgSize(m int) ServerOption {
   349  	return newFuncServerOption(func(o *serverOptions) {
   350  		o.maxReceiveMessageSize = m
   351  	})
   352  }
   353  
   354  // MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
   355  // If this is not set, gRPC uses the default `math.MaxInt32`.
   356  func MaxSendMsgSize(m int) ServerOption {
   357  	return newFuncServerOption(func(o *serverOptions) {
   358  		o.maxSendMessageSize = m
   359  	})
   360  }
   361  
   362  // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
   363  // of concurrent streams to each ServerTransport.
   364  func MaxConcurrentStreams(n uint32) ServerOption {
   365  	return newFuncServerOption(func(o *serverOptions) {
   366  		o.maxConcurrentStreams = n
   367  	})
   368  }
   369  
   370  // Creds returns a ServerOption that sets credentials for server connections.
   371  func Creds(c credentials.TransportCredentials) ServerOption {
   372  	return newFuncServerOption(func(o *serverOptions) {
   373  		o.creds = c
   374  	})
   375  }
   376  
   377  // UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the
   378  // server. Only one unary interceptor can be installed. The construction of multiple
   379  // interceptors (e.g., chaining) can be implemented at the caller.
   380  func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
   381  	return newFuncServerOption(func(o *serverOptions) {
   382  		if o.unaryInt != nil {
   383  			panic("The unary server interceptor was already set and may not be reset.")
   384  		}
   385  		o.unaryInt = i
   386  	})
   387  }
   388  
   389  // ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor
   390  // for unary RPCs. The first interceptor will be the outer most,
   391  // while the last interceptor will be the inner most wrapper around the real call.
   392  // All unary interceptors added by this method will be chained.
   393  func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption {
   394  	return newFuncServerOption(func(o *serverOptions) {
   395  		o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
   396  	})
   397  }
   398  
   399  // StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
   400  // server. Only one stream interceptor can be installed.
   401  func StreamInterceptor(i StreamServerInterceptor) ServerOption {
   402  	return newFuncServerOption(func(o *serverOptions) {
   403  		if o.streamInt != nil {
   404  			panic("The stream server interceptor was already set and may not be reset.")
   405  		}
   406  		o.streamInt = i
   407  	})
   408  }
   409  
   410  // ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor
   411  // for streaming RPCs. The first interceptor will be the outer most,
   412  // while the last interceptor will be the inner most wrapper around the real call.
   413  // All stream interceptors added by this method will be chained.
   414  func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption {
   415  	return newFuncServerOption(func(o *serverOptions) {
   416  		o.chainStreamInts = append(o.chainStreamInts, interceptors...)
   417  	})
   418  }
   419  
   420  // InTapHandle returns a ServerOption that sets the tap handle for all the server
   421  // transport to be created. Only one can be installed.
   422  //
   423  // Experimental
   424  //
   425  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
   426  // later release.
   427  func InTapHandle(h tap.ServerInHandle) ServerOption {
   428  	return newFuncServerOption(func(o *serverOptions) {
   429  		if o.inTapHandle != nil {
   430  			panic("The tap handle was already set and may not be reset.")
   431  		}
   432  		o.inTapHandle = h
   433  	})
   434  }
   435  
   436  // StatsHandler returns a ServerOption that sets the stats handler for the server.
   437  func StatsHandler(h stats.Handler) ServerOption {
   438  	return newFuncServerOption(func(o *serverOptions) {
   439  		o.statsHandler = h
   440  	})
   441  }
   442  
   443  // UnknownServiceHandler returns a ServerOption that allows for adding a custom
   444  // unknown service handler. The provided method is a bidi-streaming RPC service
   445  // handler that will be invoked instead of returning the "unimplemented" gRPC
   446  // error whenever a request is received for an unregistered service or method.
   447  // The handling function and stream interceptor (if set) have full access to
   448  // the ServerStream, including its Context.
   449  func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
   450  	return newFuncServerOption(func(o *serverOptions) {
   451  		o.unknownStreamDesc = &StreamDesc{
   452  			StreamName: "unknown_service_handler",
   453  			Handler:    streamHandler,
   454  			// We need to assume that the users of the streamHandler will want to use both.
   455  			ClientStreams: true,
   456  			ServerStreams: true,
   457  		}
   458  	})
   459  }
   460  
   461  // ConnectionTimeout returns a ServerOption that sets the timeout for
   462  // connection establishment (up to and including HTTP/2 handshaking) for all
   463  // new connections.  If this is not set, the default is 120 seconds.  A zero or
   464  // negative value will result in an immediate timeout.
   465  //
   466  // Experimental
   467  //
   468  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
   469  // later release.
   470  //goland:noinspection GoUnusedExportedFunction
   471  func ConnectionTimeout(d time.Duration) ServerOption {
   472  	return newFuncServerOption(func(o *serverOptions) {
   473  		o.connectionTimeout = d
   474  	})
   475  }
   476  
   477  // MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
   478  // of header list that the server is prepared to accept.
   479  func MaxHeaderListSize(s uint32) ServerOption {
   480  	return newFuncServerOption(func(o *serverOptions) {
   481  		o.maxHeaderListSize = &s
   482  	})
   483  }
   484  
   485  // HeaderTableSize returns a ServerOption that sets the size of dynamic
   486  // header table for stream.
   487  //
   488  // Experimental
   489  //
   490  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
   491  // later release.
   492  //goland:noinspection GoUnusedExportedFunction
   493  func HeaderTableSize(s uint32) ServerOption {
   494  	return newFuncServerOption(func(o *serverOptions) {
   495  		o.headerTableSize = &s
   496  	})
   497  }
   498  
   499  // NumStreamWorkers returns a ServerOption that sets the number of worker
   500  // goroutines that should be used to process incoming streams. Setting this to
   501  // zero (default) will disable workers and spawn a new goroutine for each
   502  // stream.
   503  //
   504  // Experimental
   505  //
   506  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
   507  // later release.
   508  //goland:noinspection GoUnusedExportedFunction
   509  func NumStreamWorkers(numServerWorkers uint32) ServerOption {
   510  	// TODO: If/when this API gets stabilized (i.e. stream workers become the
   511  	// only way streams are processed), change the behavior of the zero value to
   512  	// a sane default. Preliminary experiments suggest that a value equal to the
   513  	// number of CPUs available is most performant; requires thorough testing.
   514  	return newFuncServerOption(func(o *serverOptions) {
   515  		o.numServerWorkers = numServerWorkers
   516  	})
   517  }
   518  
// serverWorkerResetThreshold defines how often the stack must be reset. Every
// N requests, by spawning a new goroutine in its place, a worker can reset its
// stack so that large stacks don't live in memory forever. 2^16 should allow
// each goroutine stack to live for at least a few seconds in a typical
// workload (assuming a QPS of a few thousand requests/sec). See serverWorker
// for how this threshold is randomized per worker.
const serverWorkerResetThreshold = 1 << 16
   525  
   526  // serverWorkers blocks on a *transport.Stream channel forever and waits for
   527  // data to be fed by serveStreams. This allows different requests to be
   528  // processed by the same goroutine, removing the need for expensive stack
   529  // re-allocations (see the runtime.morestack problem [1]).
   530  //
   531  // [1] https://github.com/golang/go/issues/18138
   532  func (s *Server) serverWorker(ch chan *serverWorkerData) {
   533  	// To make sure all server workers don't reset at the same time, choose a
   534  	// random number of iterations before resetting.
   535  	threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold)
   536  	for completed := 0; completed < threshold; completed++ {
   537  		data, ok := <-ch
   538  		if !ok {
   539  			return
   540  		}
   541  		s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
   542  		data.wg.Done()
   543  	}
   544  	go s.serverWorker(ch)
   545  }
   546  
   547  // initServerWorkers creates worker goroutines and channels to process incoming
   548  // connections to reduce the time spent overall on runtime.morestack.
   549  func (s *Server) initServerWorkers() {
   550  	s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers)
   551  	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
   552  		s.serverWorkerChannels[i] = make(chan *serverWorkerData)
   553  		go s.serverWorker(s.serverWorkerChannels[i])
   554  	}
   555  }
   556  
   557  func (s *Server) stopServerWorkers() {
   558  	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
   559  		close(s.serverWorkerChannels[i])
   560  	}
   561  }
   562  
// NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
	// Start from the package defaults, then layer the caller's options.
	opts := defaultServerOptions
	for _, o := range opt {
		o.apply(&opts)
	}
	s := &Server{
		lis:      make(map[net.Listener]bool),
		opts:     opts,
		conns:    make(map[string]map[transport.ServerTransport]bool),
		services: make(map[string]*serviceInfo),
		quit:     grpcsync.NewEvent(),
		done:     grpcsync.NewEvent(),
		czData:   new(channelzData),
	}
	// Install interceptor chains (helpers defined elsewhere in this file).
	chainUnaryServerInterceptors(s)
	chainStreamServerInterceptors(s)
	// cv shares s.mu; it is signaled as connections close (see GracefulStop).
	s.cv = sync.NewCond(&s.mu)
	if EnableTracing {
		// Attribute the event log to the caller's file:line.
		_, file, line, _ := runtime.Caller(1)
		s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
	}

	// The stream-worker pool is opt-in via NumStreamWorkers.
	if s.opts.numServerWorkers > 0 {
		s.initServerWorkers()
	}

	if channelz.IsOn() {
		s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
	}
	return s
}
   596  
   597  // printf records an event in s's event log, unless s has been stopped.
   598  // REQUIRES s.mu is held.
   599  func (s *Server) printf(format string, a ...interface{}) {
   600  	if s.events != nil {
   601  		s.events.Printf(format, a...)
   602  	}
   603  }
   604  
   605  // errorf records an error in s's event log, unless s has been stopped.
   606  // REQUIRES s.mu is held.
   607  func (s *Server) errorf(format string, a ...interface{}) {
   608  	if s.events != nil {
   609  		s.events.Errorf(format, a...)
   610  	}
   611  }
   612  
// ServiceRegistrar wraps a single method that supports service registration. It
// enables users to pass concrete types other than grpc.Server to the service
// registration methods exported by the IDL generated code.
type ServiceRegistrar interface {
	// RegisterService registers a service and its implementation to the
	// concrete type implementing this interface.  It may not be called
	// once the server has started serving.
	// desc describes the service and its methods and handlers. impl is the
	// service implementation which is passed to the method handlers.
	RegisterService(desc *ServiceDesc, impl interface{})
}
   624  
   625  // RegisterService registers a service and its implementation to the gRPC
   626  // server. It is called from the IDL generated code. This must be called before
   627  // invoking Serve. If ss is non-nil (for legacy code), its type is checked to
   628  // ensure it implements sd.HandlerType.
   629  func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
   630  	if ss != nil {
   631  		ht := reflect.TypeOf(sd.HandlerType).Elem()
   632  		st := reflect.TypeOf(ss)
   633  		if !st.Implements(ht) {
   634  			logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
   635  		}
   636  	}
   637  	s.register(sd, ss)
   638  }
   639  
   640  func (s *Server) register(sd *ServiceDesc, ss interface{}) {
   641  	s.mu.Lock()
   642  	defer s.mu.Unlock()
   643  	s.printf("RegisterService(%q)", sd.ServiceName)
   644  	if s.serve {
   645  		logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
   646  	}
   647  	if _, ok := s.services[sd.ServiceName]; ok {
   648  		logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
   649  	}
   650  	info := &serviceInfo{
   651  		serviceImpl: ss,
   652  		methods:     make(map[string]*MethodDesc),
   653  		streams:     make(map[string]*StreamDesc),
   654  		mdata:       sd.Metadata,
   655  	}
   656  	for i := range sd.Methods {
   657  		d := &sd.Methods[i]
   658  		info.methods[d.MethodName] = d
   659  	}
   660  	for i := range sd.Streams {
   661  		d := &sd.Streams[i]
   662  		info.streams[d.StreamName] = d
   663  	}
   664  	s.services[sd.ServiceName] = info
   665  }
   666  
// MethodInfo contains the information of an RPC including its method name and type.
type MethodInfo struct {
	// Name is the method name only, without the service name or package name.
	Name string
	// IsClientStream indicates whether the RPC is a client streaming RPC.
	IsClientStream bool
	// IsServerStream indicates whether the RPC is a server streaming RPC.
	IsServerStream bool
}

// ServiceInfo contains unary RPC method info, streaming RPC method info and
// metadata for a service, as reported by Server.GetServiceInfo.
type ServiceInfo struct {
	Methods []MethodInfo
	// Metadata is the metadata specified in ServiceDesc when registering service.
	Metadata interface{}
}
   683  
   684  // GetServiceInfo returns a map from service names to ServiceInfo.
   685  // Service names include the package names, in the form of <package>.<service>.
   686  func (s *Server) GetServiceInfo() map[string]ServiceInfo {
   687  	ret := make(map[string]ServiceInfo)
   688  	for n, srv := range s.services {
   689  		methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams))
   690  		for m := range srv.methods {
   691  			methods = append(methods, MethodInfo{
   692  				Name:           m,
   693  				IsClientStream: false,
   694  				IsServerStream: false,
   695  			})
   696  		}
   697  		for m, d := range srv.streams {
   698  			methods = append(methods, MethodInfo{
   699  				Name:           m,
   700  				IsClientStream: d.ClientStreams,
   701  				IsServerStream: d.ServerStreams,
   702  			})
   703  		}
   704  
   705  		ret[n] = ServiceInfo{
   706  			Methods:  methods,
   707  			Metadata: srv.mdata,
   708  		}
   709  	}
   710  	return ret
   711  }
   712  
   713  // ErrServerStopped indicates that the operation is now illegal because of
   714  // the server being stopped.
   715  var ErrServerStopped = errors.New("grpc: the server has been stopped")
   716  
   717  type listenSocket struct {
   718  	net.Listener
   719  	channelzID int64
   720  }
   721  
   722  func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
   723  	return &channelz.SocketInternalMetric{
   724  		SocketOptions: channelz.GetSocketOption(l.Listener),
   725  		LocalAddr:     l.Listener.Addr(),
   726  	}
   727  }
   728  
   729  func (l *listenSocket) Close() error {
   730  	err := l.Listener.Close()
   731  	if channelz.IsOn() {
   732  		channelz.RemoveEntry(l.channelzID)
   733  	}
   734  	return err
   735  }
   736  
// Serve accepts incoming connections on the listener lis, creating a new
// ServerTransport and service goroutine for each. The service goroutines
// read gRPC requests and then call the registered handlers to reply to them.
// Serve returns when lis.Accept fails with fatal errors.  lis will be closed when
// this method returns.
// Serve will return a non-nil error unless Stop or GracefulStop is called.
func (s *Server) Serve(lis net.Listener) error {
	s.mu.Lock()
	s.printf("serving")
	s.serve = true
	if s.lis == nil {
		// Serve called after Stop or GracefulStop.
		s.mu.Unlock()
		_ = lis.Close()
		return ErrServerStopped
	}

	// Account for this Serve invocation so GracefulStop can wait for it.
	s.serveWG.Add(1)
	defer func() {
		s.serveWG.Done()
		if s.quit.HasFired() {
			// Stop or GracefulStop called; block until done and return nil.
			<-s.done.Done()
		}
	}()

	// Track the listener in s.lis so Stop/GracefulStop can close it.
	ls := &listenSocket{Listener: lis}
	s.lis[ls] = true

	if channelz.IsOn() {
		ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
	}
	s.mu.Unlock()

	// On return, close and forget the listener unless Stop/GracefulStop
	// already removed it from s.lis.
	defer func() {
		s.mu.Lock()
		if s.lis != nil && s.lis[ls] {
			_ = ls.Close()
			delete(s.lis, ls)
		}
		s.mu.Unlock()
	}()

	var tempDelay time.Duration // how long to sleep on accept failure

	for {
		rawConn, err := lis.Accept()
		if err != nil {
			// "Temporary" accept errors (e.g. file descriptor exhaustion) are
			// retried with exponential backoff, capped at one second below.
			if ne, ok := err.(interface {
				Temporary() bool
			}); ok && ne.Temporary() {
				if tempDelay == 0 {
					tempDelay = 5 * time.Millisecond
				} else {
					tempDelay *= 2
				}
				if max := 1 * time.Second; tempDelay > max {
					tempDelay = max
				}
				s.mu.Lock()
				s.printf("Accept error: %v; retrying in %v", err, tempDelay)
				s.mu.Unlock()
				timer := time.NewTimer(tempDelay)
				// Sleep for the backoff period, but abort promptly on shutdown.
				select {
				case <-timer.C:
				case <-s.quit.Done():
					timer.Stop()
					return nil
				}
				continue
			}
			s.mu.Lock()
			s.printf("done serving; Accept = %v", err)
			s.mu.Unlock()

			// A fatal accept error during shutdown is expected; report nil.
			if s.quit.HasFired() {
				return nil
			}
			return err
		}
		// A successful accept resets the backoff.
		tempDelay = 0
		// Start a new goroutine to deal with rawConn so we don't stall this Accept
		// loop goroutine.
		//
		// Make sure we account for the goroutine so GracefulStop doesn't nil out
		// s.conns before this conn can be added.
		s.serveWG.Add(1)
		go func() {
			s.handleRawConn(lis.Addr().String(), rawConn)
			s.serveWG.Done()
		}()
	}
}
   830  
// handleRawConn forks a goroutine to handle a just-accepted connection that
// has not had any I/O performed on it yet.
func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
	if s.quit.HasFired() {
		// Server is stopping; refuse new connections.
		_ = rawConn.Close()
		return
	}
	// Bound the HTTP/2 handshake by the configured connection timeout.
	_ = rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))

	// Finish handshaking (HTTP2)
	st := s.newHTTP2Transport(rawConn)
	// Clear the handshake deadline so the established connection is not
	// affected by it.
	_ = rawConn.SetDeadline(time.Time{})
	if st == nil {
		// Handshake failed; newHTTP2Transport has already closed rawConn
		// (unless the connection was dispatched away from gRPC).
		return
	}

	if !s.addConn(lisAddr, st) {
		// Server stopped while handshaking; addConn closed the transport.
		return
	}
	go func() {
		s.serveStreams(st)
		s.removeConn(lisAddr, st)
	}()
}
   855  
   856  func (s *Server) drainServerTransports(addr string) {
   857  	s.mu.Lock()
   858  	conns := s.conns[addr]
   859  	for st := range conns {
   860  		st.Drain()
   861  	}
   862  	s.mu.Unlock()
   863  }
   864  
   865  // newHTTP2Transport sets up a http/2 transport (using the
   866  // gRPC http2 server transport in transport/http2_server.go).
   867  func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
   868  	config := &transport.ServerConfig{
   869  		MaxStreams:            s.opts.maxConcurrentStreams,
   870  		ConnectionTimeout:     s.opts.connectionTimeout,
   871  		Credentials:           s.opts.creds,
   872  		InTapHandle:           s.opts.inTapHandle,
   873  		StatsHandler:          s.opts.statsHandler,
   874  		KeepaliveParams:       s.opts.keepaliveParams,
   875  		KeepalivePolicy:       s.opts.keepalivePolicy,
   876  		InitialWindowSize:     s.opts.initialWindowSize,
   877  		InitialConnWindowSize: s.opts.initialConnWindowSize,
   878  		WriteBufferSize:       s.opts.writeBufferSize,
   879  		ReadBufferSize:        s.opts.readBufferSize,
   880  		ChannelzParentID:      s.channelzID,
   881  		MaxHeaderListSize:     s.opts.maxHeaderListSize,
   882  		HeaderTableSize:       s.opts.headerTableSize,
   883  	}
   884  	st, err := transport.NewServerTransport(c, config)
   885  	if err != nil {
   886  		s.mu.Lock()
   887  		s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
   888  		s.mu.Unlock()
   889  		// ErrConnDispatched means that the connection was dispatched away from
   890  		// gRPC; those connections should be left open.
   891  		if !errors.Is(err, credentials.ErrConnDispatched) {
   892  			// Don't log on ErrConnDispatched and io.EOF to prevent log spam.
   893  			if err != io.EOF {
   894  				channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err)
   895  			}
   896  			_ = c.Close()
   897  		}
   898  		return nil
   899  	}
   900  
   901  	return st
   902  }
   903  
// serveStreams serves all incoming streams on st until the transport closes,
// dispatching each stream either to a server worker goroutine (round-robin)
// or to a dedicated goroutine. It blocks until every in-flight handler has
// returned, then closes the transport.
func (s *Server) serveStreams(st transport.ServerTransport) {
	defer st.Close()
	var wg sync.WaitGroup

	// Distributes streams across the configured worker channels in turn.
	var roundRobinCounter uint32
	st.HandleStreams(func(stream *transport.Stream) {
		wg.Add(1)
		if s.opts.numServerWorkers > 0 {
			data := &serverWorkerData{st: st, wg: &wg, stream: stream}
			// Non-blocking send: a busy worker must not stall HandleStreams.
			select {
			case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data:
			default:
				// If all stream workers are busy, fallback to the default code path.
				go func() {
					s.handleStream(st, stream, s.traceInfo(st, stream))
					wg.Done()
				}()
			}
		} else {
			go func() {
				defer wg.Done()
				s.handleStream(st, stream, s.traceInfo(st, stream))
			}()
		}
	}, func(ctx context.Context, method string) context.Context {
		// Attach a trace to the stream's context when tracing is enabled.
		if !EnableTracing {
			return ctx
		}
		tr := trace.New("grpc.Recv."+methodFamily(method), method)
		return trace.NewContext(ctx, tr)
	})
	wg.Wait()
}
   937  
// Compile-time assertion that *Server implements http.Handler.
var _ http.Handler = (*Server)(nil)

// ServeHTTP implements the Go standard library's http.Handler
// interface by responding to the gRPC request r, by looking up
// the requested gRPC method in the gRPC server s.
//
// The provided HTTP request must have arrived on an HTTP/2
// connection. When using the Go standard library's server,
// practically this means that the Request must also have arrived
// over TLS.
//
// To share one port (such as 443 for https) between gRPC and an
// existing http.Handler, use a root http.Handler such as:
//
//   if r.ProtoMajor == 2 && strings.HasPrefix(
//   	r.Header.Get("Content-Type"), "application/grpc") {
//   	grpcServer.ServeHTTP(w, r)
//   } else {
//   	yourMux.ServeHTTP(w, r)
//   }
//
// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally
// separate from grpc-go's HTTP/2 server. Performance and features may vary
// between the two paths. ServeHTTP does not support some gRPC features
// available through grpc-go's HTTP/2 server.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if !s.addConn(listenerAddressForServeHTTP, st) {
		// Server is stopped or stopping; addConn closed the transport.
		return
	}
	defer s.removeConn(listenerAddressForServeHTTP, st)
	s.serveStreams(st)
}
   980  
   981  // traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
   982  // If tracing is not enabled, it returns nil.
   983  func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
   984  	if !EnableTracing {
   985  		return nil
   986  	}
   987  	tr, ok := trace.FromContext(stream.Context())
   988  	if !ok {
   989  		return nil
   990  	}
   991  
   992  	trInfo = &traceInfo{
   993  		tr: tr,
   994  		firstLine: firstLine{
   995  			client:     false,
   996  			remoteAddr: st.RemoteAddr(),
   997  		},
   998  	}
   999  	if dl, ok := stream.Context().Deadline(); ok {
  1000  		trInfo.firstLine.deadline = time.Until(dl)
  1001  	}
  1002  	return trInfo
  1003  }
  1004  
  1005  func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
  1006  	s.mu.Lock()
  1007  	defer s.mu.Unlock()
  1008  	if s.conns == nil {
  1009  		st.Close()
  1010  		return false
  1011  	}
  1012  	if s.drain {
  1013  		// Transport added after we drained our existing conns: drain it
  1014  		// immediately.
  1015  		st.Drain()
  1016  	}
  1017  
  1018  	if s.conns[addr] == nil {
  1019  		// Create a map entry if this is the first connection on this listener.
  1020  		s.conns[addr] = make(map[transport.ServerTransport]bool)
  1021  	}
  1022  	s.conns[addr][st] = true
  1023  	return true
  1024  }
  1025  
  1026  func (s *Server) removeConn(addr string, st transport.ServerTransport) {
  1027  	s.mu.Lock()
  1028  	defer s.mu.Unlock()
  1029  
  1030  	conns := s.conns[addr]
  1031  	if conns != nil {
  1032  		delete(conns, st)
  1033  		if len(conns) == 0 {
  1034  			// If the last connection for this address is being removed, also
  1035  			// remove the map entry corresponding to the address. This is used
  1036  			// in GracefulStop() when waiting for all connections to be closed.
  1037  			delete(s.conns, addr)
  1038  		}
  1039  		s.cv.Broadcast()
  1040  	}
  1041  }
  1042  
  1043  func (s *Server) channelzMetric() *channelz.ServerInternalMetric {
  1044  	return &channelz.ServerInternalMetric{
  1045  		CallsStarted:             atomic.LoadInt64(&s.czData.callsStarted),
  1046  		CallsSucceeded:           atomic.LoadInt64(&s.czData.callsSucceeded),
  1047  		CallsFailed:              atomic.LoadInt64(&s.czData.callsFailed),
  1048  		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)),
  1049  	}
  1050  }
  1051  
// incrCallsStarted atomically bumps the channelz started-call counter and
// records the start time of the most recent call.
func (s *Server) incrCallsStarted() {
	atomic.AddInt64(&s.czData.callsStarted, 1)
	atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano())
}
  1056  
// incrCallsSucceeded atomically bumps the channelz succeeded-call counter.
func (s *Server) incrCallsSucceeded() {
	atomic.AddInt64(&s.czData.callsSucceeded, 1)
}
  1060  
// incrCallsFailed atomically bumps the channelz failed-call counter.
func (s *Server) incrCallsFailed() {
	atomic.AddInt64(&s.czData.callsFailed, 1)
}
  1064  
  1065  func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
  1066  	data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
  1067  	if err != nil {
  1068  		channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err)
  1069  		return err
  1070  	}
  1071  	compData, err := compress(data, cp, comp)
  1072  	if err != nil {
  1073  		channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err)
  1074  		return err
  1075  	}
  1076  	hdr, payload := msgHeader(data, compData)
  1077  	// TODO(dfawley): should we be checking len(data) instead?
  1078  	if len(payload) > s.opts.maxSendMessageSize {
  1079  		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
  1080  	}
  1081  	err = t.Write(stream, hdr, payload, opts)
  1082  	if err == nil && s.opts.statsHandler != nil {
  1083  		s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
  1084  	}
  1085  	return err
  1086  }
  1087  
  1088  // chainUnaryServerInterceptors chains all unary server interceptors into one.
  1089  func chainUnaryServerInterceptors(s *Server) {
  1090  	// Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will
  1091  	// be executed before any other chained interceptors.
  1092  	interceptors := s.opts.chainUnaryInts
  1093  	if s.opts.unaryInt != nil {
  1094  		interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...)
  1095  	}
  1096  
  1097  	var chainedInt UnaryServerInterceptor
  1098  	if len(interceptors) == 0 {
  1099  		chainedInt = nil
  1100  	} else if len(interceptors) == 1 {
  1101  		chainedInt = interceptors[0]
  1102  	} else {
  1103  		chainedInt = chainUnaryInterceptors(interceptors)
  1104  	}
  1105  
  1106  	s.opts.unaryInt = chainedInt
  1107  }
  1108  
  1109  func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor {
  1110  	return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) {
  1111  		// the struct ensures the variables are allocated together, rather than separately, since we
  1112  		// know they should be garbage collected together. This saves 1 allocation and decreases
  1113  		// time/call by about 10% on the microbenchmark.
  1114  		var state struct {
  1115  			i    int
  1116  			next UnaryHandler
  1117  		}
  1118  		state.next = func(ctx context.Context, req interface{}) (interface{}, error) {
  1119  			if state.i == len(interceptors)-1 {
  1120  				return interceptors[state.i](ctx, req, info, handler)
  1121  			}
  1122  			state.i++
  1123  			return interceptors[state.i-1](ctx, req, info, state.next)
  1124  		}
  1125  		return state.next(ctx, req)
  1126  	}
  1127  }
  1128  
  1129  func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
  1130  	sh := s.opts.statsHandler
  1131  	if sh != nil || trInfo != nil || channelz.IsOn() {
  1132  		if channelz.IsOn() {
  1133  			s.incrCallsStarted()
  1134  		}
  1135  		var statsBegin *stats.Begin
  1136  		if sh != nil {
  1137  			beginTime := time.Now()
  1138  			statsBegin = &stats.Begin{
  1139  				BeginTime:      beginTime,
  1140  				IsClientStream: false,
  1141  				IsServerStream: false,
  1142  			}
  1143  			sh.HandleRPC(stream.Context(), statsBegin)
  1144  		}
  1145  		if trInfo != nil {
  1146  			trInfo.tr.LazyLog(&trInfo.firstLine, false)
  1147  		}
  1148  		// The deferred error handling for tracing, stats handler and channelz are
  1149  		// combined into one function to reduce stack usage -- a defer takes ~56-64
  1150  		// bytes on the stack, so overflowing the stack will require a stack
  1151  		// re-allocation, which is expensive.
  1152  		//
  1153  		// To maintain behavior similar to separate deferred statements, statements
  1154  		// should be executed in the reverse order. That is, tracing first, stats
  1155  		// handler second, and channelz last. Note that panics *within* defers will
  1156  		// lead to different behavior, but that's an acceptable compromise; that
  1157  		// would be undefined behavior territory anyway.
  1158  		defer func() {
  1159  			if trInfo != nil {
  1160  				if err != nil && err != io.EOF {
  1161  					trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
  1162  					trInfo.tr.SetError()
  1163  				}
  1164  				trInfo.tr.Finish()
  1165  			}
  1166  
  1167  			if sh != nil {
  1168  				end := &stats.End{
  1169  					BeginTime: statsBegin.BeginTime,
  1170  					EndTime:   time.Now(),
  1171  				}
  1172  				if err != nil && err != io.EOF {
  1173  					end.Error = toRPCErr(err)
  1174  				}
  1175  				sh.HandleRPC(stream.Context(), end)
  1176  			}
  1177  
  1178  			if channelz.IsOn() {
  1179  				if err != nil && err != io.EOF {
  1180  					s.incrCallsFailed()
  1181  				} else {
  1182  					s.incrCallsSucceeded()
  1183  				}
  1184  			}
  1185  		}()
  1186  	}
  1187  
  1188  	binlog := binarylog.GetMethodLogger(stream.Method())
  1189  	if binlog != nil {
  1190  		ctx := stream.Context()
  1191  		md, _ := metadata.FromIncomingContext(ctx)
  1192  		logEntry := &binarylog.ClientHeader{
  1193  			Header:     md,
  1194  			MethodName: stream.Method(),
  1195  			PeerAddr:   nil,
  1196  		}
  1197  		if deadline, ok := ctx.Deadline(); ok {
  1198  			logEntry.Timeout = time.Until(deadline)
  1199  			if logEntry.Timeout < 0 {
  1200  				logEntry.Timeout = 0
  1201  			}
  1202  		}
  1203  		if a := md[":authority"]; len(a) > 0 {
  1204  			logEntry.Authority = a[0]
  1205  		}
  1206  		if pr, ok := peer.FromContext(ctx); ok {
  1207  			logEntry.PeerAddr = pr.Addr
  1208  		}
  1209  		binlog.Log(logEntry)
  1210  	}
  1211  
  1212  	// comp and cp are used for compression.  decomp and dc are used for
  1213  	// decompression.  If comp and decomp are both set, they are the same;
  1214  	// however they are kept separate to ensure that at most one of the
  1215  	// compressor/decompressor variable pairs are set for use later.
  1216  	var comp, decomp encoding.Compressor
  1217  	var cp Compressor
  1218  	var dc Decompressor
  1219  
  1220  	// If dc is set and matches the stream's compression, use it.  Otherwise, try
  1221  	// to find a matching registered compressor for decomp.
  1222  	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
  1223  		dc = s.opts.dc
  1224  	} else if rc != "" && rc != encoding.Identity {
  1225  		decomp = encoding.GetCompressor(rc)
  1226  		if decomp == nil {
  1227  			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
  1228  			_ = t.WriteStatus(stream, st)
  1229  			return st.Err()
  1230  		}
  1231  	}
  1232  
  1233  	// If cp is set, use it.  Otherwise, attempt to compress the response using
  1234  	// the incoming message compression method.
  1235  	//
  1236  	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
  1237  	if s.opts.cp != nil {
  1238  		cp = s.opts.cp
  1239  		stream.SetSendCompress(cp.Type())
  1240  	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
  1241  		// Legacy compressor not specified; attempt to respond with same encoding.
  1242  		comp = encoding.GetCompressor(rc)
  1243  		if comp != nil {
  1244  			stream.SetSendCompress(rc)
  1245  		}
  1246  	}
  1247  
  1248  	var payInfo *payloadInfo
  1249  	if sh != nil || binlog != nil {
  1250  		payInfo = &payloadInfo{}
  1251  	}
  1252  	d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
  1253  	if err != nil {
  1254  		if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
  1255  			channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e)
  1256  		}
  1257  		return err
  1258  	}
  1259  	if channelz.IsOn() {
  1260  		t.IncrMsgRecv()
  1261  	}
  1262  	df := func(v interface{}) error {
  1263  		if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
  1264  			return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
  1265  		}
  1266  		if sh != nil {
  1267  			sh.HandleRPC(stream.Context(), &stats.InPayload{
  1268  				RecvTime:   time.Now(),
  1269  				Payload:    v,
  1270  				WireLength: payInfo.wireLength + headerLen,
  1271  				Data:       d,
  1272  				Length:     len(d),
  1273  			})
  1274  		}
  1275  		if binlog != nil {
  1276  			binlog.Log(&binarylog.ClientMessage{
  1277  				Message: d,
  1278  			})
  1279  		}
  1280  		if trInfo != nil {
  1281  			trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
  1282  		}
  1283  		return nil
  1284  	}
  1285  	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
  1286  	reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt)
  1287  	if appErr != nil {
  1288  		appStatus, ok := status.FromError(appErr)
  1289  		if !ok {
  1290  			// Convert appErr if it is not a grpc status error.
  1291  			appErr = status.Error(codes.Unknown, appErr.Error())
  1292  			appStatus, _ = status.FromError(appErr)
  1293  		}
  1294  		if trInfo != nil {
  1295  			trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
  1296  			trInfo.tr.SetError()
  1297  		}
  1298  		if e := t.WriteStatus(stream, appStatus); e != nil {
  1299  			channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
  1300  		}
  1301  		if binlog != nil {
  1302  			if h, _ := stream.Header(); h.Len() > 0 {
  1303  				// Only log serverHeader if there was header. Otherwise it can
  1304  				// be trailer only.
  1305  				binlog.Log(&binarylog.ServerHeader{
  1306  					Header: h,
  1307  				})
  1308  			}
  1309  			binlog.Log(&binarylog.ServerTrailer{
  1310  				Trailer: stream.Trailer(),
  1311  				Err:     appErr,
  1312  			})
  1313  		}
  1314  		return appErr
  1315  	}
  1316  	if trInfo != nil {
  1317  		trInfo.tr.LazyLog(stringer("OK"), false)
  1318  	}
  1319  	opts := &transport.Options{Last: true}
  1320  
  1321  	if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
  1322  		if err == io.EOF {
  1323  			// The entire stream is done (for unary RPC only).
  1324  			return err
  1325  		}
  1326  		if sts, ok := status.FromError(err); ok {
  1327  			if e := t.WriteStatus(stream, sts); e != nil {
  1328  				channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
  1329  			}
  1330  		} else {
  1331  			var st transport.ConnectionError
  1332  			switch {
  1333  			case errors.As(err, &st):
  1334  				// Nothing to do here.
  1335  			default:
  1336  				panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
  1337  			}
  1338  		}
  1339  		if binlog != nil {
  1340  			h, _ := stream.Header()
  1341  			binlog.Log(&binarylog.ServerHeader{
  1342  				Header: h,
  1343  			})
  1344  			binlog.Log(&binarylog.ServerTrailer{
  1345  				Trailer: stream.Trailer(),
  1346  				Err:     appErr,
  1347  			})
  1348  		}
  1349  		return err
  1350  	}
  1351  	if binlog != nil {
  1352  		h, _ := stream.Header()
  1353  		binlog.Log(&binarylog.ServerHeader{
  1354  			Header: h,
  1355  		})
  1356  		binlog.Log(&binarylog.ServerMessage{
  1357  			Message: reply,
  1358  		})
  1359  	}
  1360  	if channelz.IsOn() {
  1361  		t.IncrMsgSent()
  1362  	}
  1363  	if trInfo != nil {
  1364  		trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
  1365  	}
  1366  	// TODO: Should we be logging if writing status failed here, like above?
  1367  	// Should the logging be in WriteStatus?  Should we ignore the WriteStatus
  1368  	// error or allow the stats handler to see it?
  1369  	err = t.WriteStatus(stream, statusOK)
  1370  	if binlog != nil {
  1371  		binlog.Log(&binarylog.ServerTrailer{
  1372  			Trailer: stream.Trailer(),
  1373  			Err:     appErr,
  1374  		})
  1375  	}
  1376  	return err
  1377  }
  1378  
  1379  // chainStreamServerInterceptors chains all stream server interceptors into one.
  1380  func chainStreamServerInterceptors(s *Server) {
  1381  	// Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will
  1382  	// be executed before any other chained interceptors.
  1383  	interceptors := s.opts.chainStreamInts
  1384  	if s.opts.streamInt != nil {
  1385  		interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...)
  1386  	}
  1387  
  1388  	var chainedInt StreamServerInterceptor
  1389  	if len(interceptors) == 0 {
  1390  		chainedInt = nil
  1391  	} else if len(interceptors) == 1 {
  1392  		chainedInt = interceptors[0]
  1393  	} else {
  1394  		chainedInt = chainStreamInterceptors(interceptors)
  1395  	}
  1396  
  1397  	s.opts.streamInt = chainedInt
  1398  }
  1399  
  1400  func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor {
  1401  	return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
  1402  		// the struct ensures the variables are allocated together, rather than separately, since we
  1403  		// know they should be garbage collected together. This saves 1 allocation and decreases
  1404  		// time/call by about 10% on the microbenchmark.
  1405  		var state struct {
  1406  			i    int
  1407  			next StreamHandler
  1408  		}
  1409  		state.next = func(srv interface{}, ss ServerStream) error {
  1410  			if state.i == len(interceptors)-1 {
  1411  				return interceptors[state.i](srv, ss, info, handler)
  1412  			}
  1413  			state.i++
  1414  			return interceptors[state.i-1](srv, ss, info, state.next)
  1415  		}
  1416  		return state.next(srv, ss)
  1417  	}
  1418  }
  1419  
// processStreamingRPC drives a single streaming RPC: it builds the
// serverStream, configures compression, invokes the stream handler (through
// the stream interceptor, if configured) and writes the final status. It also
// drives tracing, stats, channelz accounting and binary logging.
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
	if channelz.IsOn() {
		s.incrCallsStarted()
	}
	sh := s.opts.statsHandler
	var statsBegin *stats.Begin
	if sh != nil {
		beginTime := time.Now()
		statsBegin = &stats.Begin{
			BeginTime:      beginTime,
			IsClientStream: sd.ClientStreams,
			IsServerStream: sd.ServerStreams,
		}
		sh.HandleRPC(stream.Context(), statsBegin)
	}
	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
	ss := &serverStream{
		ctx:                   ctx,
		t:                     t,
		s:                     stream,
		p:                     &parser{r: stream},
		codec:                 s.getCodec(stream.ContentSubtype()),
		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
		maxSendMessageSize:    s.opts.maxSendMessageSize,
		trInfo:                trInfo,
		statsHandler:          sh,
	}

	if sh != nil || trInfo != nil || channelz.IsOn() {
		// See comment in processUnaryRPC on defers.
		defer func() {
			if trInfo != nil {
				ss.mu.Lock()
				if err != nil && err != io.EOF {
					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
					ss.trInfo.tr.SetError()
				}
				ss.trInfo.tr.Finish()
				ss.trInfo.tr = nil
				ss.mu.Unlock()
			}

			if sh != nil {
				end := &stats.End{
					BeginTime: statsBegin.BeginTime,
					EndTime:   time.Now(),
				}
				if err != nil && err != io.EOF {
					end.Error = toRPCErr(err)
				}
				sh.HandleRPC(stream.Context(), end)
			}

			if channelz.IsOn() {
				// io.EOF counts as success for channelz accounting.
				if err != nil && err != io.EOF {
					s.incrCallsFailed()
				} else {
					s.incrCallsSucceeded()
				}
			}
		}()
	}

	ss.binlog = binarylog.GetMethodLogger(stream.Method())
	if ss.binlog != nil {
		// Log the client header (metadata, deadline, authority, peer).
		md, _ := metadata.FromIncomingContext(ctx)
		logEntry := &binarylog.ClientHeader{
			Header:     md,
			MethodName: stream.Method(),
			PeerAddr:   nil,
		}
		if deadline, ok := ctx.Deadline(); ok {
			logEntry.Timeout = time.Until(deadline)
			if logEntry.Timeout < 0 {
				logEntry.Timeout = 0
			}
		}
		if a := md[":authority"]; len(a) > 0 {
			logEntry.Authority = a[0]
		}
		if pr, ok := peer.FromContext(ss.Context()); ok {
			logEntry.PeerAddr = pr.Addr
		}
		ss.binlog.Log(logEntry)
	}

	// If dc is set and matches the stream's compression, use it.  Otherwise, try
	// to find a matching registered compressor for decomp.
	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
		ss.dc = s.opts.dc
	} else if rc != "" && rc != encoding.Identity {
		ss.decomp = encoding.GetCompressor(rc)
		if ss.decomp == nil {
			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
			_ = t.WriteStatus(ss.s, st)
			return st.Err()
		}
	}

	// If cp is set, use it.  Otherwise, attempt to compress the response using
	// the incoming message compression method.
	//
	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
	if s.opts.cp != nil {
		ss.cp = s.opts.cp
		stream.SetSendCompress(s.opts.cp.Type())
	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
		// Legacy compressor not specified; attempt to respond with same encoding.
		ss.comp = encoding.GetCompressor(rc)
		if ss.comp != nil {
			stream.SetSendCompress(rc)
		}
	}

	ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp)

	if trInfo != nil {
		trInfo.tr.LazyLog(&trInfo.firstLine, false)
	}
	var appErr error
	var server interface{}
	if info != nil {
		server = info.serviceImpl
	}
	// Invoke the handler directly, or through the stream interceptor when one
	// is configured.
	if s.opts.streamInt == nil {
		appErr = sd.Handler(server, ss)
	} else {
		info := &StreamServerInfo{
			FullMethod:     stream.Method(),
			IsClientStream: sd.ClientStreams,
			IsServerStream: sd.ServerStreams,
		}
		appErr = s.opts.streamInt(server, ss, info, sd.Handler)
	}
	if appErr != nil {
		appStatus, ok := status.FromError(appErr)
		if !ok {
			appStatus = status.New(codes.Unknown, appErr.Error())
			appErr = appStatus.Err()
		}
		if trInfo != nil {
			ss.mu.Lock()
			ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
			ss.trInfo.tr.SetError()
			ss.mu.Unlock()
		}
		_ = t.WriteStatus(ss.s, appStatus)
		if ss.binlog != nil {
			ss.binlog.Log(&binarylog.ServerTrailer{
				Trailer: ss.s.Trailer(),
				Err:     appErr,
			})
		}
		// TODO: Should we log an error from WriteStatus here and below?
		return appErr
	}
	if trInfo != nil {
		ss.mu.Lock()
		ss.trInfo.tr.LazyLog(stringer("OK"), false)
		ss.mu.Unlock()
	}
	err = t.WriteStatus(ss.s, statusOK)
	if ss.binlog != nil {
		ss.binlog.Log(&binarylog.ServerTrailer{
			Trailer: ss.s.Trailer(),
			Err:     appErr,
		})
	}
	return err
}
  1590  
  1591  func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
  1592  	sm := stream.Method()
  1593  	if sm != "" && sm[0] == '/' {
  1594  		sm = sm[1:]
  1595  	}
  1596  	pos := strings.LastIndex(sm, "/")
  1597  	if pos == -1 {
  1598  		if trInfo != nil {
  1599  			trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true)
  1600  			trInfo.tr.SetError()
  1601  		}
  1602  		errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
  1603  		if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
  1604  			if trInfo != nil {
  1605  				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
  1606  				trInfo.tr.SetError()
  1607  			}
  1608  			channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
  1609  		}
  1610  		if trInfo != nil {
  1611  			trInfo.tr.Finish()
  1612  		}
  1613  		return
  1614  	}
  1615  	service := sm[:pos]
  1616  	method := sm[pos+1:]
  1617  
  1618  	srv, knownService := s.services[service]
  1619  	if knownService {
  1620  		if md, ok := srv.methods[method]; ok {
  1621  			_ = s.processUnaryRPC(t, stream, srv, md, trInfo)
  1622  			return
  1623  		}
  1624  		if sd, ok := srv.streams[method]; ok {
  1625  			_ = s.processStreamingRPC(t, stream, srv, sd, trInfo)
  1626  			return
  1627  		}
  1628  	}
  1629  	// Unknown service, or known server unknown method.
  1630  	if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
  1631  		_ = s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
  1632  		return
  1633  	}
  1634  	var errDesc string
  1635  	if !knownService {
  1636  		errDesc = fmt.Sprintf("unknown service %v", service)
  1637  	} else {
  1638  		errDesc = fmt.Sprintf("unknown method %v for service %v", method, service)
  1639  	}
  1640  	if trInfo != nil {
  1641  		trInfo.tr.LazyPrintf("%s", errDesc)
  1642  		trInfo.tr.SetError()
  1643  	}
  1644  	if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
  1645  		if trInfo != nil {
  1646  			trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
  1647  			trInfo.tr.SetError()
  1648  		}
  1649  		channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
  1650  	}
  1651  	if trInfo != nil {
  1652  		trInfo.tr.Finish()
  1653  	}
  1654  }
  1655  
// The key to save ServerTransportStream in the context.
// An unexported empty struct type guarantees the key cannot collide
// with context keys defined in any other package.
type streamKey struct{}
  1658  
  1659  // NewContextWithServerTransportStream creates a new context from ctx and
  1660  // attaches stream to it.
  1661  //
  1662  // Experimental
  1663  //
  1664  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
  1665  // later release.
  1666  func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
  1667  	return context.WithValue(ctx, streamKey{}, stream)
  1668  }
  1669  
// ServerTransportStream is a minimal interface that a transport stream must
// implement. This can be used to mock an actual transport stream for tests of
// handler code that use, for example, grpc.SetHeader (which requires some
// stream to be in context).
//
// See also NewContextWithServerTransportStream.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type ServerTransportStream interface {
	// Method returns the full RPC method string, e.g. "/service/method".
	Method() string
	// SetHeader buffers header metadata to be sent with the first header flush.
	SetHeader(md metadata.MD) error
	// SendHeader sends md (merged with any buffered headers) immediately.
	SendHeader(md metadata.MD) error
	// SetTrailer buffers trailer metadata to be sent when the RPC completes.
	SetTrailer(md metadata.MD) error
}
  1687  
  1688  // ServerTransportStreamFromContext returns the ServerTransportStream saved in
  1689  // ctx. Returns nil if the given context has no stream associated with it
  1690  // (which implies it is not an RPC invocation context).
  1691  //
  1692  // Experimental
  1693  //
  1694  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
  1695  // later release.
  1696  func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream {
  1697  	s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
  1698  	return s
  1699  }
  1700  
// Stop stops the gRPC server. It immediately closes all open
// connections and listeners.
// It cancels all active RPCs on the server side and the corresponding
// pending RPCs on the client side will get notified by connection
// errors.
func (s *Server) Stop() {
	// Signal shutdown first so Serve loops stop accepting new work.
	s.quit.Fire()

	// After the teardown below, wait for every Serve goroutine to return
	// before declaring shutdown complete.
	defer func() {
		s.serveWG.Wait()
		s.done.Fire()
	}()

	// Deregister from channelz exactly once, even if Stop and GracefulStop
	// are both invoked.
	s.channelzRemoveOnce.Do(func() {
		if channelz.IsOn() {
			channelz.RemoveEntry(s.channelzID)
		}
	})

	// Snapshot and detach the listener and connection sets under the lock
	// so nothing new can be registered; the actual Close calls happen
	// outside the lock to avoid holding it across network operations.
	s.mu.Lock()
	listeners := s.lis
	s.lis = nil
	conns := s.conns
	s.conns = nil
	// interrupt GracefulStop if Stop and GracefulStop are called concurrently.
	s.cv.Broadcast()
	s.mu.Unlock()

	for lis := range listeners {
		_ = lis.Close()
	}
	// Forcibly close every server transport; this is what cancels active RPCs.
	for _, cs := range conns {
		for st := range cs {
			st.Close()
		}
	}
	if s.opts.numServerWorkers > 0 {
		s.stopServerWorkers()
	}

	// Tear down the trace-events sink, if one was created.
	s.mu.Lock()
	if s.events != nil {
		s.events.Finish()
		s.events = nil
	}
	s.mu.Unlock()
}
  1748  
// GracefulStop stops the gRPC server gracefully. It stops the server from
// accepting new connections and RPCs and blocks until all the pending RPCs are
// finished.
func (s *Server) GracefulStop() {
	s.quit.Fire()
	defer s.done.Fire()

	// Deregister from channelz exactly once, even if Stop and GracefulStop
	// are both invoked.
	s.channelzRemoveOnce.Do(func() {
		if channelz.IsOn() {
			channelz.RemoveEntry(s.channelzID)
		}
	})
	s.mu.Lock()
	// s.conns == nil means Stop/GracefulStop already completed teardown.
	if s.conns == nil {
		s.mu.Unlock()
		return
	}

	// Stop accepting new connections.
	for lis := range s.lis {
		_ = lis.Close()
	}
	s.lis = nil
	// Ask every transport to drain (GOAWAY): finish in-flight RPCs but
	// accept no new ones. The drain flag makes this idempotent across
	// concurrent GracefulStop calls.
	if !s.drain {
		for _, conns := range s.conns {
			for st := range conns {
				st.Drain()
			}
		}
		s.drain = true
	}

	// Wait for serving threads to be ready to exit.  Only then can we be sure no
	// new conns will be created.
	s.mu.Unlock()
	s.serveWG.Wait()
	s.mu.Lock()

	// Block until removeConn has emptied the connection set; s.cv is
	// broadcast as connections are removed (and by Stop, to interrupt us).
	for len(s.conns) != 0 {
		s.cv.Wait()
	}
	// Mark teardown complete so a later Stop/GracefulStop returns early.
	s.conns = nil
	if s.events != nil {
		s.events.Finish()
		s.events = nil
	}
	s.mu.Unlock()
}
  1796  
  1797  // contentSubtype must be lowercase
  1798  // cannot return nil
  1799  func (s *Server) getCodec(contentSubtype string) baseCodec {
  1800  	if s.opts.codec != nil {
  1801  		return s.opts.codec
  1802  	}
  1803  	if contentSubtype == "" {
  1804  		return encoding.GetCodec(proto.Name)
  1805  	}
  1806  	codec := encoding.GetCodec(contentSubtype)
  1807  	if codec == nil {
  1808  		return encoding.GetCodec(proto.Name)
  1809  	}
  1810  	return codec
  1811  }
  1812  
  1813  // SetHeader sets the header metadata.
  1814  // When called multiple times, all the provided metadata will be merged.
  1815  // All the metadata will be sent out when one of the following happens:
  1816  //  - grpc.SendHeader() is called;
  1817  //  - The first response is sent out;
  1818  //  - An RPC status is sent out (error or success).
  1819  func SetHeader(ctx context.Context, md metadata.MD) error {
  1820  	if md.Len() == 0 {
  1821  		return nil
  1822  	}
  1823  	stream := ServerTransportStreamFromContext(ctx)
  1824  	if stream == nil {
  1825  		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
  1826  	}
  1827  	return stream.SetHeader(md)
  1828  }
  1829  
  1830  // SendHeader sends header metadata. It may be called at most once.
  1831  // The provided md and headers set by SetHeader() will be sent.
  1832  func SendHeader(ctx context.Context, md metadata.MD) error {
  1833  	stream := ServerTransportStreamFromContext(ctx)
  1834  	if stream == nil {
  1835  		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
  1836  	}
  1837  	if err := stream.SendHeader(md); err != nil {
  1838  		return toRPCErr(err)
  1839  	}
  1840  	return nil
  1841  }
  1842  
  1843  // SetTrailer sets the trailer metadata that will be sent when an RPC returns.
  1844  // When called more than once, all the provided metadata will be merged.
  1845  func SetTrailer(ctx context.Context, md metadata.MD) error {
  1846  	if md.Len() == 0 {
  1847  		return nil
  1848  	}
  1849  	stream := ServerTransportStreamFromContext(ctx)
  1850  	if stream == nil {
  1851  		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
  1852  	}
  1853  	return stream.SetTrailer(md)
  1854  }
  1855  
  1856  // Method returns the method string for the server context.  The returned
  1857  // string is in the format of "/service/method".
  1858  func Method(ctx context.Context) (string, bool) {
  1859  	s := ServerTransportStreamFromContext(ctx)
  1860  	if s == nil {
  1861  		return "", false
  1862  	}
  1863  	return s.Method(), true
  1864  }
  1865  
// channelzServer adapts a *Server to the interface channelz expects for
// reporting server metrics.
type channelzServer struct {
	s *Server
}
  1869  
// ChannelzMetric returns the current channelz metrics snapshot by
// delegating to the wrapped server.
func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
	return c.s.channelzMetric()
}