gitee.com/ks-custle/core-gm@v0.0.0-20230922171213-b83bdd97b62c/grpc/server.go (about)

     1  /*
     2   *
     3   * Copyright 2014 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   */
    18  
    19  package grpc
    20  
    21  import (
    22  	"context"
    23  	"errors"
    24  	"fmt"
    25  	"io"
    26  	"math"
    27  	"net"
    28  	"reflect"
    29  	"runtime"
    30  	"strings"
    31  	"sync"
    32  	"sync/atomic"
    33  	"time"
    34  
    35  	http "gitee.com/ks-custle/core-gm/gmhttp"
    36  
    37  	"gitee.com/ks-custle/core-gm/net/trace"
    38  
    39  	"gitee.com/ks-custle/core-gm/grpc/codes"
    40  	"gitee.com/ks-custle/core-gm/grpc/credentials"
    41  	"gitee.com/ks-custle/core-gm/grpc/encoding"
    42  	"gitee.com/ks-custle/core-gm/grpc/encoding/proto"
    43  	"gitee.com/ks-custle/core-gm/grpc/grpclog"
    44  	"gitee.com/ks-custle/core-gm/grpc/internal"
    45  	"gitee.com/ks-custle/core-gm/grpc/internal/binarylog"
    46  	"gitee.com/ks-custle/core-gm/grpc/internal/channelz"
    47  	"gitee.com/ks-custle/core-gm/grpc/internal/grpcrand"
    48  	"gitee.com/ks-custle/core-gm/grpc/internal/grpcsync"
    49  	"gitee.com/ks-custle/core-gm/grpc/internal/transport"
    50  	"gitee.com/ks-custle/core-gm/grpc/keepalive"
    51  	"gitee.com/ks-custle/core-gm/grpc/metadata"
    52  	"gitee.com/ks-custle/core-gm/grpc/peer"
    53  	"gitee.com/ks-custle/core-gm/grpc/stats"
    54  	"gitee.com/ks-custle/core-gm/grpc/status"
    55  	"gitee.com/ks-custle/core-gm/grpc/tap"
    56  )
    57  
const (
	// defaultServerMaxReceiveMessageSize caps inbound messages at 4MB unless
	// overridden with MaxRecvMsgSize.
	defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4
	// defaultServerMaxSendMessageSize leaves outbound messages effectively
	// unlimited unless overridden with MaxSendMsgSize.
	defaultServerMaxSendMessageSize = math.MaxInt32

	// Server transports are tracked in a map which is keyed on listener
	// address. For regular gRPC traffic, connections are accepted in Serve()
	// through a call to Accept(), and we use the actual listener address as key
	// when we add it to the map. But for connections received through
	// ServeHTTP(), we do not have a listener and hence use this dummy value.
	listenerAddressForServeHTTP = "listenerAddressForServeHTTP"
)
    69  
// init publishes accessors to the internal package so sibling grpc packages
// can reach unexported Server state without creating import cycles.
func init() {
	// Expose the transport credentials configured via the Creds option.
	internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials {
		return srv.opts.creds
	}
	// Expose draining of all server transports for a given listener address.
	internal.DrainServerTransports = func(srv *Server, addr string) {
		srv.drainServerTransports(addr)
	}
}
    78  
// statusOK is the shared OK status returned with successful RPCs.
var statusOK = status.New(codes.OK, "")

// logger is this package's component logger.
var logger = grpclog.Component("core")

// methodHandler is the signature of a generated unary method handler: dec
// decodes the request message into a value supplied by the handler, and
// interceptor (possibly nil) wraps the actual call on srv.
type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
    83  
// MethodDesc represents an RPC service's method specification.
type MethodDesc struct {
	// MethodName is the method name only, without service or package prefix.
	MethodName string
	// Handler is the generated-code entry point for this unary method.
	Handler methodHandler
}
    89  
// ServiceDesc represents an RPC service's specification.
type ServiceDesc struct {
	// ServiceName is the fully-qualified service name (<package>.<service>).
	ServiceName string
	// The pointer to the service interface. Used to check whether the user
	// provided implementation satisfies the interface requirements.
	HandlerType interface{}
	// Methods lists the service's unary methods.
	Methods []MethodDesc
	// Streams lists the service's streaming methods.
	Streams []StreamDesc
	// Metadata is opaque, service-defined data carried through to ServiceInfo.
	Metadata interface{}
}
   100  
// serviceInfo wraps information about a service. It is very similar to
// ServiceDesc and is constructed from it for internal purposes.
type serviceInfo struct {
	// Contains the implementation for the methods in this service.
	serviceImpl interface{}
	// methods maps method name -> unary method descriptor.
	methods map[string]*MethodDesc
	// streams maps stream name -> streaming method descriptor.
	streams map[string]*StreamDesc
	// mdata is the ServiceDesc.Metadata supplied at registration.
	mdata interface{}
}
   110  
// serverWorkerData is a unit of work handed to a server worker goroutine:
// one stream on one transport, plus the WaitGroup to signal on completion.
type serverWorkerData struct {
	st     transport.ServerTransport
	wg     *sync.WaitGroup
	stream *transport.Stream
}
   116  
// Server is a gRPC server to serve RPC requests.
type Server struct {
	opts serverOptions

	mu  sync.Mutex // guards following
	lis map[net.Listener]bool
	// conns contains all active server transports. It is a map keyed on a
	// listener address with the value being the set of active transports
	// belonging to that listener.
	conns map[string]map[transport.ServerTransport]bool
	// serve becomes true once Serve has been called; registration after that
	// point is fatal.
	serve    bool
	drain    bool
	cv       *sync.Cond              // signaled when connections close for GracefulStop
	services map[string]*serviceInfo // service name -> service info
	events   trace.EventLog

	quit               *grpcsync.Event
	done               *grpcsync.Event
	channelzRemoveOnce sync.Once
	serveWG            sync.WaitGroup // counts active Serve goroutines for GracefulStop

	channelzID int64 // channelz unique identification number
	czData     *channelzData

	// serverWorkerChannels feeds streams to the worker pool created by
	// initServerWorkers (only when numServerWorkers > 0).
	serverWorkerChannels []chan *serverWorkerData
}
   143  
// serverOptions is the aggregate configuration assembled from ServerOptions
// passed to NewServer. Each field is set by the correspondingly-named option.
type serverOptions struct {
	creds                 credentials.TransportCredentials
	codec                 baseCodec
	cp                    Compressor
	dc                    Decompressor
	unaryInt              UnaryServerInterceptor
	streamInt             StreamServerInterceptor
	chainUnaryInts        []UnaryServerInterceptor
	chainStreamInts       []StreamServerInterceptor
	inTapHandle           tap.ServerInHandle
	statsHandler          stats.Handler
	maxConcurrentStreams  uint32
	maxReceiveMessageSize int
	maxSendMessageSize    int
	unknownStreamDesc     *StreamDesc
	keepaliveParams       keepalive.ServerParameters
	keepalivePolicy       keepalive.EnforcementPolicy
	initialWindowSize     int32
	initialConnWindowSize int32
	writeBufferSize       int
	readBufferSize        int
	connectionTimeout     time.Duration
	maxHeaderListSize     *uint32
	headerTableSize       *uint32
	numServerWorkers      uint32
}

// defaultServerOptions is the starting configuration before user-supplied
// ServerOptions are applied in NewServer.
var defaultServerOptions = serverOptions{
	maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
	maxSendMessageSize:    defaultServerMaxSendMessageSize,
	connectionTimeout:     120 * time.Second,
	writeBufferSize:       defaultWriteBufSize,
	readBufferSize:        defaultReadBufSize,
}
   178  
// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
type ServerOption interface {
	// apply mutates the serverOptions being assembled by NewServer.
	apply(*serverOptions)
}

// EmptyServerOption does not alter the server configuration. It can be embedded
// in another structure to build custom server options.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type EmptyServerOption struct{}

func (EmptyServerOption) apply(*serverOptions) {}
   194  
// funcServerOption wraps a function that modifies serverOptions into an
// implementation of the ServerOption interface.
type funcServerOption struct {
	f func(*serverOptions)
}

// apply runs the wrapped mutation against the options under construction.
func (fdo *funcServerOption) apply(do *serverOptions) {
	fdo.f(do)
}

// newFuncServerOption is the constructor used by every concrete option below.
func newFuncServerOption(f func(*serverOptions)) *funcServerOption {
	return &funcServerOption{
		f: f,
	}
}
   210  
   211  // WriteBufferSize determines how much data can be batched before doing a write on the wire.
   212  // The corresponding memory allocation for this buffer will be twice the size to keep syscalls low.
   213  // The default value for this buffer is 32KB.
   214  // Zero will disable the write buffer such that each write will be on underlying connection.
   215  // Note: A Send call may not directly translate to a write.
   216  func WriteBufferSize(s int) ServerOption {
   217  	return newFuncServerOption(func(o *serverOptions) {
   218  		o.writeBufferSize = s
   219  	})
   220  }
   221  
   222  // ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most
   223  // for one read syscall.
   224  // The default value for this buffer is 32KB.
   225  // Zero will disable read buffer for a connection so data framer can access the underlying
   226  // conn directly.
   227  func ReadBufferSize(s int) ServerOption {
   228  	return newFuncServerOption(func(o *serverOptions) {
   229  		o.readBufferSize = s
   230  	})
   231  }
   232  
   233  // InitialWindowSize returns a ServerOption that sets window size for stream.
   234  // The lower bound for window size is 64K and any value smaller than that will be ignored.
   235  func InitialWindowSize(s int32) ServerOption {
   236  	return newFuncServerOption(func(o *serverOptions) {
   237  		o.initialWindowSize = s
   238  	})
   239  }
   240  
   241  // InitialConnWindowSize returns a ServerOption that sets window size for a connection.
   242  // The lower bound for window size is 64K and any value smaller than that will be ignored.
   243  func InitialConnWindowSize(s int32) ServerOption {
   244  	return newFuncServerOption(func(o *serverOptions) {
   245  		o.initialConnWindowSize = s
   246  	})
   247  }
   248  
   249  // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
   250  func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
   251  	if kp.Time > 0 && kp.Time < time.Second {
   252  		logger.Warning("Adjusting keepalive ping interval to minimum period of 1s")
   253  		kp.Time = time.Second
   254  	}
   255  
   256  	return newFuncServerOption(func(o *serverOptions) {
   257  		o.keepaliveParams = kp
   258  	})
   259  }
   260  
   261  // KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
   262  func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
   263  	return newFuncServerOption(func(o *serverOptions) {
   264  		o.keepalivePolicy = kep
   265  	})
   266  }
   267  
   268  // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
   269  //
   270  // This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
   271  //
   272  // Deprecated: register codecs using encoding.RegisterCodec. The server will
   273  // automatically use registered codecs based on the incoming requests' headers.
   274  // See also
   275  // https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
   276  // Will be supported throughout 1.x.
   277  func CustomCodec(codec Codec) ServerOption {
   278  	return newFuncServerOption(func(o *serverOptions) {
   279  		o.codec = codec
   280  	})
   281  }
   282  
   283  // ForceServerCodec returns a ServerOption that sets a codec for message
   284  // marshaling and unmarshaling.
   285  //
   286  // This will override any lookups by content-subtype for Codecs registered
   287  // with RegisterCodec.
   288  //
   289  // See Content-Type on
   290  // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
   291  // more details. Also see the documentation on RegisterCodec and
   292  // CallContentSubtype for more details on the interaction between encoding.Codec
   293  // and content-subtype.
   294  //
   295  // This function is provided for advanced users; prefer to register codecs
   296  // using encoding.RegisterCodec.
   297  // The server will automatically use registered codecs based on the incoming
   298  // requests' headers. See also
   299  // https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
   300  // Will be supported throughout 1.x.
   301  //
   302  // # Experimental
   303  //
   304  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
   305  // later release.
   306  func ForceServerCodec(codec encoding.Codec) ServerOption {
   307  	return newFuncServerOption(func(o *serverOptions) {
   308  		o.codec = codec
   309  	})
   310  }
   311  
   312  // RPCCompressor returns a ServerOption that sets a compressor for outbound
   313  // messages.  For backward compatibility, all outbound messages will be sent
   314  // using this compressor, regardless of incoming message compression.  By
   315  // default, server messages will be sent using the same compressor with which
   316  // request messages were sent.
   317  //
   318  // Deprecated: use encoding.RegisterCompressor instead. Will be supported
   319  // throughout 1.x.
   320  func RPCCompressor(cp Compressor) ServerOption {
   321  	return newFuncServerOption(func(o *serverOptions) {
   322  		o.cp = cp
   323  	})
   324  }
   325  
   326  // RPCDecompressor returns a ServerOption that sets a decompressor for inbound
   327  // messages.  It has higher priority than decompressors registered via
   328  // encoding.RegisterCompressor.
   329  //
   330  // Deprecated: use encoding.RegisterCompressor instead. Will be supported
   331  // throughout 1.x.
   332  func RPCDecompressor(dc Decompressor) ServerOption {
   333  	return newFuncServerOption(func(o *serverOptions) {
   334  		o.dc = dc
   335  	})
   336  }
   337  
// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
// If this is not set, gRPC uses the default limit.
//
// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x.
func MaxMsgSize(m int) ServerOption {
	// Pure alias for the replacement option.
	return MaxRecvMsgSize(m)
}
   345  
   346  // MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
   347  // If this is not set, gRPC uses the default 4MB.
   348  func MaxRecvMsgSize(m int) ServerOption {
   349  	return newFuncServerOption(func(o *serverOptions) {
   350  		o.maxReceiveMessageSize = m
   351  	})
   352  }
   353  
   354  // MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
   355  // If this is not set, gRPC uses the default `math.MaxInt32`.
   356  func MaxSendMsgSize(m int) ServerOption {
   357  	return newFuncServerOption(func(o *serverOptions) {
   358  		o.maxSendMessageSize = m
   359  	})
   360  }
   361  
   362  // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
   363  // of concurrent streams to each ServerTransport.
   364  func MaxConcurrentStreams(n uint32) ServerOption {
   365  	return newFuncServerOption(func(o *serverOptions) {
   366  		o.maxConcurrentStreams = n
   367  	})
   368  }
   369  
   370  // Creds returns a ServerOption that sets credentials for server connections.
   371  func Creds(c credentials.TransportCredentials) ServerOption {
   372  	return newFuncServerOption(func(o *serverOptions) {
   373  		o.creds = c
   374  	})
   375  }
   376  
   377  // UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the
   378  // server. Only one unary interceptor can be installed. The construction of multiple
   379  // interceptors (e.g., chaining) can be implemented at the caller.
   380  func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
   381  	return newFuncServerOption(func(o *serverOptions) {
   382  		if o.unaryInt != nil {
   383  			panic("The unary server interceptor was already set and may not be reset.")
   384  		}
   385  		o.unaryInt = i
   386  	})
   387  }
   388  
   389  // ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor
   390  // for unary RPCs. The first interceptor will be the outer most,
   391  // while the last interceptor will be the inner most wrapper around the real call.
   392  // All unary interceptors added by this method will be chained.
   393  func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption {
   394  	return newFuncServerOption(func(o *serverOptions) {
   395  		o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
   396  	})
   397  }
   398  
   399  // StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
   400  // server. Only one stream interceptor can be installed.
   401  func StreamInterceptor(i StreamServerInterceptor) ServerOption {
   402  	return newFuncServerOption(func(o *serverOptions) {
   403  		if o.streamInt != nil {
   404  			panic("The stream server interceptor was already set and may not be reset.")
   405  		}
   406  		o.streamInt = i
   407  	})
   408  }
   409  
   410  // ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor
   411  // for streaming RPCs. The first interceptor will be the outer most,
   412  // while the last interceptor will be the inner most wrapper around the real call.
   413  // All stream interceptors added by this method will be chained.
   414  func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption {
   415  	return newFuncServerOption(func(o *serverOptions) {
   416  		o.chainStreamInts = append(o.chainStreamInts, interceptors...)
   417  	})
   418  }
   419  
   420  // InTapHandle returns a ServerOption that sets the tap handle for all the server
   421  // transport to be created. Only one can be installed.
   422  //
   423  // # Experimental
   424  //
   425  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
   426  // later release.
   427  func InTapHandle(h tap.ServerInHandle) ServerOption {
   428  	return newFuncServerOption(func(o *serverOptions) {
   429  		if o.inTapHandle != nil {
   430  			panic("The tap handle was already set and may not be reset.")
   431  		}
   432  		o.inTapHandle = h
   433  	})
   434  }
   435  
   436  // StatsHandler returns a ServerOption that sets the stats handler for the server.
   437  func StatsHandler(h stats.Handler) ServerOption {
   438  	return newFuncServerOption(func(o *serverOptions) {
   439  		o.statsHandler = h
   440  	})
   441  }
   442  
   443  // UnknownServiceHandler returns a ServerOption that allows for adding a custom
   444  // unknown service handler. The provided method is a bidi-streaming RPC service
   445  // handler that will be invoked instead of returning the "unimplemented" gRPC
   446  // error whenever a request is received for an unregistered service or method.
   447  // The handling function and stream interceptor (if set) have full access to
   448  // the ServerStream, including its Context.
   449  func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
   450  	return newFuncServerOption(func(o *serverOptions) {
   451  		o.unknownStreamDesc = &StreamDesc{
   452  			StreamName: "unknown_service_handler",
   453  			Handler:    streamHandler,
   454  			// We need to assume that the users of the streamHandler will want to use both.
   455  			ClientStreams: true,
   456  			ServerStreams: true,
   457  		}
   458  	})
   459  }
   460  
   461  // ConnectionTimeout returns a ServerOption that sets the timeout for
   462  // connection establishment (up to and including HTTP/2 handshaking) for all
   463  // new connections.  If this is not set, the default is 120 seconds.  A zero or
   464  // negative value will result in an immediate timeout.
   465  //
   466  // # Experimental
   467  //
   468  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
   469  // later release.
   470  func ConnectionTimeout(d time.Duration) ServerOption {
   471  	return newFuncServerOption(func(o *serverOptions) {
   472  		o.connectionTimeout = d
   473  	})
   474  }
   475  
   476  // MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
   477  // of header list that the server is prepared to accept.
   478  func MaxHeaderListSize(s uint32) ServerOption {
   479  	return newFuncServerOption(func(o *serverOptions) {
   480  		o.maxHeaderListSize = &s
   481  	})
   482  }
   483  
   484  // HeaderTableSize returns a ServerOption that sets the size of dynamic
   485  // header table for stream.
   486  //
   487  // # Experimental
   488  //
   489  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
   490  // later release.
   491  func HeaderTableSize(s uint32) ServerOption {
   492  	return newFuncServerOption(func(o *serverOptions) {
   493  		o.headerTableSize = &s
   494  	})
   495  }
   496  
   497  // NumStreamWorkers returns a ServerOption that sets the number of worker
   498  // goroutines that should be used to process incoming streams. Setting this to
   499  // zero (default) will disable workers and spawn a new goroutine for each
   500  // stream.
   501  //
   502  // # Experimental
   503  //
   504  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
   505  // later release.
   506  func NumStreamWorkers(numServerWorkers uint32) ServerOption {
   507  	// TODO: If/when this API gets stabilized (i.e. stream workers become the
   508  	// only way streams are processed), change the behavior of the zero value to
   509  	// a sane default. Preliminary experiments suggest that a value equal to the
   510  	// number of CPUs available is most performant; requires thorough testing.
   511  	return newFuncServerOption(func(o *serverOptions) {
   512  		o.numServerWorkers = numServerWorkers
   513  	})
   514  }
   515  
// serverWorkerResetThreshold defines how often the stack must be reset. Every
// N requests, by spawning a new goroutine in its place, a worker can reset its
// stack so that large stacks don't live in memory forever. 2^16 should allow
// each goroutine stack to live for at least a few seconds in a typical
// workload (assuming a QPS of a few thousand requests/sec).
const serverWorkerResetThreshold = 1 << 16
   522  
   523  // serverWorkers blocks on a *transport.Stream channel forever and waits for
   524  // data to be fed by serveStreams. This allows different requests to be
   525  // processed by the same goroutine, removing the need for expensive stack
   526  // re-allocations (see the runtime.morestack problem [1]).
   527  //
   528  // [1] https://github.com/golang/go/issues/18138
   529  func (s *Server) serverWorker(ch chan *serverWorkerData) {
   530  	// To make sure all server workers don't reset at the same time, choose a
   531  	// random number of iterations before resetting.
   532  	threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold)
   533  	for completed := 0; completed < threshold; completed++ {
   534  		data, ok := <-ch
   535  		if !ok {
   536  			return
   537  		}
   538  		s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
   539  		data.wg.Done()
   540  	}
   541  	go s.serverWorker(ch)
   542  }
   543  
   544  // initServerWorkers creates worker goroutines and channels to process incoming
   545  // connections to reduce the time spent overall on runtime.morestack.
   546  func (s *Server) initServerWorkers() {
   547  	s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers)
   548  	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
   549  		s.serverWorkerChannels[i] = make(chan *serverWorkerData)
   550  		go s.serverWorker(s.serverWorkerChannels[i])
   551  	}
   552  }
   553  
   554  func (s *Server) stopServerWorkers() {
   555  	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
   556  		close(s.serverWorkerChannels[i])
   557  	}
   558  }
   559  
// NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
	// Start from the package defaults and layer each caller option on top.
	opts := defaultServerOptions
	for _, o := range opt {
		o.apply(&opts)
	}
	s := &Server{
		lis:      make(map[net.Listener]bool),
		opts:     opts,
		conns:    make(map[string]map[transport.ServerTransport]bool),
		services: make(map[string]*serviceInfo),
		quit:     grpcsync.NewEvent(),
		done:     grpcsync.NewEvent(),
		czData:   new(channelzData),
	}
	// Collapse any interceptor chains into single unary/stream interceptors.
	chainUnaryServerInterceptors(s)
	chainStreamServerInterceptors(s)
	// cv is signaled as connections close, which GracefulStop waits on.
	s.cv = sync.NewCond(&s.mu)
	if EnableTracing {
		// Record the caller's location as the event-log identifier.
		_, file, line, _ := runtime.Caller(1)
		s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
	}

	// A worker pool is only created when explicitly requested via
	// NumStreamWorkers.
	if s.opts.numServerWorkers > 0 {
		s.initServerWorkers()
	}

	if channelz.IsOn() {
		s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
	}
	return s
}
   593  
   594  // printf records an event in s's event log, unless s has been stopped.
   595  // REQUIRES s.mu is held.
   596  func (s *Server) printf(format string, a ...interface{}) {
   597  	if s.events != nil {
   598  		s.events.Printf(format, a...)
   599  	}
   600  }
   601  
   602  // errorf records an error in s's event log, unless s has been stopped.
   603  // REQUIRES s.mu is held.
   604  func (s *Server) errorf(format string, a ...interface{}) {
   605  	if s.events != nil {
   606  		s.events.Errorf(format, a...)
   607  	}
   608  }
   609  
// ServiceRegistrar wraps a single method that supports service registration. It
// enables users to pass concrete types other than grpc.Server to the service
// registration methods exported by the IDL generated code.
type ServiceRegistrar interface {
	// RegisterService registers a service and its implementation to the
	// concrete type implementing this interface.  It may not be called
	// once the server has started serving.
	// desc describes the service and its methods and handlers. impl is the
	// service implementation which is passed to the method handlers.
	RegisterService(desc *ServiceDesc, impl interface{})
}
   621  
   622  // RegisterService registers a service and its implementation to the gRPC
   623  // server. It is called from the IDL generated code. This must be called before
   624  // invoking Serve. If ss is non-nil (for legacy code), its type is checked to
   625  // ensure it implements sd.HandlerType.
   626  func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
   627  	if ss != nil {
   628  		ht := reflect.TypeOf(sd.HandlerType).Elem()
   629  		st := reflect.TypeOf(ss)
   630  		if !st.Implements(ht) {
   631  			logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
   632  		}
   633  	}
   634  	s.register(sd, ss)
   635  }
   636  
   637  func (s *Server) register(sd *ServiceDesc, ss interface{}) {
   638  	s.mu.Lock()
   639  	defer s.mu.Unlock()
   640  	s.printf("RegisterService(%q)", sd.ServiceName)
   641  	if s.serve {
   642  		logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
   643  	}
   644  	if _, ok := s.services[sd.ServiceName]; ok {
   645  		logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
   646  	}
   647  	info := &serviceInfo{
   648  		serviceImpl: ss,
   649  		methods:     make(map[string]*MethodDesc),
   650  		streams:     make(map[string]*StreamDesc),
   651  		mdata:       sd.Metadata,
   652  	}
   653  	for i := range sd.Methods {
   654  		d := &sd.Methods[i]
   655  		info.methods[d.MethodName] = d
   656  	}
   657  	for i := range sd.Streams {
   658  		d := &sd.Streams[i]
   659  		info.streams[d.StreamName] = d
   660  	}
   661  	s.services[sd.ServiceName] = info
   662  }
   663  
// MethodInfo contains the information of an RPC including its method name and type.
type MethodInfo struct {
	// Name is the method name only, without the service name or package name.
	Name string
	// IsClientStream indicates whether the RPC is a client streaming RPC.
	IsClientStream bool
	// IsServerStream indicates whether the RPC is a server streaming RPC.
	IsServerStream bool
}

// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service.
type ServiceInfo struct {
	Methods []MethodInfo
	// Metadata is the metadata specified in ServiceDesc when registering service.
	Metadata interface{}
}
   680  
   681  // GetServiceInfo returns a map from service names to ServiceInfo.
   682  // Service names include the package names, in the form of <package>.<service>.
   683  func (s *Server) GetServiceInfo() map[string]ServiceInfo {
   684  	ret := make(map[string]ServiceInfo)
   685  	for n, srv := range s.services {
   686  		methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams))
   687  		for m := range srv.methods {
   688  			methods = append(methods, MethodInfo{
   689  				Name:           m,
   690  				IsClientStream: false,
   691  				IsServerStream: false,
   692  			})
   693  		}
   694  		for m, d := range srv.streams {
   695  			methods = append(methods, MethodInfo{
   696  				Name:           m,
   697  				IsClientStream: d.ClientStreams,
   698  				IsServerStream: d.ServerStreams,
   699  			})
   700  		}
   701  
   702  		ret[n] = ServiceInfo{
   703  			Methods:  methods,
   704  			Metadata: srv.mdata,
   705  		}
   706  	}
   707  	return ret
   708  }
   709  
// ErrServerStopped indicates that the operation is now illegal because of
// the server being stopped.
var ErrServerStopped = errors.New("grpc: the server has been stopped")

// listenSocket pairs a net.Listener with its channelz identifier so the
// entry can be removed when the listener closes.
type listenSocket struct {
	net.Listener
	channelzID int64
}
   718  
   719  func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
   720  	return &channelz.SocketInternalMetric{
   721  		SocketOptions: channelz.GetSocketOption(l.Listener),
   722  		LocalAddr:     l.Listener.Addr(),
   723  	}
   724  }
   725  
// Close closes the wrapped listener and, if channelz is enabled, removes this
// socket's channelz entry. The listener's Close error is returned either way.
func (l *listenSocket) Close() error {
	err := l.Listener.Close()
	if channelz.IsOn() {
		channelz.RemoveEntry(l.channelzID)
	}
	return err
}
   733  
// Serve accepts incoming connections on the listener lis, creating a new
// ServerTransport and service goroutine for each. The service goroutines
// read gRPC requests and then call the registered handlers to reply to them.
// Serve returns when lis.Accept fails with fatal errors.  lis will be closed when
// this method returns.
// Serve will return a non-nil error unless Stop or GracefulStop is called.
func (s *Server) Serve(lis net.Listener) error {
	s.mu.Lock()
	s.printf("serving")
	s.serve = true
	if s.lis == nil {
		// Serve called after Stop or GracefulStop.
		s.mu.Unlock()
		lis.Close()
		return ErrServerStopped
	}

	// Track this Serve invocation so Stop/GracefulStop can wait for it.
	s.serveWG.Add(1)
	defer func() {
		s.serveWG.Done()
		if s.quit.HasFired() {
			// Stop or GracefulStop called; block until done and return nil.
			<-s.done.Done()
		}
	}()

	ls := &listenSocket{Listener: lis}
	s.lis[ls] = true

	if channelz.IsOn() {
		ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
	}
	s.mu.Unlock()

	// On return, close and untrack the listener unless Stop/GracefulStop
	// already removed it from s.lis.
	defer func() {
		s.mu.Lock()
		if s.lis != nil && s.lis[ls] {
			ls.Close()
			delete(s.lis, ls)
		}
		s.mu.Unlock()
	}()

	var tempDelay time.Duration // how long to sleep on accept failure

	for {
		rawConn, err := lis.Accept()
		if err != nil {
			// Temporary errors (e.g. fd exhaustion) are retried with
			// exponential backoff capped at 1s; any other error ends Serve.
			if ne, ok := err.(interface {
				Temporary() bool
			}); ok && ne.Temporary() {
				if tempDelay == 0 {
					tempDelay = 5 * time.Millisecond
				} else {
					tempDelay *= 2
				}
				if max := 1 * time.Second; tempDelay > max {
					tempDelay = max
				}
				s.mu.Lock()
				s.printf("Accept error: %v; retrying in %v", err, tempDelay)
				s.mu.Unlock()
				timer := time.NewTimer(tempDelay)
				select {
				case <-timer.C:
				case <-s.quit.Done():
					timer.Stop()
					return nil
				}
				continue
			}
			s.mu.Lock()
			s.printf("done serving; Accept = %v", err)
			s.mu.Unlock()

			// Accept failure caused by Stop/GracefulStop closing the
			// listener is not reported as an error.
			if s.quit.HasFired() {
				return nil
			}
			return err
		}
		tempDelay = 0
		// Start a new goroutine to deal with rawConn so we don't stall this Accept
		// loop goroutine.
		//
		// Make sure we account for the goroutine so GracefulStop doesn't nil out
		// s.conns before this conn can be added.
		s.serveWG.Add(1)
		go func() {
			s.handleRawConn(lis.Addr().String(), rawConn)
			s.serveWG.Done()
		}()
	}
}
   827  
// handleRawConn forks a goroutine to handle a just-accepted connection that
// has not had any I/O performed on it yet.
func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
	if s.quit.HasFired() {
		rawConn.Close()
		return
	}
	// Bound the handshake duration with the configured connection timeout.
	rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))

	// Finish handshaking (HTTP2)
	st := s.newHTTP2Transport(rawConn)
	// Clear the deadline; per-stream deadlines take over from here.
	rawConn.SetDeadline(time.Time{})
	if st == nil {
		// Handshake failed (or the conn was dispatched away); nothing to serve.
		return
	}

	if !s.addConn(lisAddr, st) {
		// Server is stopping; addConn already closed the transport.
		return
	}
	go func() {
		s.serveStreams(st)
		s.removeConn(lisAddr, st)
	}()
}
   852  
   853  func (s *Server) drainServerTransports(addr string) {
   854  	s.mu.Lock()
   855  	conns := s.conns[addr]
   856  	for st := range conns {
   857  		st.Drain()
   858  	}
   859  	s.mu.Unlock()
   860  }
   861  
// newHTTP2Transport sets up a http/2 transport (using the
// gRPC http2 server transport in transport/http2_server.go).
//
// It returns nil when the handshake fails; in that case the connection has
// already been closed unless the credentials dispatched it elsewhere.
func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
	// Assemble the transport configuration from the server options.
	config := &transport.ServerConfig{
		MaxStreams:            s.opts.maxConcurrentStreams,
		ConnectionTimeout:     s.opts.connectionTimeout,
		Credentials:           s.opts.creds,
		InTapHandle:           s.opts.inTapHandle,
		StatsHandler:          s.opts.statsHandler,
		KeepaliveParams:       s.opts.keepaliveParams,
		KeepalivePolicy:       s.opts.keepalivePolicy,
		InitialWindowSize:     s.opts.initialWindowSize,
		InitialConnWindowSize: s.opts.initialConnWindowSize,
		WriteBufferSize:       s.opts.writeBufferSize,
		ReadBufferSize:        s.opts.readBufferSize,
		ChannelzParentID:      s.channelzID,
		MaxHeaderListSize:     s.opts.maxHeaderListSize,
		HeaderTableSize:       s.opts.headerTableSize,
	}
	st, err := transport.NewServerTransport(c, config)
	if err != nil {
		s.mu.Lock()
		s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
		s.mu.Unlock()
		// ErrConnDispatched means that the connection was dispatched away from
		// gRPC; those connections should be left open.
		if err != credentials.ErrConnDispatched {
			// Don't log on ErrConnDispatched and io.EOF to prevent log spam.
			if err != io.EOF {
				channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err)
			}
			c.Close()
		}
		return nil
	}

	return st
}
   900  
// serveStreams runs the transport's stream-handling loop, dispatching each
// incoming stream either to one of the pre-spawned server workers (when
// numServerWorkers > 0) or to a fresh goroutine. It blocks until the
// transport terminates and every in-flight handler has returned, then closes
// the transport.
func (s *Server) serveStreams(st transport.ServerTransport) {
	defer st.Close()
	var wg sync.WaitGroup

	// Distributes streams across the worker channels round-robin.
	var roundRobinCounter uint32
	st.HandleStreams(func(stream *transport.Stream) {
		wg.Add(1)
		if s.opts.numServerWorkers > 0 {
			data := &serverWorkerData{st: st, wg: &wg, stream: stream}
			select {
			case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data:
			default:
				// If all stream workers are busy, fallback to the default code path.
				go func() {
					s.handleStream(st, stream, s.traceInfo(st, stream))
					wg.Done()
				}()
			}
		} else {
			go func() {
				defer wg.Done()
				s.handleStream(st, stream, s.traceInfo(st, stream))
			}()
		}
	}, func(ctx context.Context, method string) context.Context {
		// Attach a trace to the stream context when tracing is enabled.
		if !EnableTracing {
			return ctx
		}
		tr := trace.New("grpc.Recv."+methodFamily(method), method)
		return trace.NewContext(ctx, tr)
	})
	wg.Wait()
}
   934  
// Compile-time assertion that *Server implements http.Handler (see ServeHTTP).
var _ http.Handler = (*Server)(nil)
   936  
// ServeHTTP implements the Go standard library's http.Handler
// interface by responding to the gRPC request r, by looking up
// the requested gRPC method in the gRPC server s.
//
// The provided HTTP request must have arrived on an HTTP/2
// connection. When using the Go standard library's server,
// practically this means that the Request must also have arrived
// over TLS.
//
// To share one port (such as 443 for https) between gRPC and an
// existing http.Handler, use a root http.Handler such as:
//
//	if r.ProtoMajor == 2 && strings.HasPrefix(
//		r.Header.Get("Content-Type"), "application/grpc") {
//		grpcServer.ServeHTTP(w, r)
//	} else {
//		yourMux.ServeHTTP(w, r)
//	}
//
// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally
// separate from grpc-go's HTTP/2 server. Performance and features may vary
// between the two paths. ServeHTTP does not support some gRPC features
// available through grpc-go's HTTP/2 server.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Wrap the ResponseWriter/Request pair in a ServerTransport so the
	// normal stream-serving machinery can be reused.
	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if !s.addConn(listenerAddressForServeHTTP, st) {
		// Server is stopping; addConn already closed the transport.
		return
	}
	defer s.removeConn(listenerAddressForServeHTTP, st)
	// Blocks until the request (and all its streams) are done.
	s.serveStreams(st)
}
   977  
   978  // traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
   979  // If tracing is not enabled, it returns nil.
   980  func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
   981  	if !EnableTracing {
   982  		return nil
   983  	}
   984  	tr, ok := trace.FromContext(stream.Context())
   985  	if !ok {
   986  		return nil
   987  	}
   988  
   989  	trInfo = &traceInfo{
   990  		tr: tr,
   991  		firstLine: firstLine{
   992  			client:     false,
   993  			remoteAddr: st.RemoteAddr(),
   994  		},
   995  	}
   996  	if dl, ok := stream.Context().Deadline(); ok {
   997  		trInfo.firstLine.deadline = time.Until(dl)
   998  	}
   999  	return trInfo
  1000  }
  1001  
  1002  func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
  1003  	s.mu.Lock()
  1004  	defer s.mu.Unlock()
  1005  	if s.conns == nil {
  1006  		st.Close()
  1007  		return false
  1008  	}
  1009  	if s.drain {
  1010  		// Transport added after we drained our existing conns: drain it
  1011  		// immediately.
  1012  		st.Drain()
  1013  	}
  1014  
  1015  	if s.conns[addr] == nil {
  1016  		// Create a map entry if this is the first connection on this listener.
  1017  		s.conns[addr] = make(map[transport.ServerTransport]bool)
  1018  	}
  1019  	s.conns[addr][st] = true
  1020  	return true
  1021  }
  1022  
  1023  func (s *Server) removeConn(addr string, st transport.ServerTransport) {
  1024  	s.mu.Lock()
  1025  	defer s.mu.Unlock()
  1026  
  1027  	conns := s.conns[addr]
  1028  	if conns != nil {
  1029  		delete(conns, st)
  1030  		if len(conns) == 0 {
  1031  			// If the last connection for this address is being removed, also
  1032  			// remove the map entry corresponding to the address. This is used
  1033  			// in GracefulStop() when waiting for all connections to be closed.
  1034  			delete(s.conns, addr)
  1035  		}
  1036  		s.cv.Broadcast()
  1037  	}
  1038  }
  1039  
// channelzMetric returns a snapshot of the server's call counters for
// channelz. Each field is read atomically, but the snapshot as a whole is
// not taken under a single lock.
func (s *Server) channelzMetric() *channelz.ServerInternalMetric {
	return &channelz.ServerInternalMetric{
		CallsStarted:             atomic.LoadInt64(&s.czData.callsStarted),
		CallsSucceeded:           atomic.LoadInt64(&s.czData.callsSucceeded),
		CallsFailed:              atomic.LoadInt64(&s.czData.callsFailed),
		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)),
	}
}
  1048  
// incrCallsStarted atomically bumps the started-call counter and records the
// call start time for channelz reporting.
func (s *Server) incrCallsStarted() {
	atomic.AddInt64(&s.czData.callsStarted, 1)
	atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano())
}
  1053  
// incrCallsSucceeded atomically bumps the succeeded-call counter for channelz.
func (s *Server) incrCallsSucceeded() {
	atomic.AddInt64(&s.czData.callsSucceeded, 1)
}
  1057  
// incrCallsFailed atomically bumps the failed-call counter for channelz.
func (s *Server) incrCallsFailed() {
	atomic.AddInt64(&s.czData.callsFailed, 1)
}
  1061  
// sendResponse encodes msg with the stream's codec, optionally compresses it
// (cp takes precedence over comp), enforces the server's maximum send size,
// and writes the framed message to the transport. On a successful write the
// outgoing payload is reported to the stats handler, if one is configured.
// Encode/compress failures and oversize messages are returned as status
// errors; otherwise the transport write error (if any) is returned.
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
	data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
	if err != nil {
		channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err)
		return err
	}
	compData, err := compress(data, cp, comp)
	if err != nil {
		channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err)
		return err
	}
	hdr, payload := msgHeader(data, compData)
	// TODO(dfawley): should we be checking len(data) instead?
	if len(payload) > s.opts.maxSendMessageSize {
		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
	}
	err = t.Write(stream, hdr, payload, opts)
	if err == nil && s.opts.statsHandler != nil {
		s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
	}
	return err
}
  1084  
  1085  // chainUnaryServerInterceptors chains all unary server interceptors into one.
  1086  func chainUnaryServerInterceptors(s *Server) {
  1087  	// Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will
  1088  	// be executed before any other chained interceptors.
  1089  	interceptors := s.opts.chainUnaryInts
  1090  	if s.opts.unaryInt != nil {
  1091  		interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...)
  1092  	}
  1093  
  1094  	var chainedInt UnaryServerInterceptor
  1095  	if len(interceptors) == 0 {
  1096  		chainedInt = nil
  1097  	} else if len(interceptors) == 1 {
  1098  		chainedInt = interceptors[0]
  1099  	} else {
  1100  		chainedInt = chainUnaryInterceptors(interceptors)
  1101  	}
  1102  
  1103  	s.opts.unaryInt = chainedInt
  1104  }
  1105  
// chainUnaryInterceptors combines a list of unary interceptors into a single
// UnaryServerInterceptor that invokes them in order, with the real handler
// called by the last one in the chain.
func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) {
		// the struct ensures the variables are allocated together, rather than separately, since we
		// know they should be garbage collected together. This saves 1 allocation and decreases
		// time/call by about 10% on the microbenchmark.
		var state struct {
			i    int
			next UnaryHandler
		}
		// next invokes interceptors[state.i]; the final interceptor receives
		// the real handler, earlier ones receive next itself as their handler.
		state.next = func(ctx context.Context, req interface{}) (interface{}, error) {
			if state.i == len(interceptors)-1 {
				return interceptors[state.i](ctx, req, info, handler)
			}
			state.i++
			return interceptors[state.i-1](ctx, req, info, state.next)
		}
		return state.next(ctx, req)
	}
}
  1125  
// processUnaryRPC receives the single request message for a unary RPC,
// invokes the method handler (through the unary interceptor, if configured),
// and writes the response message and final status back on the transport.
// It also drives tracing, stats, channelz accounting and binary logging for
// the call. The returned error is reported to the transport's stream-done
// machinery; io.EOF means the stream ended normally.
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
	sh := s.opts.statsHandler
	if sh != nil || trInfo != nil || channelz.IsOn() {
		if channelz.IsOn() {
			s.incrCallsStarted()
		}
		var statsBegin *stats.Begin
		if sh != nil {
			beginTime := time.Now()
			statsBegin = &stats.Begin{
				BeginTime:      beginTime,
				IsClientStream: false,
				IsServerStream: false,
			}
			sh.HandleRPC(stream.Context(), statsBegin)
		}
		if trInfo != nil {
			trInfo.tr.LazyLog(&trInfo.firstLine, false)
		}
		// The deferred error handling for tracing, stats handler and channelz are
		// combined into one function to reduce stack usage -- a defer takes ~56-64
		// bytes on the stack, so overflowing the stack will require a stack
		// re-allocation, which is expensive.
		//
		// To maintain behavior similar to separate deferred statements, statements
		// should be executed in the reverse order. That is, tracing first, stats
		// handler second, and channelz last. Note that panics *within* defers will
		// lead to different behavior, but that's an acceptable compromise; that
		// would be undefined behavior territory anyway.
		defer func() {
			if trInfo != nil {
				if err != nil && err != io.EOF {
					trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
					trInfo.tr.SetError()
				}
				trInfo.tr.Finish()
			}

			if sh != nil {
				end := &stats.End{
					BeginTime: statsBegin.BeginTime,
					EndTime:   time.Now(),
				}
				if err != nil && err != io.EOF {
					end.Error = toRPCErr(err)
				}
				sh.HandleRPC(stream.Context(), end)
			}

			if channelz.IsOn() {
				if err != nil && err != io.EOF {
					s.incrCallsFailed()
				} else {
					s.incrCallsSucceeded()
				}
			}
		}()
	}

	// Binary logging: record the client header (metadata, deadline,
	// authority, peer) if a method logger is configured for this method.
	binlog := binarylog.GetMethodLogger(stream.Method())
	if binlog != nil {
		ctx := stream.Context()
		md, _ := metadata.FromIncomingContext(ctx)
		logEntry := &binarylog.ClientHeader{
			Header:     md,
			MethodName: stream.Method(),
			PeerAddr:   nil,
		}
		if deadline, ok := ctx.Deadline(); ok {
			logEntry.Timeout = time.Until(deadline)
			if logEntry.Timeout < 0 {
				logEntry.Timeout = 0
			}
		}
		if a := md[":authority"]; len(a) > 0 {
			logEntry.Authority = a[0]
		}
		if peer, ok := peer.FromContext(ctx); ok {
			logEntry.PeerAddr = peer.Addr
		}
		binlog.Log(logEntry)
	}

	// comp and cp are used for compression.  decomp and dc are used for
	// decompression.  If comp and decomp are both set, they are the same;
	// however they are kept separate to ensure that at most one of the
	// compressor/decompressor variable pairs are set for use later.
	var comp, decomp encoding.Compressor
	var cp Compressor
	var dc Decompressor

	// If dc is set and matches the stream's compression, use it.  Otherwise, try
	// to find a matching registered compressor for decomp.
	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
		dc = s.opts.dc
	} else if rc != "" && rc != encoding.Identity {
		decomp = encoding.GetCompressor(rc)
		if decomp == nil {
			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
			t.WriteStatus(stream, st)
			return st.Err()
		}
	}

	// If cp is set, use it.  Otherwise, attempt to compress the response using
	// the incoming message compression method.
	//
	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
	if s.opts.cp != nil {
		cp = s.opts.cp
		stream.SetSendCompress(cp.Type())
	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
		// Legacy compressor not specified; attempt to respond with same encoding.
		comp = encoding.GetCompressor(rc)
		if comp != nil {
			stream.SetSendCompress(rc)
		}
	}

	// payInfo is only needed when stats or binary logging will consume the
	// raw payload bytes.
	var payInfo *payloadInfo
	if sh != nil || binlog != nil {
		payInfo = &payloadInfo{}
	}
	d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
	if err != nil {
		if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
			channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e)
		}
		return err
	}
	if channelz.IsOn() {
		t.IncrMsgRecv()
	}
	// df decodes the received bytes into the handler's request value and
	// feeds stats/binlog/trace with the decoded payload.
	df := func(v interface{}) error {
		if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
			return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
		}
		if sh != nil {
			sh.HandleRPC(stream.Context(), &stats.InPayload{
				RecvTime:   time.Now(),
				Payload:    v,
				WireLength: payInfo.wireLength + headerLen,
				Data:       d,
				Length:     len(d),
			})
		}
		if binlog != nil {
			binlog.Log(&binarylog.ClientMessage{
				Message: d,
			})
		}
		if trInfo != nil {
			trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
		}
		return nil
	}
	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
	reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt)
	if appErr != nil {
		// Handler (or interceptor) failed: convert to a status, report it
		// to the client, and log trailers if binary logging is on.
		appStatus, ok := status.FromError(appErr)
		if !ok {
			// Convert appErr if it is not a grpc status error.
			appErr = status.Error(codes.Unknown, appErr.Error())
			appStatus, _ = status.FromError(appErr)
		}
		if trInfo != nil {
			trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
			trInfo.tr.SetError()
		}
		if e := t.WriteStatus(stream, appStatus); e != nil {
			channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
		}
		if binlog != nil {
			if h, _ := stream.Header(); h.Len() > 0 {
				// Only log serverHeader if there was header. Otherwise it can
				// be trailer only.
				binlog.Log(&binarylog.ServerHeader{
					Header: h,
				})
			}
			binlog.Log(&binarylog.ServerTrailer{
				Trailer: stream.Trailer(),
				Err:     appErr,
			})
		}
		return appErr
	}
	if trInfo != nil {
		trInfo.tr.LazyLog(stringer("OK"), false)
	}
	opts := &transport.Options{Last: true}

	if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
		if err == io.EOF {
			// The entire stream is done (for unary RPC only).
			return err
		}
		if sts, ok := status.FromError(err); ok {
			if e := t.WriteStatus(stream, sts); e != nil {
				channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
			}
		} else {
			switch st := err.(type) {
			case transport.ConnectionError:
				// Nothing to do here.
			default:
				panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
			}
		}
		if binlog != nil {
			h, _ := stream.Header()
			binlog.Log(&binarylog.ServerHeader{
				Header: h,
			})
			binlog.Log(&binarylog.ServerTrailer{
				Trailer: stream.Trailer(),
				Err:     appErr,
			})
		}
		return err
	}
	if binlog != nil {
		h, _ := stream.Header()
		binlog.Log(&binarylog.ServerHeader{
			Header: h,
		})
		binlog.Log(&binarylog.ServerMessage{
			Message: reply,
		})
	}
	if channelz.IsOn() {
		t.IncrMsgSent()
	}
	if trInfo != nil {
		trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
	}
	// TODO: Should we be logging if writing status failed here, like above?
	// Should the logging be in WriteStatus?  Should we ignore the WriteStatus
	// error or allow the stats handler to see it?
	err = t.WriteStatus(stream, statusOK)
	if binlog != nil {
		binlog.Log(&binarylog.ServerTrailer{
			Trailer: stream.Trailer(),
			Err:     appErr,
		})
	}
	return err
}
  1374  
  1375  // chainStreamServerInterceptors chains all stream server interceptors into one.
  1376  func chainStreamServerInterceptors(s *Server) {
  1377  	// Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will
  1378  	// be executed before any other chained interceptors.
  1379  	interceptors := s.opts.chainStreamInts
  1380  	if s.opts.streamInt != nil {
  1381  		interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...)
  1382  	}
  1383  
  1384  	var chainedInt StreamServerInterceptor
  1385  	if len(interceptors) == 0 {
  1386  		chainedInt = nil
  1387  	} else if len(interceptors) == 1 {
  1388  		chainedInt = interceptors[0]
  1389  	} else {
  1390  		chainedInt = chainStreamInterceptors(interceptors)
  1391  	}
  1392  
  1393  	s.opts.streamInt = chainedInt
  1394  }
  1395  
// chainStreamInterceptors combines a list of stream interceptors into a
// single StreamServerInterceptor that invokes them in order, with the real
// handler called by the last one in the chain.
func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor {
	return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
		// the struct ensures the variables are allocated together, rather than separately, since we
		// know they should be garbage collected together. This saves 1 allocation and decreases
		// time/call by about 10% on the microbenchmark.
		var state struct {
			i    int
			next StreamHandler
		}
		// next invokes interceptors[state.i]; the final interceptor receives
		// the real handler, earlier ones receive next itself as their handler.
		state.next = func(srv interface{}, ss ServerStream) error {
			if state.i == len(interceptors)-1 {
				return interceptors[state.i](srv, ss, info, handler)
			}
			state.i++
			return interceptors[state.i-1](srv, ss, info, state.next)
		}
		return state.next(srv, ss)
	}
}
  1415  
// processStreamingRPC wraps the accepted transport stream in a serverStream,
// runs the streaming handler (through the stream interceptor, if configured),
// and writes the final status on the transport. It also drives tracing,
// stats, channelz accounting and binary logging for the call. info may be
// nil when serving via the unknown-service handler.
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
	if channelz.IsOn() {
		s.incrCallsStarted()
	}
	sh := s.opts.statsHandler
	var statsBegin *stats.Begin
	if sh != nil {
		beginTime := time.Now()
		statsBegin = &stats.Begin{
			BeginTime:      beginTime,
			IsClientStream: sd.ClientStreams,
			IsServerStream: sd.ServerStreams,
		}
		sh.HandleRPC(stream.Context(), statsBegin)
	}
	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
	ss := &serverStream{
		ctx:                   ctx,
		t:                     t,
		s:                     stream,
		p:                     &parser{r: stream},
		codec:                 s.getCodec(stream.ContentSubtype()),
		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
		maxSendMessageSize:    s.opts.maxSendMessageSize,
		trInfo:                trInfo,
		statsHandler:          sh,
	}

	if sh != nil || trInfo != nil || channelz.IsOn() {
		// See comment in processUnaryRPC on defers.
		defer func() {
			if trInfo != nil {
				ss.mu.Lock()
				if err != nil && err != io.EOF {
					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
					ss.trInfo.tr.SetError()
				}
				ss.trInfo.tr.Finish()
				ss.trInfo.tr = nil
				ss.mu.Unlock()
			}

			if sh != nil {
				end := &stats.End{
					BeginTime: statsBegin.BeginTime,
					EndTime:   time.Now(),
				}
				if err != nil && err != io.EOF {
					end.Error = toRPCErr(err)
				}
				sh.HandleRPC(stream.Context(), end)
			}

			if channelz.IsOn() {
				if err != nil && err != io.EOF {
					s.incrCallsFailed()
				} else {
					s.incrCallsSucceeded()
				}
			}
		}()
	}

	// Binary logging: record the client header (metadata, deadline,
	// authority, peer) if a method logger is configured for this method.
	ss.binlog = binarylog.GetMethodLogger(stream.Method())
	if ss.binlog != nil {
		md, _ := metadata.FromIncomingContext(ctx)
		logEntry := &binarylog.ClientHeader{
			Header:     md,
			MethodName: stream.Method(),
			PeerAddr:   nil,
		}
		if deadline, ok := ctx.Deadline(); ok {
			logEntry.Timeout = time.Until(deadline)
			if logEntry.Timeout < 0 {
				logEntry.Timeout = 0
			}
		}
		if a := md[":authority"]; len(a) > 0 {
			logEntry.Authority = a[0]
		}
		if peer, ok := peer.FromContext(ss.Context()); ok {
			logEntry.PeerAddr = peer.Addr
		}
		ss.binlog.Log(logEntry)
	}

	// If dc is set and matches the stream's compression, use it.  Otherwise, try
	// to find a matching registered compressor for decomp.
	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
		ss.dc = s.opts.dc
	} else if rc != "" && rc != encoding.Identity {
		ss.decomp = encoding.GetCompressor(rc)
		if ss.decomp == nil {
			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
			t.WriteStatus(ss.s, st)
			return st.Err()
		}
	}

	// If cp is set, use it.  Otherwise, attempt to compress the response using
	// the incoming message compression method.
	//
	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
	if s.opts.cp != nil {
		ss.cp = s.opts.cp
		stream.SetSendCompress(s.opts.cp.Type())
	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
		// Legacy compressor not specified; attempt to respond with same encoding.
		ss.comp = encoding.GetCompressor(rc)
		if ss.comp != nil {
			stream.SetSendCompress(rc)
		}
	}

	ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp)

	if trInfo != nil {
		trInfo.tr.LazyLog(&trInfo.firstLine, false)
	}
	// Invoke the handler, through the stream interceptor when one is set.
	var appErr error
	var server interface{}
	if info != nil {
		server = info.serviceImpl
	}
	if s.opts.streamInt == nil {
		appErr = sd.Handler(server, ss)
	} else {
		info := &StreamServerInfo{
			FullMethod:     stream.Method(),
			IsClientStream: sd.ClientStreams,
			IsServerStream: sd.ServerStreams,
		}
		appErr = s.opts.streamInt(server, ss, info, sd.Handler)
	}
	if appErr != nil {
		// Handler failed: convert to a status and report it to the client.
		appStatus, ok := status.FromError(appErr)
		if !ok {
			appStatus = status.New(codes.Unknown, appErr.Error())
			appErr = appStatus.Err()
		}
		if trInfo != nil {
			ss.mu.Lock()
			ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
			ss.trInfo.tr.SetError()
			ss.mu.Unlock()
		}
		t.WriteStatus(ss.s, appStatus)
		if ss.binlog != nil {
			ss.binlog.Log(&binarylog.ServerTrailer{
				Trailer: ss.s.Trailer(),
				Err:     appErr,
			})
		}
		// TODO: Should we log an error from WriteStatus here and below?
		return appErr
	}
	if trInfo != nil {
		ss.mu.Lock()
		ss.trInfo.tr.LazyLog(stringer("OK"), false)
		ss.mu.Unlock()
	}
	err = t.WriteStatus(ss.s, statusOK)
	if ss.binlog != nil {
		ss.binlog.Log(&binarylog.ServerTrailer{
			Trailer: ss.s.Trailer(),
			Err:     appErr,
		})
	}
	return err
}
  1586  
// handleStream routes an accepted stream to the registered handler for its
// "/service/method" name: unary methods go to processUnaryRPC, streaming
// methods to processStreamingRPC. Unknown services or methods fall back to
// the configured unknownStreamDesc handler if any; otherwise an
// Unimplemented status is written back to the client.
func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
	// Split the method name ("/service/method", leading slash optional)
	// into its service and method components.
	sm := stream.Method()
	if sm != "" && sm[0] == '/' {
		sm = sm[1:]
	}
	pos := strings.LastIndex(sm, "/")
	if pos == -1 {
		// No separator: the method name is malformed; reject the stream.
		if trInfo != nil {
			trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true)
			trInfo.tr.SetError()
		}
		errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
		if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
			if trInfo != nil {
				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
				trInfo.tr.SetError()
			}
			channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
		}
		if trInfo != nil {
			trInfo.tr.Finish()
		}
		return
	}
	service := sm[:pos]
	method := sm[pos+1:]

	srv, knownService := s.services[service]
	if knownService {
		if md, ok := srv.methods[method]; ok {
			s.processUnaryRPC(t, stream, srv, md, trInfo)
			return
		}
		if sd, ok := srv.streams[method]; ok {
			s.processStreamingRPC(t, stream, srv, sd, trInfo)
			return
		}
	}
	// Unknown service, or known server unknown method.
	if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
		s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
		return
	}
	var errDesc string
	if !knownService {
		errDesc = fmt.Sprintf("unknown service %v", service)
	} else {
		errDesc = fmt.Sprintf("unknown method %v for service %v", method, service)
	}
	if trInfo != nil {
		trInfo.tr.LazyPrintf("%s", errDesc)
		trInfo.tr.SetError()
	}
	if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
		if trInfo != nil {
			trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
			trInfo.tr.SetError()
		}
		channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
	}
	if trInfo != nil {
		trInfo.tr.Finish()
	}
}
  1651  
// The key to save ServerTransportStream in the context.
// An unexported empty struct type guarantees the key cannot collide with
// context keys defined by other packages.
type streamKey struct{}
  1654  
// NewContextWithServerTransportStream creates a new context from ctx and
// attaches stream to it. The stream can later be recovered with
// ServerTransportStreamFromContext.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
	return context.WithValue(ctx, streamKey{}, stream)
}
  1665  
// ServerTransportStream is a minimal interface that a transport stream must
// implement. This can be used to mock an actual transport stream for tests of
// handler code that use, for example, grpc.SetHeader (which requires some
// stream to be in context).
//
// See also NewContextWithServerTransportStream.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type ServerTransportStream interface {
	// Method returns the full RPC method string, in the format
	// "/service/method".
	Method() string
	// SetHeader buffers header metadata to be sent with other headers later;
	// repeated calls merge the provided metadata.
	SetHeader(md metadata.MD) error
	// SendHeader sends md together with any previously buffered headers.
	SendHeader(md metadata.MD) error
	// SetTrailer buffers trailer metadata to be sent when the RPC completes;
	// repeated calls merge the provided metadata.
	SetTrailer(md metadata.MD) error
}
  1683  
  1684  // ServerTransportStreamFromContext returns the ServerTransportStream saved in
  1685  // ctx. Returns nil if the given context has no stream associated with it
  1686  // (which implies it is not an RPC invocation context).
  1687  //
  1688  // # Experimental
  1689  //
  1690  // Notice: This API is EXPERIMENTAL and may be changed or removed in a
  1691  // later release.
  1692  func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream {
  1693  	s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
  1694  	return s
  1695  }
  1696  
// Stop stops the gRPC server. It immediately closes all open
// connections and listeners.
// It cancels all active RPCs on the server side and the corresponding
// pending RPCs on the client side will get notified by connection
// errors.
func (s *Server) Stop() {
	s.quit.Fire()

	// Fire done only after every Serve goroutine has returned.
	defer func() {
		s.serveWG.Wait()
		s.done.Fire()
	}()

	// Remove the server's channelz entry exactly once, even if Stop and
	// GracefulStop are both invoked.
	s.channelzRemoveOnce.Do(func() {
		if channelz.IsOn() {
			channelz.RemoveEntry(s.channelzID)
		}
	})

	// Snapshot and clear the listener and connection sets under the lock;
	// the Close calls themselves happen below without holding s.mu.
	s.mu.Lock()
	listeners := s.lis
	s.lis = nil
	conns := s.conns
	s.conns = nil
	// interrupt GracefulStop if Stop and GracefulStop are called concurrently.
	s.cv.Broadcast()
	s.mu.Unlock()

	for lis := range listeners {
		lis.Close()
	}
	for _, cs := range conns {
		for st := range cs {
			st.Close()
		}
	}
	if s.opts.numServerWorkers > 0 {
		s.stopServerWorkers()
	}

	// Tear down the trace-events sink, if one was created.
	s.mu.Lock()
	if s.events != nil {
		s.events.Finish()
		s.events = nil
	}
	s.mu.Unlock()
}
  1744  
// GracefulStop stops the gRPC server gracefully. It stops the server from
// accepting new connections and RPCs and blocks until all the pending RPCs are
// finished.
func (s *Server) GracefulStop() {
	s.quit.Fire()
	defer s.done.Fire()

	// Remove the server's channelz entry exactly once, even if Stop and
	// GracefulStop are both invoked.
	s.channelzRemoveOnce.Do(func() {
		if channelz.IsOn() {
			channelz.RemoveEntry(s.channelzID)
		}
	})
	s.mu.Lock()
	if s.conns == nil {
		// Server already stopped (Stop or a prior GracefulStop completed).
		s.mu.Unlock()
		return
	}

	// Stop accepting new connections.
	for lis := range s.lis {
		lis.Close()
	}
	s.lis = nil
	if !s.drain {
		// Ask every live transport to drain, i.e. stop accepting new
		// streams while letting in-flight RPCs finish.
		for _, conns := range s.conns {
			for st := range conns {
				st.Drain()
			}
		}
		s.drain = true
	}

	// Wait for serving threads to be ready to exit.  Only then can we be sure no
	// new conns will be created.
	s.mu.Unlock()
	s.serveWG.Wait()
	s.mu.Lock()

	// Block until every connection has been removed; s.cv is signaled
	// elsewhere as connections go away (presumably by the conn-removal
	// path — confirm) and by Stop to interrupt this wait.
	for len(s.conns) != 0 {
		s.cv.Wait()
	}
	s.conns = nil
	if s.events != nil {
		s.events.Finish()
		s.events = nil
	}
	s.mu.Unlock()
}
  1792  
  1793  // contentSubtype must be lowercase
  1794  // cannot return nil
  1795  func (s *Server) getCodec(contentSubtype string) baseCodec {
  1796  	if s.opts.codec != nil {
  1797  		return s.opts.codec
  1798  	}
  1799  	if contentSubtype == "" {
  1800  		return encoding.GetCodec(proto.Name)
  1801  	}
  1802  	codec := encoding.GetCodec(contentSubtype)
  1803  	if codec == nil {
  1804  		return encoding.GetCodec(proto.Name)
  1805  	}
  1806  	return codec
  1807  }
  1808  
  1809  // SetHeader sets the header metadata.
  1810  // When called multiple times, all the provided metadata will be merged.
  1811  // All the metadata will be sent out when one of the following happens:
  1812  //   - grpc.SendHeader() is called;
  1813  //   - The first response is sent out;
  1814  //   - An RPC status is sent out (error or success).
  1815  func SetHeader(ctx context.Context, md metadata.MD) error {
  1816  	if md.Len() == 0 {
  1817  		return nil
  1818  	}
  1819  	stream := ServerTransportStreamFromContext(ctx)
  1820  	if stream == nil {
  1821  		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
  1822  	}
  1823  	return stream.SetHeader(md)
  1824  }
  1825  
  1826  // SendHeader sends header metadata. It may be called at most once.
  1827  // The provided md and headers set by SetHeader() will be sent.
  1828  func SendHeader(ctx context.Context, md metadata.MD) error {
  1829  	stream := ServerTransportStreamFromContext(ctx)
  1830  	if stream == nil {
  1831  		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
  1832  	}
  1833  	if err := stream.SendHeader(md); err != nil {
  1834  		return toRPCErr(err)
  1835  	}
  1836  	return nil
  1837  }
  1838  
  1839  // SetTrailer sets the trailer metadata that will be sent when an RPC returns.
  1840  // When called more than once, all the provided metadata will be merged.
  1841  func SetTrailer(ctx context.Context, md metadata.MD) error {
  1842  	if md.Len() == 0 {
  1843  		return nil
  1844  	}
  1845  	stream := ServerTransportStreamFromContext(ctx)
  1846  	if stream == nil {
  1847  		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
  1848  	}
  1849  	return stream.SetTrailer(md)
  1850  }
  1851  
  1852  // Method returns the method string for the server context.  The returned
  1853  // string is in the format of "/service/method".
  1854  func Method(ctx context.Context) (string, bool) {
  1855  	s := ServerTransportStreamFromContext(ctx)
  1856  	if s == nil {
  1857  		return "", false
  1858  	}
  1859  	return s.Method(), true
  1860  }
  1861  
// channelzServer wraps a *Server so its metrics can be reported through the
// channelz metric interface.
type channelzServer struct {
	s *Server
}
  1865  
// ChannelzMetric returns the server's internal channelz metrics by
// delegating to the wrapped Server.
func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
	return c.s.channelzMetric()
}