trpc.group/trpc-go/trpc-go@v1.0.3/transport/server_transport_tcp.go (about)

     1  //
     2  //
     3  // Tencent is pleased to support the open source community by making tRPC available.
     4  //
     5  // Copyright (C) 2023 THL A29 Limited, a Tencent company.
     6  // All rights reserved.
     7  //
     8  // If you have downloaded a copy of the tRPC source code from Tencent,
     9  // please note that tRPC source code is licensed under the  Apache 2.0 License,
    10  // A copy of the Apache 2.0 License is included in this file.
    11  //
    12  //
    13  
    14  package transport
    15  
    16  import (
    17  	"context"
    18  	"io"
    19  	"math"
    20  	"net"
    21  	"strings"
    22  	"sync"
    23  	"time"
    24  
    25  	"github.com/panjf2000/ants/v2"
    26  
    27  	"trpc.group/trpc-go/trpc-go/codec"
    28  	"trpc.group/trpc-go/trpc-go/errs"
    29  	"trpc.group/trpc-go/trpc-go/internal/addrutil"
    30  	"trpc.group/trpc-go/trpc-go/internal/report"
    31  	"trpc.group/trpc-go/trpc-go/internal/writev"
    32  	"trpc.group/trpc-go/trpc-go/log"
    33  	"trpc.group/trpc-go/trpc-go/rpcz"
    34  	"trpc.group/trpc-go/trpc-go/transport/internal/frame"
    35  )
    36  
    37  const defaultBufferSize = 128 * 1024
    38  
// handleParam carries the arguments of one asynchronous handling job
// submitted to the goroutine pool; instances are recycled through
// handleParamPool to avoid a per-request allocation.
type handleParam struct {
	req   []byte    // request frame to be processed
	c     *tcpconn  // connection the frame was read from
	start time.Time // enqueue time, used to report goroutine schedule delay
}
    44  
    45  func (p *handleParam) reset() {
    46  	p.req = nil
    47  	p.c = nil
    48  	p.start = time.Time{}
    49  }
    50  
// handleParamPool recycles handleParam objects so the async handling path
// avoids one heap allocation per request.
var handleParamPool = &sync.Pool{
	New: func() interface{} { return new(handleParam) },
}
    54  
    55  func createRoutinePool(size int) *ants.PoolWithFunc {
    56  	if size <= 0 {
    57  		size = math.MaxInt32
    58  	}
    59  	pool, err := ants.NewPoolWithFunc(size, func(args interface{}) {
    60  		param, ok := args.(*handleParam)
    61  		if !ok {
    62  			log.Tracef("routine pool args type error, shouldn't happen!")
    63  			return
    64  		}
    65  		report.TCPServerAsyncGoroutineScheduleDelay.Set(float64(time.Since(param.start).Microseconds()))
    66  		if param.c == nil {
    67  			log.Tracef("routine pool tcpconn is nil, shouldn't happen!")
    68  			return
    69  		}
    70  		param.c.handleSync(param.req)
    71  		param.reset()
    72  		handleParamPool.Put(param)
    73  	})
    74  	if err != nil {
    75  		log.Tracef("routine pool create error:%v", err)
    76  		return nil
    77  	}
    78  	return pool
    79  }
    80  
// serveTCP runs the accept loop on ln until the context is canceled or the
// listener is closed. Temporary accept errors are retried with exponential
// backoff; every accepted connection gets keep-alive configured, is wrapped
// in a tcpconn, registered in the transport's address map, and served on its
// own goroutine.
func (s *serverTransport) serveTCP(ctx context.Context, ln net.Listener, opts *ListenServeOptions) error {
	// Create a goroutine pool if ServerAsync enabled.
	var pool *ants.PoolWithFunc
	if opts.ServerAsync {
		pool = createRoutinePool(opts.Routines)
	}
	for tempDelay := time.Duration(0); ; {
		rwc, err := ln.Accept()
		if err != nil {
			// Temporary errors (e.g. too many open files) back off and retry.
			if ne, ok := err.(net.Error); ok && ne.Temporary() {
				tempDelay = doTempDelay(tempDelay)
				continue
			}
			select {
			case <-ctx.Done(): // If this error is triggered by the user, such as during a restart,
				return err // it is possible to directly return the error, causing the current listener to exit.
			default:
				// Restricted access to the internal/poll.ErrNetClosing type necessitates comparing a string literal.
				const accept, closeError = "accept", "use of closed network connection"
				const msg = "the server transport, listening on %s, encountered an error: %+v; this error was handled" +
					" gracefully by the framework to prevent abnormal termination, serving as a reference for" +
					" investigating acceptance errors that can't be filtered by the Temporary interface"
				if e, ok := err.(*net.OpError); ok && e.Op == accept && strings.Contains(e.Err.Error(), closeError) {
					log.Infof("listener with address %s is closed", ln.Addr())
					return err
				}
				// Unclassifiable accept error: log and keep the loop alive
				// rather than terminating the whole server transport.
				log.Errorf(msg, ln.Addr(), err)
				continue
			}
		}
		// A successful accept resets the backoff.
		tempDelay = 0
		if tcpConn, ok := rwc.(*net.TCPConn); ok {
			if err := tcpConn.SetKeepAlive(true); err != nil {
				log.Tracef("tcp conn set keepalive error:%v", err)
			}
			if s.opts.KeepAlivePeriod > 0 {
				if err := tcpConn.SetKeepAlivePeriod(s.opts.KeepAlivePeriod); err != nil {
					log.Tracef("tcp conn set keepalive period error:%v", err)
				}
			}
		}
		tc := &tcpconn{
			conn:        s.newConn(ctx, opts),
			rwc:         rwc,
			fr:          opts.FramerBuilder.New(codec.NewReader(rwc)),
			remoteAddr:  rwc.RemoteAddr(),
			localAddr:   rwc.LocalAddr(),
			serverAsync: opts.ServerAsync,
			writev:      opts.Writev,
			st:          s,
			pool:        pool,
		}
		// Start goroutine sending with writev.
		if tc.writev {
			tc.buffer = writev.NewBuffer()
			tc.closeNotify = make(chan struct{}, 1)
			tc.buffer.Start(tc.rwc, tc.closeNotify)
		}
		// To avoid over writing packages, checks whether should we copy packages by Framer and
		// some other configurations.
		tc.copyFrame = frame.ShouldCopy(opts.CopyFrame, tc.serverAsync, codec.IsSafeFramer(tc.fr))
		// Register the connection so the server transport can look it up
		// (and close() can deregister it) by its address pair.
		key := addrutil.AddrToKey(tc.localAddr, tc.remoteAddr)
		s.m.Lock()
		s.addrToConn[key] = tc
		s.m.Unlock()
		go tc.serve()
	}
}
   149  
   150  func doTempDelay(tempDelay time.Duration) time.Duration {
   151  	if tempDelay == 0 {
   152  		tempDelay = 5 * time.Millisecond
   153  	} else {
   154  		tempDelay *= 2
   155  	}
   156  	if max := 1 * time.Second; tempDelay > max {
   157  		tempDelay = max
   158  	}
   159  	time.Sleep(tempDelay)
   160  	return tempDelay
   161  }
   162  
// tcpconn is the connection which is established when server accept a client connecting request.
type tcpconn struct {
	*conn                           // embedded per-connection state shared with the transport
	rwc         net.Conn            // underlying accepted socket
	fr          codec.Framer        // splits the byte stream into request frames
	localAddr   net.Addr            // cached local address of rwc
	remoteAddr  net.Addr            // cached remote address of rwc
	serverAsync bool                // handle requests on the goroutine pool when true
	writev      bool                // send responses through the writev buffer when true
	copyFrame   bool                // copy each frame before handling; decided by frame.ShouldCopy
	closeOnce   sync.Once           // guarantees close() runs its cleanup exactly once
	st          *serverTransport    // owning transport; used to deregister the conn on close
	pool        *ants.PoolWithFunc  // goroutine pool for async handling (may be nil)
	buffer      *writev.Buffer      // batched sender, set only when writev is true
	closeNotify chan struct{}       // closed to stop the writev sending goroutine
}
   179  
// close closes socket and cleans up.
// It is idempotent via closeOnce, and performs cleanup in a fixed order:
// notify the handler of the closure, stop the writev sender (if enabled),
// remove this connection from the transport's address map, and finally
// close the socket itself.
func (c *tcpconn) close() {
	c.closeOnce.Do(func() {
		// Send error msg to handler.
		ctx, msg := codec.WithNewMessage(context.Background())
		msg.WithLocalAddr(c.localAddr)
		msg.WithRemoteAddr(c.remoteAddr)
		e := &errs.Error{
			Type: errs.ErrorTypeFramework,
			Code: errs.RetServerSystemErr,
			Desc: "trpc",
			Msg:  "Server connection closed",
		}
		msg.WithServerRspErr(e)
		// The connection closing message is handed over to handler.
		if err := c.conn.handleClose(ctx); err != nil {
			log.Trace("transport: notify connection close failed", err)
		}
		// Notify to stop writev sending goroutine.
		if c.writev {
			close(c.closeNotify)
		}

		// Remove cache in server stream transport.
		key := addrutil.AddrToKey(c.localAddr, c.remoteAddr)
		c.st.m.Lock()
		delete(c.st.addrToConn, key)
		c.st.m.Unlock()

		// Finally, close the socket connection.
		c.rwc.Close()
	})
}
   213  
   214  // write encapsulates tcp conn write.
   215  func (c *tcpconn) write(p []byte) (int, error) {
   216  	if c.writev {
   217  		return c.buffer.Write(p)
   218  	}
   219  	return c.rwc.Write(p)
   220  }
   221  
// serve reads frames from the connection in a loop and dispatches each one,
// returning (and thus closing the connection via the deferred close) when the
// server context is done, the peer closes, the idle deadline fires, or a
// read error occurs.
func (c *tcpconn) serve() {
	defer c.close()
	for {
		// Check if upstream has closed.
		select {
		case <-c.ctx.Done():
			return
		default:
		}

		if c.idleTimeout > 0 {
			now := time.Now()
			// SetReadDeadline has poor performance, so, update timeout every 5 seconds.
			if now.Sub(c.lastVisited) > 5*time.Second {
				c.lastVisited = now
				err := c.rwc.SetReadDeadline(now.Add(c.idleTimeout))
				if err != nil {
					log.Trace("transport: tcpconn SetReadDeadline fail ", err)
					return
				}
			}
		}

		req, err := c.fr.ReadFrame()
		if err != nil {
			if err == io.EOF {
				report.TCPServerTransportReadEOF.Incr() // client has closed the connections.
				return
			}
			// Server closes the connection if client sends no package in last idle timeout.
			if e, ok := err.(net.Error); ok && e.Timeout() {
				report.TCPServerTransportIdleTimeout.Incr()
				return
			}
			// Any other read/decode failure also ends the connection.
			report.TCPServerTransportReadFail.Incr()
			log.Trace("transport: tcpconn serve ReadFrame fail ", err)
			return
		}
		report.TCPServerTransportReceiveSize.Set(float64(len(req)))
		// if framer is not concurrent safe, copy the data to avoid over writing.
		if c.copyFrame {
			reqCopy := make([]byte, len(req))
			copy(reqCopy, req)
			req = reqCopy
		}

		c.handle(req)
	}
}
   271  
   272  func (c *tcpconn) handle(req []byte) {
   273  	if !c.serverAsync || c.pool == nil {
   274  		c.handleSync(req)
   275  		return
   276  	}
   277  
   278  	// Using sync.pool to dispatch package processing goroutine parameters can reduce a memory
   279  	// allocation and slightly promote performance.
   280  	args := handleParamPool.Get().(*handleParam)
   281  	args.req = req
   282  	args.c = c
   283  	args.start = time.Now()
   284  	if err := c.pool.Invoke(args); err != nil {
   285  		report.TCPServerTransportJobQueueFullFail.Incr()
   286  		log.Trace("transport: tcpconn serve routine pool put job queue fail ", err)
   287  		c.handleSyncWithErr(req, errs.ErrServerRoutinePoolBusy)
   288  	}
   289  }
   290  
// handleSync processes a request frame in the calling goroutine with no
// pre-set response error.
func (c *tcpconn) handleSync(req []byte) {
	c.handleSyncWithErr(req, nil)
}
   294  
// handleSyncWithErr processes one request frame synchronously. A non-nil e
// (e.g. errs.ErrServerRoutinePoolBusy) is attached to the message as the
// server response error before the handler runs. The rpcz span records
// request/response sizes, the RPC name, and the final error; on handler or
// write failure the connection is closed.
func (c *tcpconn) handleSyncWithErr(req []byte, e error) {
	ctx, msg := codec.WithNewMessage(context.Background())
	// Deferred first, so it runs last (after the span-ending defer below,
	// which still reads msg).
	defer codec.PutBackMessage(msg)
	msg.WithServerRspErr(e)
	// Record local addr and remote addr to context.
	msg.WithLocalAddr(c.localAddr)
	msg.WithRemoteAddr(c.remoteAddr)

	span, ender, ctx := rpcz.NewSpanContext(ctx, "server")
	span.SetAttribute(rpcz.TRPCAttributeRequestSize, len(req))

	rsp, err := c.conn.handle(ctx, req)

	defer func() {
		span.SetAttribute(rpcz.TRPCAttributeRPCName, msg.ServerRPCName())
		// Prefer the handler's transport-level error; fall back to the
		// message's response error when the handler itself succeeded.
		if err == nil {
			span.SetAttribute(rpcz.TRPCAttributeError, msg.ServerRspErr())
		} else {
			span.SetAttribute(rpcz.TRPCAttributeError, err)
		}
		ender.End()
	}()
	if err != nil {
		if err != errs.ErrServerNoResponse {
			report.TCPServerTransportHandleFail.Incr()
			log.Trace("transport: tcpconn serve handle fail ", err)
			c.close()
			return
		}
		// On stream RPC, server does not need to write rsp, just returns.
		return
	}
	report.TCPServerTransportSendSize.Set(float64(len(rsp)))
	span.SetAttribute(rpcz.TRPCAttributeResponseSize, len(rsp))
	{
		// common RPC write rsp.
		_, ender := span.NewChild("SendMessage")
		_, err = c.write(rsp)
		ender.End()
	}

	if err != nil {
		report.TCPServerTransportWriteFail.Incr()
		log.Trace("transport: tcpconn write fail ", err)
		c.close()
	}
}