github.com/theQRL/go-zond@v0.1.1/rpc/handler.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rpc

import (
	"context"
	"encoding/json"
	"reflect"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/theQRL/go-zond/log"
)

// handler handles JSON-RPC messages. There is one handler per connection. Note that
// handler is not safe for concurrent use. Message handling never blocks indefinitely
// because RPCs are processed on background goroutines launched by handler.
//
// The entry points for incoming messages are:
//
//	h.handleMsg(message)
//	h.handleBatch(message)
//
// Outgoing calls use the requestOp struct. Register the request before sending it
// on the connection:
//
//	op := &requestOp{ids: ...}
//	h.addRequestOp(op)
//
// Now send the request, then wait for the reply to be delivered through handleMsg:
//
//	if err := op.wait(...); err != nil {
//		h.removeRequestOp(op) // timeout, etc.
//	}
type handler struct {
	reg                  *serviceRegistry
	unsubscribeCb        *callback
	idgen                func() ID                      // subscription ID generator
	respWait             map[string]*requestOp          // active client requests
	clientSubs           map[string]*ClientSubscription // active client subscriptions
	callWG               sync.WaitGroup                 // pending call goroutines
	rootCtx              context.Context                // canceled by close()
	cancelRoot           func()                         // cancel function for rootCtx
	conn                 jsonWriter                     // where responses will be sent
	log                  log.Logger
	allowSubscribe       bool
	batchRequestLimit    int
	batchResponseMaxSize int

	subLock    sync.Mutex
	serverSubs map[ID]*Subscription
}

type callProc struct {
	ctx       context.Context
	notifiers []*Notifier
}

func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry, batchRequestLimit, batchResponseMaxSize int) *handler {
	rootCtx, cancelRoot := context.WithCancel(connCtx)
	h := &handler{
		reg:                  reg,
		idgen:                idgen,
		conn:                 conn,
		respWait:             make(map[string]*requestOp),
		clientSubs:           make(map[string]*ClientSubscription),
		rootCtx:              rootCtx,
		cancelRoot:           cancelRoot,
		allowSubscribe:       true,
		serverSubs:           make(map[ID]*Subscription),
		log:                  log.Root(),
		batchRequestLimit:    batchRequestLimit,
		batchResponseMaxSize: batchResponseMaxSize,
	}
	if conn.remoteAddr() != "" {
		h.log = h.log.New("conn", conn.remoteAddr())
	}
	h.unsubscribeCb = newCallback(reflect.Value{}, reflect.ValueOf(h.unsubscribe))
	return h
}
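
// The surrounding client and server code drives a handler roughly as follows.
// This is an illustrative sketch only; the read loop and variable names are
// placeholders, not APIs of this package:
//
//	h := newHandler(connCtx, conn, idgen, registry, batchRequestLimit, batchResponseMaxSize)
//	for each message or batch read from the connection:
//		h.handleMsg(msg)     // single message
//		h.handleBatch(msgs)  // batch of messages
//	h.close(readErr, nil)    // on connection teardown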

// batchCallBuffer manages in-progress call messages and their responses during a batch
// call. Calls need to be synchronized between the processing and timeout-triggering
// goroutines.
type batchCallBuffer struct {
	mutex sync.Mutex
	calls []*jsonrpcMessage
	resp  []*jsonrpcMessage
	wrote bool
}
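
// The buffer is driven from two goroutines in handleBatch below: the processing
// loop and the timeout timer. A minimal sketch of the processing side, assuming
// buf, cp and h are in scope as in handleBatch:
//
//	for {
//		msg := buf.nextCall()
//		if msg == nil {
//			break
//		}
//		buf.pushResponse(h.handleCallMsg(cp, msg))
//	}
//	buf.write(cp.ctx, h.conn)
//
// The timeout goroutine instead calls respondWithError, which answers the remaining
// (non-notification) calls with the error and marks the buffer as written, so the
// final write above becomes a no-op.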

// nextCall returns the next unprocessed message.
func (b *batchCallBuffer) nextCall() *jsonrpcMessage {
	b.mutex.Lock()
	defer b.mutex.Unlock()

	if len(b.calls) == 0 {
		return nil
	}
	// The popping happens in `pushResponse`. The in-progress call is kept
	// so we can return an error for it in case of timeout.
	msg := b.calls[0]
	return msg
}

// pushResponse adds the response to the last call returned by nextCall.
func (b *batchCallBuffer) pushResponse(answer *jsonrpcMessage) {
	b.mutex.Lock()
	defer b.mutex.Unlock()

	if answer != nil {
		b.resp = append(b.resp, answer)
	}
	b.calls = b.calls[1:]
}

// write sends the responses.
func (b *batchCallBuffer) write(ctx context.Context, conn jsonWriter) {
	b.mutex.Lock()
	defer b.mutex.Unlock()

	b.doWrite(ctx, conn, false)
}

// respondWithError sends the responses added so far. For the remaining unanswered call
// messages, it responds with the given error.
func (b *batchCallBuffer) respondWithError(ctx context.Context, conn jsonWriter, err error) {
	b.mutex.Lock()
	defer b.mutex.Unlock()

	for _, msg := range b.calls {
		if !msg.isNotification() {
			b.resp = append(b.resp, msg.errorResponse(err))
		}
	}
	b.doWrite(ctx, conn, true)
}

// doWrite actually writes the response.
// This assumes b.mutex is held.
func (b *batchCallBuffer) doWrite(ctx context.Context, conn jsonWriter, isErrorResponse bool) {
	if b.wrote {
		return
	}
	b.wrote = true // can only write once
	if len(b.resp) > 0 {
		conn.writeJSON(ctx, b.resp, isErrorResponse)
	}
}

// handleBatch executes all messages in a batch and sends the responses.
func (h *handler) handleBatch(msgs []*jsonrpcMessage) {
	// Emit error response for empty batches:
	if len(msgs) == 0 {
		h.startCallProc(func(cp *callProc) {
			resp := errorMessage(&invalidRequestError{"empty batch"})
			h.conn.writeJSON(cp.ctx, resp, true)
		})
		return
	}
	// Apply limit on total number of requests.
	if h.batchRequestLimit != 0 && len(msgs) > h.batchRequestLimit {
		h.startCallProc(func(cp *callProc) {
			h.respondWithBatchTooLarge(cp, msgs)
		})
		return
	}

	// Handle non-call messages first.
	// Here we need to find the requestOp that sent the request batch.
	calls := make([]*jsonrpcMessage, 0, len(msgs))
	h.handleResponses(msgs, func(msg *jsonrpcMessage) {
		calls = append(calls, msg)
	})
	if len(calls) == 0 {
		return
	}

	// Process calls on a goroutine because they may block indefinitely:
	h.startCallProc(func(cp *callProc) {
		var (
			timer      *time.Timer
			cancel     context.CancelFunc
			callBuffer = &batchCallBuffer{calls: calls, resp: make([]*jsonrpcMessage, 0, len(calls))}
		)

		cp.ctx, cancel = context.WithCancel(cp.ctx)
		defer cancel()

		// Cancel the request context after timeout and send an error response. Since the
		// currently-running method might not return immediately on timeout, we must wait
		// for the timeout concurrently with processing the request.
		if timeout, ok := ContextRequestTimeout(cp.ctx); ok {
			timer = time.AfterFunc(timeout, func() {
				cancel()
				err := &internalServerError{errcodeTimeout, errMsgTimeout}
				callBuffer.respondWithError(cp.ctx, h.conn, err)
			})
		}

		responseBytes := 0
		for {
			// No need to handle the rest of the calls if we have timed out.
			if cp.ctx.Err() != nil {
				break
			}
			msg := callBuffer.nextCall()
			if msg == nil {
				break
			}
			resp := h.handleCallMsg(cp, msg)
			callBuffer.pushResponse(resp)
			if resp != nil && h.batchResponseMaxSize != 0 {
				responseBytes += len(resp.Result)
				if responseBytes > h.batchResponseMaxSize {
					err := &internalServerError{errcodeResponseTooLarge, errMsgResponseTooLarge}
					callBuffer.respondWithError(cp.ctx, h.conn, err)
					break
				}
			}
		}
		if timer != nil {
			timer.Stop()
		}

		h.addSubscriptions(cp.notifiers)
		callBuffer.write(cp.ctx, h.conn)
		for _, n := range cp.notifiers {
			n.activate()
		}
	})
}

func (h *handler) respondWithBatchTooLarge(cp *callProc, batch []*jsonrpcMessage) {
	resp := errorMessage(&invalidRequestError{errMsgBatchTooLarge})
	// Find the first call and add its "id" field to the error.
	// This is the best we can do, given that the protocol doesn't have a way
	// of reporting an error for the entire batch.
	for _, msg := range batch {
		if msg.isCall() {
			resp.ID = msg.ID
			break
		}
	}
	h.conn.writeJSON(cp.ctx, []*jsonrpcMessage{resp}, true)
}

// handleMsg handles a single non-batch message.
func (h *handler) handleMsg(msg *jsonrpcMessage) {
	msgs := []*jsonrpcMessage{msg}
	h.handleResponses(msgs, func(msg *jsonrpcMessage) {
		h.startCallProc(func(cp *callProc) {
			h.handleNonBatchCall(cp, msg)
		})
	})
}

func (h *handler) handleNonBatchCall(cp *callProc, msg *jsonrpcMessage) {
	var (
		responded sync.Once
		timer     *time.Timer
		cancel    context.CancelFunc
	)
	cp.ctx, cancel = context.WithCancel(cp.ctx)
	defer cancel()

	// Cancel the request context after timeout and send an error response. Since the
	// running method might not return immediately on timeout, we must wait for the
	// timeout concurrently with processing the request.
	if timeout, ok := ContextRequestTimeout(cp.ctx); ok {
		timer = time.AfterFunc(timeout, func() {
			cancel()
			responded.Do(func() {
				resp := msg.errorResponse(&internalServerError{errcodeTimeout, errMsgTimeout})
				h.conn.writeJSON(cp.ctx, resp, true)
			})
		})
	}

	answer := h.handleCallMsg(cp, msg)
	if timer != nil {
		timer.Stop()
	}
	h.addSubscriptions(cp.notifiers)
	if answer != nil {
		responded.Do(func() {
			h.conn.writeJSON(cp.ctx, answer, false)
		})
	}
	for _, n := range cp.notifiers {
		n.activate()
	}
}

// close cancels all requests except for inflightReq and waits for
// call goroutines to shut down.
func (h *handler) close(err error, inflightReq *requestOp) {
	h.cancelAllRequests(err, inflightReq)
	h.callWG.Wait()
	h.cancelRoot()
	h.cancelServerSubscriptions(err)
}

// addRequestOp registers a request operation.
func (h *handler) addRequestOp(op *requestOp) {
	for _, id := range op.ids {
		h.respWait[string(id)] = op
	}
}

// removeRequestOp stops waiting for the given request IDs.
func (h *handler) removeRequestOp(op *requestOp) {
	for _, id := range op.ids {
		delete(h.respWait, string(id))
	}
}

// cancelAllRequests unblocks and removes pending requests and active subscriptions.
func (h *handler) cancelAllRequests(err error, inflightReq *requestOp) {
	didClose := make(map[*requestOp]bool)
	if inflightReq != nil {
		didClose[inflightReq] = true
	}

	for id, op := range h.respWait {
		// Remove the op so that later calls will not close op.resp again.
		delete(h.respWait, id)

		if !didClose[op] {
			op.err = err
			close(op.resp)
			didClose[op] = true
		}
	}
	for id, sub := range h.clientSubs {
		delete(h.clientSubs, id)
		sub.close(err)
	}
}

func (h *handler) addSubscriptions(nn []*Notifier) {
	h.subLock.Lock()
	defer h.subLock.Unlock()

	for _, n := range nn {
		if sub := n.takeSubscription(); sub != nil {
			h.serverSubs[sub.ID] = sub
		}
	}
}

// cancelServerSubscriptions removes all subscriptions and closes their error channels.
func (h *handler) cancelServerSubscriptions(err error) {
	h.subLock.Lock()
	defer h.subLock.Unlock()

	for id, s := range h.serverSubs {
		s.err <- err
		close(s.err)
		delete(h.serverSubs, id)
	}
}

// startCallProc runs fn in a new goroutine and starts tracking it in the h.callWG wait group.
func (h *handler) startCallProc(fn func(*callProc)) {
	h.callWG.Add(1)
	go func() {
		ctx, cancel := context.WithCancel(h.rootCtx)
		defer h.callWG.Done()
		defer cancel()
		fn(&callProc{ctx: ctx})
	}()
}

// handleResponses processes method call responses and hands any non-response
// messages to the handleCall callback.
func (h *handler) handleResponses(batch []*jsonrpcMessage, handleCall func(*jsonrpcMessage)) {
	var resolvedops []*requestOp
	handleResp := func(msg *jsonrpcMessage) {
		op := h.respWait[string(msg.ID)]
		if op == nil {
			h.log.Debug("Unsolicited RPC response", "reqid", idForLog{msg.ID})
			return
		}
		resolvedops = append(resolvedops, op)
		delete(h.respWait, string(msg.ID))

		// For subscription responses, start the subscription if the server
		// indicates success. EthSubscribe gets unblocked in either case through
		// the op.resp channel.
		if op.sub != nil {
			if msg.Error != nil {
				op.err = msg.Error
			} else {
				op.err = json.Unmarshal(msg.Result, &op.sub.subid)
				if op.err == nil {
					go op.sub.run()
					h.clientSubs[op.sub.subid] = op.sub
				}
			}
		}

		if !op.hadResponse {
			op.hadResponse = true
			op.resp <- batch
		}
	}

	for _, msg := range batch {
		start := time.Now()
		switch {
		case msg.isResponse():
			handleResp(msg)
			h.log.Trace("Handled RPC response", "reqid", idForLog{msg.ID}, "duration", time.Since(start))

		case msg.isNotification():
			if strings.HasSuffix(msg.Method, notificationMethodSuffix) {
				h.handleSubscriptionResult(msg)
				continue
			}
			handleCall(msg)

		default:
			handleCall(msg)
		}
	}

	for _, op := range resolvedops {
		h.removeRequestOp(op)
	}
}

// handleSubscriptionResult processes subscription notifications.
func (h *handler) handleSubscriptionResult(msg *jsonrpcMessage) {
	var result subscriptionResult
	if err := json.Unmarshal(msg.Params, &result); err != nil {
		h.log.Debug("Dropping invalid subscription message")
		return
	}
	if h.clientSubs[result.ID] != nil {
		h.clientSubs[result.ID].deliver(result.Result)
	}
}
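
// For reference, a subscription notification delivered by a server looks roughly
// like this on the wire (illustrative shape: the method name is the namespace plus
// notificationMethodSuffix, and the values are placeholders):
//
//	{"jsonrpc":"2.0","method":"<namespace>_subscription",
//	 "params":{"subscription":"<subscription id>","result":{...}}}
//
// msg.Params is decoded into subscriptionResult above, and the result is delivered
// to the matching client subscription, if one is registered.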

// handleCallMsg executes a call message and returns the answer.
func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage) *jsonrpcMessage {
	start := time.Now()
	switch {
	case msg.isNotification():
		h.handleCall(ctx, msg)
		h.log.Debug("Served "+msg.Method, "duration", time.Since(start))
		return nil

	case msg.isCall():
		resp := h.handleCall(ctx, msg)
		var ctx []interface{}
		ctx = append(ctx, "reqid", idForLog{msg.ID}, "duration", time.Since(start))
		if resp.Error != nil {
			ctx = append(ctx, "err", resp.Error.Message)
			if resp.Error.Data != nil {
				ctx = append(ctx, "errdata", resp.Error.Data)
			}
			h.log.Warn("Served "+msg.Method, ctx...)
		} else {
			h.log.Debug("Served "+msg.Method, ctx...)
		}
		return resp

	case msg.hasValidID():
		return msg.errorResponse(&invalidRequestError{"invalid request"})

	default:
		return errorMessage(&invalidRequestError{"invalid request"})
	}
}
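
// For reference, the cases above distinguish the standard JSON-RPC 2.0 message
// shapes (illustrative examples with placeholder names):
//
//	{"jsonrpc":"2.0","method":"<namespace>_<method>","params":[...]}        // notification: no "id", no response is sent
//	{"jsonrpc":"2.0","id":1,"method":"<namespace>_<method>","params":[...]} // call: always answered with a response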

// handleCall processes method calls.
func (h *handler) handleCall(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage {
	if msg.isSubscribe() {
		return h.handleSubscribe(cp, msg)
	}
	var callb *callback
	if msg.isUnsubscribe() {
		callb = h.unsubscribeCb
	} else {
		callb = h.reg.callback(msg.Method)
	}
	if callb == nil {
		return msg.errorResponse(&methodNotFoundError{method: msg.Method})
	}

	args, err := parsePositionalArguments(msg.Params, callb.argTypes)
	if err != nil {
		return msg.errorResponse(&invalidParamsError{err.Error()})
	}
	start := time.Now()
	answer := h.runMethod(cp.ctx, msg, callb, args)

	// Collect statistics for RPC calls if metrics are enabled.
	// We only care about pure RPC calls here; the *_unsubscribe callback is filtered out.
	if callb != h.unsubscribeCb {
		rpcRequestGauge.Inc(1)
		if answer.Error != nil {
			failedRequestGauge.Inc(1)
		} else {
			successfulRequestGauge.Inc(1)
		}
		rpcServingTimer.UpdateSince(start)
		updateServeTimeHistogram(msg.Method, answer.Error == nil, time.Since(start))
	}

	return answer
}

// handleSubscribe processes *_subscribe method calls.
func (h *handler) handleSubscribe(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage {
	if !h.allowSubscribe {
		return msg.errorResponse(ErrNotificationsUnsupported)
	}

	// The subscription method name is the first argument.
	name, err := parseSubscriptionName(msg.Params)
	if err != nil {
		return msg.errorResponse(&invalidParamsError{err.Error()})
	}
	namespace := msg.namespace()
	callb := h.reg.subscription(namespace, name)
	if callb == nil {
		return msg.errorResponse(&subscriptionNotFoundError{namespace, name})
	}

	// Parse the subscription name argument too, but remove it before calling the callback.
	argTypes := append([]reflect.Type{stringType}, callb.argTypes...)
	args, err := parsePositionalArguments(msg.Params, argTypes)
	if err != nil {
		return msg.errorResponse(&invalidParamsError{err.Error()})
	}
	args = args[1:]

	// Install the notifier in the context so the subscription handler can find it.
	n := &Notifier{h: h, namespace: namespace}
	cp.notifiers = append(cp.notifiers, n)
	ctx := context.WithValue(cp.ctx, notifierKey{}, n)

	return h.runMethod(ctx, msg, callb, args)
}
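
// For reference, a subscribe call carries the subscription name as its first
// positional parameter, followed by the arguments of the subscription handler
// (illustrative shape; method name and values are placeholders):
//
//	{"jsonrpc":"2.0","id":1,"method":"<namespace>_subscribe","params":["<name>", <handler args...>]}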

// runMethod runs the Go callback for an RPC method.
func (h *handler) runMethod(ctx context.Context, msg *jsonrpcMessage, callb *callback, args []reflect.Value) *jsonrpcMessage {
	result, err := callb.call(ctx, msg.Method, args)
	if err != nil {
		return msg.errorResponse(err)
	}
	return msg.response(result)
}

// unsubscribe is the callback function for all *_unsubscribe calls.
func (h *handler) unsubscribe(ctx context.Context, id ID) (bool, error) {
	h.subLock.Lock()
	defer h.subLock.Unlock()

	s := h.serverSubs[id]
	if s == nil {
		return false, ErrSubscriptionNotFound
	}
	close(s.err)
	delete(h.serverSubs, id)
	return true, nil
}
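
// The corresponding unsubscribe request passes the subscription ID as the only
// parameter (illustrative shape; method name and values are placeholders):
//
//	{"jsonrpc":"2.0","id":2,"method":"<namespace>_unsubscribe","params":["<subscription id>"]}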

type idForLog struct{ json.RawMessage }

func (id idForLog) String() string {
	if s, err := strconv.Unquote(string(id.RawMessage)); err == nil {
		return s
	}
	return string(id.RawMessage)
}