github.com/ethereum/go-ethereum@v1.16.1/rpc/handler.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rpc

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"reflect"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/log"
)

// handler handles JSON-RPC messages. There is one handler per connection. Note that
// handler is not safe for concurrent use. Message handling never blocks indefinitely
// because RPCs are processed on background goroutines launched by handler.
//
// The entry points for incoming messages are:
//
//	h.handleMsg(message)
//	h.handleBatch(message)
//
// Outgoing calls use the requestOp struct. Register the request before sending it
// on the connection:
//
//	op := &requestOp{ids: ...}
//	h.addRequestOp(op)
//
// Now send the request, then wait for the reply to be delivered through handleMsg:
//
//	if err := op.wait(...); err != nil {
//		h.removeRequestOp(op) // timeout, etc.
//	}
type handler struct {
	reg                  *serviceRegistry
	unsubscribeCb        *callback
	idgen                func() ID                      // subscription ID generator
	respWait             map[string]*requestOp          // active client requests
	clientSubs           map[string]*ClientSubscription // active client subscriptions
	callWG               sync.WaitGroup                 // pending call goroutines
	rootCtx              context.Context                // canceled by close()
	cancelRoot           func()                         // cancel function for rootCtx
	conn                 jsonWriter                     // where responses will be sent
	log                  log.Logger
	allowSubscribe       bool
	batchRequestLimit    int
	batchResponseMaxSize int

	subLock    sync.Mutex
	serverSubs map[ID]*Subscription
}

type callProc struct {
	ctx       context.Context
	notifiers []*Notifier
}

func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry, batchRequestLimit, batchResponseMaxSize int) *handler {
	rootCtx, cancelRoot := context.WithCancel(connCtx)
	h := &handler{
		reg:                  reg,
		idgen:                idgen,
		conn:                 conn,
		respWait:             make(map[string]*requestOp),
		clientSubs:           make(map[string]*ClientSubscription),
		rootCtx:              rootCtx,
		cancelRoot:           cancelRoot,
		allowSubscribe:       true,
		serverSubs:           make(map[ID]*Subscription),
		log:                  log.Root(),
		batchRequestLimit:    batchRequestLimit,
		batchResponseMaxSize: batchResponseMaxSize,
	}
	if conn.remoteAddr() != "" {
		h.log = h.log.New("conn", conn.remoteAddr())
	}
	h.unsubscribeCb = newCallback(reflect.Value{}, reflect.ValueOf(h.unsubscribe))
	return h
}

// batchCallBuffer manages in-progress call messages and their responses during a batch
// call. Calls need to be synchronized between the processing and timeout-triggering
// goroutines.
type batchCallBuffer struct {
	mutex sync.Mutex
	calls []*jsonrpcMessage
	resp  []*jsonrpcMessage
	wrote bool
}

// nextCall returns the next unprocessed message.
func (b *batchCallBuffer) nextCall() *jsonrpcMessage {
	b.mutex.Lock()
	defer b.mutex.Unlock()

	if len(b.calls) == 0 {
		return nil
	}
	// The popping happens in `pushResponse`. The in-progress call is kept
	// so we can return an error for it in case of timeout.
	msg := b.calls[0]
	return msg
}

// pushResponse adds the response to the last call returned by nextCall.
func (b *batchCallBuffer) pushResponse(answer *jsonrpcMessage) {
	b.mutex.Lock()
	defer b.mutex.Unlock()

	if answer != nil {
		b.resp = append(b.resp, answer)
	}
	b.calls = b.calls[1:]
}

// write sends the responses.
func (b *batchCallBuffer) write(ctx context.Context, conn jsonWriter) {
	b.mutex.Lock()
	defer b.mutex.Unlock()

	b.doWrite(ctx, conn, false)
}

// respondWithError sends the responses added so far. For the remaining unanswered call
// messages, it responds with the given error.
func (b *batchCallBuffer) respondWithError(ctx context.Context, conn jsonWriter, err error) {
	b.mutex.Lock()
	defer b.mutex.Unlock()

	for _, msg := range b.calls {
		if !msg.isNotification() {
			b.resp = append(b.resp, msg.errorResponse(err))
		}
	}
	b.doWrite(ctx, conn, true)
}

// doWrite actually writes the response.
// This assumes b.mutex is held.
func (b *batchCallBuffer) doWrite(ctx context.Context, conn jsonWriter, isErrorResponse bool) {
	if b.wrote {
		return
	}
	b.wrote = true // can only write once
	if len(b.resp) > 0 {
		conn.writeJSON(ctx, b.resp, isErrorResponse)
	}
}

// handleBatch executes all messages in a batch and returns the responses.
func (h *handler) handleBatch(msgs []*jsonrpcMessage) {
	// Emit error response for empty batches:
	if len(msgs) == 0 {
		h.startCallProc(func(cp *callProc) {
			resp := errorMessage(&invalidRequestError{"empty batch"})
			h.conn.writeJSON(cp.ctx, resp, true)
		})
		return
	}
	// Apply limit on total number of requests.
	if h.batchRequestLimit != 0 && len(msgs) > h.batchRequestLimit {
		h.startCallProc(func(cp *callProc) {
			h.respondWithBatchTooLarge(cp, msgs)
		})
		return
	}

	// Handle non-call messages first.
	// Here we need to find the requestOp that sent the request batch.
	calls := make([]*jsonrpcMessage, 0, len(msgs))
	h.handleResponses(msgs, func(msg *jsonrpcMessage) {
		calls = append(calls, msg)
	})
	if len(calls) == 0 {
		return
	}

	// Process calls on a goroutine because they may block indefinitely:
	h.startCallProc(func(cp *callProc) {
		var (
			timer      *time.Timer
			cancel     context.CancelFunc
			callBuffer = &batchCallBuffer{calls: calls, resp: make([]*jsonrpcMessage, 0, len(calls))}
		)

		cp.ctx, cancel = context.WithCancel(cp.ctx)
		defer cancel()

		// Cancel the request context after timeout and send an error response. Since the
		// currently-running method might not return immediately on timeout, we must wait
		// for the timeout concurrently with processing the request.
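		// The timeout goroutine and the processing loop below both go through
		// callBuffer: its mutex plus the one-shot `wrote` flag in doWrite ensure
		// the batch response is written to the connection at most once, whichever
		// side finishes first.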
		if timeout, ok := ContextRequestTimeout(cp.ctx); ok {
			timer = time.AfterFunc(timeout, func() {
				cancel()
				err := &internalServerError{errcodeTimeout, errMsgTimeout}
				callBuffer.respondWithError(cp.ctx, h.conn, err)
			})
		}

		responseBytes := 0
		for {
			// No need to handle rest of calls if timed out.
			if cp.ctx.Err() != nil {
				break
			}
			msg := callBuffer.nextCall()
			if msg == nil {
				break
			}
			resp := h.handleCallMsg(cp, msg)
			callBuffer.pushResponse(resp)
			if resp != nil && h.batchResponseMaxSize != 0 {
				responseBytes += len(resp.Result)
				if responseBytes > h.batchResponseMaxSize {
					err := &internalServerError{errcodeResponseTooLarge, errMsgResponseTooLarge}
					callBuffer.respondWithError(cp.ctx, h.conn, err)
					break
				}
			}
		}
		if timer != nil {
			timer.Stop()
		}

		h.addSubscriptions(cp.notifiers)
		callBuffer.write(cp.ctx, h.conn)
		for _, n := range cp.notifiers {
			n.activate()
		}
	})
}

func (h *handler) respondWithBatchTooLarge(cp *callProc, batch []*jsonrpcMessage) {
	resp := errorMessage(&invalidRequestError{errMsgBatchTooLarge})
	// Find the first call and add its "id" field to the error.
	// This is the best we can do, given that the protocol doesn't have a way
	// of reporting an error for the entire batch.
	for _, msg := range batch {
		if msg.isCall() {
			resp.ID = msg.ID
			break
		}
	}
	h.conn.writeJSON(cp.ctx, []*jsonrpcMessage{resp}, true)
}

// handleMsg handles a single non-batch message.
func (h *handler) handleMsg(msg *jsonrpcMessage) {
	msgs := []*jsonrpcMessage{msg}
	h.handleResponses(msgs, func(msg *jsonrpcMessage) {
		h.startCallProc(func(cp *callProc) {
			h.handleNonBatchCall(cp, msg)
		})
	})
}

func (h *handler) handleNonBatchCall(cp *callProc, msg *jsonrpcMessage) {
	var (
		responded sync.Once
		timer     *time.Timer
		cancel    context.CancelFunc
	)
	cp.ctx, cancel = context.WithCancel(cp.ctx)
	defer cancel()

	// Cancel the request context after timeout and send an error response. Since the
	// running method might not return immediately on timeout, we must wait for the
	// timeout concurrently with processing the request.
	if timeout, ok := ContextRequestTimeout(cp.ctx); ok {
		timer = time.AfterFunc(timeout, func() {
			cancel()
			responded.Do(func() {
				resp := msg.errorResponse(&internalServerError{errcodeTimeout, errMsgTimeout})
				h.conn.writeJSON(cp.ctx, resp, true)
			})
		})
	}

	answer := h.handleCallMsg(cp, msg)
	if timer != nil {
		timer.Stop()
	}
	h.addSubscriptions(cp.notifiers)
	if answer != nil {
		responded.Do(func() {
			h.conn.writeJSON(cp.ctx, answer, false)
		})
	}
	for _, n := range cp.notifiers {
		n.activate()
	}
}

// close cancels all requests except for inflightReq and waits for
// call goroutines to shut down.
func (h *handler) close(err error, inflightReq *requestOp) {
	h.cancelAllRequests(err, inflightReq)
	h.callWG.Wait()
	h.cancelRoot()
	h.cancelServerSubscriptions(err)
}

// addRequestOp registers a request operation.
func (h *handler) addRequestOp(op *requestOp) {
	for _, id := range op.ids {
		h.respWait[string(id)] = op
	}
}

// removeRequestOp stops waiting for the given request IDs.
func (h *handler) removeRequestOp(op *requestOp) {
	for _, id := range op.ids {
		delete(h.respWait, string(id))
	}
}

// cancelAllRequests unblocks and removes pending requests and active subscriptions.
func (h *handler) cancelAllRequests(err error, inflightReq *requestOp) {
	didClose := make(map[*requestOp]bool)
	if inflightReq != nil {
		didClose[inflightReq] = true
	}

	for id, op := range h.respWait {
		// Remove the op so that later calls will not close op.resp again.
		delete(h.respWait, id)

		if !didClose[op] {
			op.err = err
			close(op.resp)
			didClose[op] = true
		}
	}
	for id, sub := range h.clientSubs {
		delete(h.clientSubs, id)
		sub.close(err)
	}
}

func (h *handler) addSubscriptions(nn []*Notifier) {
	h.subLock.Lock()
	defer h.subLock.Unlock()

	for _, n := range nn {
		if sub := n.takeSubscription(); sub != nil {
			h.serverSubs[sub.ID] = sub
		}
	}
}

// cancelServerSubscriptions removes all subscriptions and closes their error channels.
func (h *handler) cancelServerSubscriptions(err error) {
	h.subLock.Lock()
	defer h.subLock.Unlock()

	for id, s := range h.serverSubs {
		s.err <- err
		close(s.err)
		delete(h.serverSubs, id)
	}
}

// startCallProc runs fn in a new goroutine and starts tracking it in the h.callWG wait group.
func (h *handler) startCallProc(fn func(*callProc)) {
	h.callWG.Add(1)
	go func() {
		ctx, cancel := context.WithCancel(h.rootCtx)
		defer h.callWG.Done()
		defer cancel()
		fn(&callProc{ctx: ctx})
	}()
}

// handleResponses processes method call responses.
func (h *handler) handleResponses(batch []*jsonrpcMessage, handleCall func(*jsonrpcMessage)) {
	var resolvedops []*requestOp
	handleResp := func(msg *jsonrpcMessage) {
		op := h.respWait[string(msg.ID)]
		if op == nil {
			h.log.Debug("Unsolicited RPC response", "reqid", idForLog{msg.ID})
			return
		}
		resolvedops = append(resolvedops, op)
		delete(h.respWait, string(msg.ID))

		// For subscription responses, start the subscription if the server
		// indicates success. EthSubscribe gets unblocked in either case through
		// the op.resp channel.
		if op.sub != nil {
			if msg.Error != nil {
				op.err = msg.Error
			} else {
				op.err = json.Unmarshal(msg.Result, &op.sub.subid)
				if op.err == nil {
					go op.sub.run()
					h.clientSubs[op.sub.subid] = op.sub
				}
			}
		}

		if !op.hadResponse {
			op.hadResponse = true
			op.resp <- batch
		}
	}

	for _, msg := range batch {
		start := time.Now()
		switch {
		case msg.isResponse():
			handleResp(msg)
			h.log.Trace("Handled RPC response", "reqid", idForLog{msg.ID}, "duration", time.Since(start))

		case msg.isNotification():
			if strings.HasSuffix(msg.Method, notificationMethodSuffix) {
				h.handleSubscriptionResult(msg)
				continue
			}
			handleCall(msg)

		default:
			handleCall(msg)
		}
	}

	for _, op := range resolvedops {
		h.removeRequestOp(op)
	}
}

// handleSubscriptionResult processes subscription notifications.
func (h *handler) handleSubscriptionResult(msg *jsonrpcMessage) {
	var result subscriptionResult
	if err := json.Unmarshal(msg.Params, &result); err != nil {
		h.log.Debug("Dropping invalid subscription message")
		return
	}
	if h.clientSubs[result.ID] != nil {
		h.clientSubs[result.ID].deliver(result.Result)
	}
}

// handleCallMsg executes a call message and returns the answer.
func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage) *jsonrpcMessage {
	start := time.Now()
	switch {
	case msg.isNotification():
		h.handleCall(ctx, msg)
		h.log.Debug("Served "+msg.Method, "duration", time.Since(start))
		return nil

	case msg.isCall():
		resp := h.handleCall(ctx, msg)
		var logctx []any
		logctx = append(logctx, "reqid", idForLog{msg.ID}, "duration", time.Since(start))
		if resp.Error != nil {
			logctx = append(logctx, "err", resp.Error.Message)
			if resp.Error.Data != nil {
				logctx = append(logctx, "errdata", formatErrorData(resp.Error.Data))
			}
			h.log.Warn("Served "+msg.Method, logctx...)
		} else {
			h.log.Debug("Served "+msg.Method, logctx...)
		}
		return resp

	case msg.hasValidID():
		return msg.errorResponse(&invalidRequestError{"invalid request"})

	default:
		return errorMessage(&invalidRequestError{"invalid request"})
	}
}

// handleCall processes method calls.
func (h *handler) handleCall(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage {
	if msg.isSubscribe() {
		return h.handleSubscribe(cp, msg)
	}
	var callb *callback
	if msg.isUnsubscribe() {
		callb = h.unsubscribeCb
	} else {
		// Check method name length
		if len(msg.Method) > maxMethodNameLength {
			return msg.errorResponse(&invalidRequestError{fmt.Sprintf("method name too long: %d > %d", len(msg.Method), maxMethodNameLength)})
		}
		callb = h.reg.callback(msg.Method)
	}
	if callb == nil {
		return msg.errorResponse(&methodNotFoundError{method: msg.Method})
	}

	args, err := parsePositionalArguments(msg.Params, callb.argTypes)
	if err != nil {
		return msg.errorResponse(&invalidParamsError{err.Error()})
	}
	start := time.Now()
	answer := h.runMethod(cp.ctx, msg, callb, args)

	// Collect the statistics for RPC calls if metrics is enabled.
	// We only care about pure RPC calls; filter out subscriptions.
	if callb != h.unsubscribeCb {
		rpcRequestGauge.Inc(1)
		if answer.Error != nil {
			failedRequestGauge.Inc(1)
		} else {
			successfulRequestGauge.Inc(1)
		}
		rpcServingTimer.UpdateSince(start)
		updateServeTimeHistogram(msg.Method, answer.Error == nil, time.Since(start))
	}

	return answer
}

// handleSubscribe processes *_subscribe method calls.
func (h *handler) handleSubscribe(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage {
	if !h.allowSubscribe {
		return msg.errorResponse(ErrNotificationsUnsupported)
	}

	// Check method name length
	if len(msg.Method) > maxMethodNameLength {
		return msg.errorResponse(&invalidRequestError{fmt.Sprintf("subscription name too long: %d > %d", len(msg.Method), maxMethodNameLength)})
	}

	// The subscription method name is the first argument.
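	// For example, eth_subscribe params look like ["newHeads"] or
	// ["logs", {...filter object...}]; parseSubscriptionName extracts that
	// leading name element.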
	name, err := parseSubscriptionName(msg.Params)
	if err != nil {
		return msg.errorResponse(&invalidParamsError{err.Error()})
	}
	namespace := msg.namespace()
	callb := h.reg.subscription(namespace, name)
	if callb == nil {
		return msg.errorResponse(&subscriptionNotFoundError{namespace, name})
	}

	// Parse subscription name arg too, but remove it before calling the callback.
	argTypes := append([]reflect.Type{stringType}, callb.argTypes...)
	args, err := parsePositionalArguments(msg.Params, argTypes)
	if err != nil {
		return msg.errorResponse(&invalidParamsError{err.Error()})
	}
	args = args[1:]

	// Install notifier in context so the subscription handler can find it.
	n := &Notifier{h: h, namespace: namespace}
	cp.notifiers = append(cp.notifiers, n)
	ctx := context.WithValue(cp.ctx, notifierKey{}, n)

	return h.runMethod(ctx, msg, callb, args)
}

// runMethod runs the Go callback for an RPC method.
func (h *handler) runMethod(ctx context.Context, msg *jsonrpcMessage, callb *callback, args []reflect.Value) *jsonrpcMessage {
	result, err := callb.call(ctx, msg.Method, args)
	if err != nil {
		return msg.errorResponse(err)
	}
	return msg.response(result)
}

// unsubscribe is the callback function for all *_unsubscribe calls.
func (h *handler) unsubscribe(ctx context.Context, id ID) (bool, error) {
	h.subLock.Lock()
	defer h.subLock.Unlock()

	s := h.serverSubs[id]
	if s == nil {
		return false, ErrSubscriptionNotFound
	}
	close(s.err)
	delete(h.serverSubs, id)
	return true, nil
}

type idForLog struct{ json.RawMessage }

func (id idForLog) String() string {
	if s, err := strconv.Unquote(string(id.RawMessage)); err == nil {
		return s
	}
	return string(id.RawMessage)
}

var errTruncatedOutput = errors.New("truncated output")

type limitedBuffer struct {
	output []byte
	limit  int
}

func (buf *limitedBuffer) Write(data []byte) (int, error) {
	// Append at most the remaining capacity; report truncation once the limit is hit.
	avail := max(0, buf.limit-len(buf.output))
	if len(data) < avail {
		buf.output = append(buf.output, data...)
		return len(data), nil
	}
	buf.output = append(buf.output, data[:avail]...)
	return avail, errTruncatedOutput
}

func formatErrorData(v any) string {
	buf := limitedBuffer{limit: 1024}
	err := json.NewEncoder(&buf).Encode(v)
	switch {
	case err == nil:
		return string(bytes.TrimRight(buf.output, "\n"))
	case errors.Is(err, errTruncatedOutput):
		return fmt.Sprintf("%s... (truncated)", buf.output)
	default:
		return fmt.Sprintf("bad error data (err=%v)", err)
	}
}
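// Example (illustrative sketch, not part of the upstream file): formatErrorData keeps
// logged error data bounded. A value whose JSON encoding exceeds the 1024-byte limit
// is cut off at the limit and suffixed with "... (truncated)":
//
//	data := strings.Repeat("x", 4096) // hypothetical oversized error payload
//	s := formatErrorData(data)
//	// len(s) is roughly the 1024-byte limit plus the suffix, and
//	// strings.HasSuffix(s, "... (truncated)") is true.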