github.com/ethereum/go-ethereum@v1.16.1/rpc/client_test.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rpc

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"math/rand"
	"net"
	"net/http"
	"net/http/httptest"
	"os"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/davecgh/go-spew/spew"
	"github.com/ethereum/go-ethereum/log"
)

func TestClientRequest(t *testing.T) {
	t.Parallel()

	server := newTestServer()
	defer server.Stop()
	client := DialInProc(server)
	defer client.Close()

	var resp echoResult
	if err := client.Call(&resp, "test_echo", "hello", 10, &echoArgs{"world"}); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(resp, echoResult{"hello", 10, &echoArgs{"world"}}) {
		t.Errorf("incorrect result %#v", resp)
	}
}

func TestClientResponseType(t *testing.T) {
	t.Parallel()

	server := newTestServer()
	defer server.Stop()
	client := DialInProc(server)
	defer client.Close()

	if err := client.Call(nil, "test_echo", "hello", 10, &echoArgs{"world"}); err != nil {
		t.Errorf("Passing nil as result should be fine, but got an error: %v", err)
	}
	var resultVar echoResult
	// Note: passing the var, not a ref
	err := client.Call(resultVar, "test_echo", "hello", 10, &echoArgs{"world"})
	if err == nil {
		t.Error("Passing a var as result should be an error")
	}
}

// This test checks calling a method that returns 'null'.
func TestClientNullResponse(t *testing.T) {
	t.Parallel()

	server := newTestServer()
	defer server.Stop()

	client := DialInProc(server)
	defer client.Close()

	var result json.RawMessage
	if err := client.Call(&result, "test_null"); err != nil {
		t.Fatal(err)
	}
	if result == nil {
		t.Fatal("Expected non-nil result")
	}
	if !reflect.DeepEqual(result, json.RawMessage("null")) {
		t.Errorf("Expected null, got %s", result)
	}
}

// This test checks that server-returned errors with code and data come out of Client.Call.
func TestClientErrorData(t *testing.T) {
	t.Parallel()

	server := newTestServer()
	defer server.Stop()
	client := DialInProc(server)
	defer client.Close()

	var resp interface{}
	err := client.Call(&resp, "test_returnError")
	if err == nil {
		t.Fatal("expected error")
	}

	// Check code.
	// The method handler returns an error value which implements the rpc.Error
	// interface, i.e. it has a custom error code. The server returns this error code.
	expectedCode := testError{}.ErrorCode()
	if e, ok := err.(Error); !ok {
		t.Fatalf("client did not return rpc.Error, got %#v", e)
	} else if e.ErrorCode() != expectedCode {
		t.Fatalf("wrong error code %d, want %d", e.ErrorCode(), expectedCode)
	}

	// Check data.
	if e, ok := err.(DataError); !ok {
		t.Fatalf("client did not return rpc.DataError, got %#v", e)
	} else if e.ErrorData() != (testError{}.ErrorData()) {
		t.Fatalf("wrong error data %#v, want %#v", e.ErrorData(), testError{}.ErrorData())
	}
}
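
// exampleCodedError is an editorial sketch, not part of the original test
// file: it shows, with made-up values, the shape of a server-side error type
// carrying a custom code and data. Handlers returning such a value cause
// Client.Call to surface it through the Error and DataError interfaces checked
// in TestClientErrorData above; the real counterpart used by the tests is
// testError from the test service.
type exampleCodedError struct{}

func (exampleCodedError) Error() string          { return "example error" }
func (exampleCodedError) ErrorCode() int         { return -32042 }         // hypothetical code
func (exampleCodedError) ErrorData() interface{} { return "example data" } // hypothetical data

// Compile-time checks that the sketch satisfies both interfaces.
var (
	_ Error     = exampleCodedError{}
	_ DataError = exampleCodedError{}
)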

func TestClientBatchRequest(t *testing.T) {
	t.Parallel()

	server := newTestServer()
	defer server.Stop()
	client := DialInProc(server)
	defer client.Close()

	batch := []BatchElem{
		{
			Method: "test_echo",
			Args:   []interface{}{"hello", 10, &echoArgs{"world"}},
			Result: new(echoResult),
		},
		{
			Method: "test_echo",
			Args:   []interface{}{"hello2", 11, &echoArgs{"world"}},
			Result: new(echoResult),
		},
		{
			Method: "no_such_method",
			Args:   []interface{}{1, 2, 3},
			Result: new(int),
		},
	}
	if err := client.BatchCall(batch); err != nil {
		t.Fatal(err)
	}
	wantResult := []BatchElem{
		{
			Method: "test_echo",
			Args:   []interface{}{"hello", 10, &echoArgs{"world"}},
			Result: &echoResult{"hello", 10, &echoArgs{"world"}},
		},
		{
			Method: "test_echo",
			Args:   []interface{}{"hello2", 11, &echoArgs{"world"}},
			Result: &echoResult{"hello2", 11, &echoArgs{"world"}},
		},
		{
			Method: "no_such_method",
			Args:   []interface{}{1, 2, 3},
			Result: new(int),
			Error:  &jsonError{Code: -32601, Message: "the method no_such_method does not exist/is not available"},
		},
	}
	if !reflect.DeepEqual(batch, wantResult) {
		t.Errorf("batch results mismatch:\ngot %swant %s", spew.Sdump(batch), spew.Sdump(wantResult))
	}
}
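
// batchEcho is an editorial sketch, not part of the original test file: the
// caller-side BatchCall pattern from TestClientBatchRequest above. Each
// element gets its own Result pointer, the top-level error covers transport
// failures, and per-element errors are checked afterwards. The argument values
// are placeholders.
func batchEcho(client *Client) ([]echoResult, error) {
	results := make([]echoResult, 2)
	batch := []BatchElem{
		{Method: "test_echo", Args: []interface{}{"a", 1, &echoArgs{"x"}}, Result: &results[0]},
		{Method: "test_echo", Args: []interface{}{"b", 2, &echoArgs{"y"}}, Result: &results[1]},
	}
	if err := client.BatchCall(batch); err != nil {
		return nil, err // transport-level failure: no element was answered
	}
	for _, elem := range batch {
		if elem.Error != nil {
			return nil, elem.Error // server answered this element with an error
		}
	}
	return results, nil
}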

// This checks that, for HTTP connections, the length of batch responses is validated to
// match the request exactly.
func TestClientBatchRequest_len(t *testing.T) {
	t.Parallel()

	b, err := json.Marshal([]jsonrpcMessage{
		{Version: "2.0", ID: json.RawMessage("1"), Result: json.RawMessage(`"0x1"`)},
		{Version: "2.0", ID: json.RawMessage("2"), Result: json.RawMessage(`"0x2"`)},
	})
	if err != nil {
		t.Fatal("failed to encode jsonrpc message:", err)
	}
	s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		_, err := rw.Write(b)
		if err != nil {
			t.Error("failed to write response:", err)
		}
	}))
	t.Cleanup(s.Close)

	t.Run("too-few", func(t *testing.T) {
		t.Parallel()

		client, err := Dial(s.URL)
		if err != nil {
			t.Fatal("failed to dial test server:", err)
		}
		defer client.Close()

		batch := []BatchElem{
			{Method: "foo", Result: new(string)},
			{Method: "bar", Result: new(string)},
			{Method: "baz", Result: new(string)},
		}
		ctx, cancelFn := context.WithTimeout(context.Background(), time.Second)
		defer cancelFn()

		if err := client.BatchCallContext(ctx, batch); err != nil {
			t.Fatal("error:", err)
		}
		for i, elem := range batch[:2] {
			if elem.Error != nil {
				t.Errorf("expected no error for batch element %d, got %q", i, elem.Error)
			}
		}
		for i, elem := range batch[2:] {
			if elem.Error != ErrMissingBatchResponse {
				t.Errorf("wrong error %q for batch element %d", elem.Error, i+2)
			}
		}
	})

	t.Run("too-many", func(t *testing.T) {
		t.Parallel()

		client, err := Dial(s.URL)
		if err != nil {
			t.Fatal("failed to dial test server:", err)
		}
		defer client.Close()

		batch := []BatchElem{
			{Method: "foo", Result: new(string)},
		}
		ctx, cancelFn := context.WithTimeout(context.Background(), time.Second)
		defer cancelFn()

		if err := client.BatchCallContext(ctx, batch); err != nil {
			t.Fatal("error:", err)
		}
		for i, elem := range batch[:1] {
			if elem.Error != nil {
				t.Errorf("expected no error for batch element %d, got %q", i, elem.Error)
			}
		}
		for i, elem := range batch[1:] {
			if elem.Error != ErrMissingBatchResponse {
				t.Errorf("wrong error %q for batch element %d", elem.Error, i+2)
			}
		}
	})
}

// This checks that the client can handle the case where the server doesn't
// respond to all requests in a batch.
func TestClientBatchRequestLimit(t *testing.T) {
	t.Parallel()

	server := newTestServer()
	defer server.Stop()
	server.SetBatchLimits(2, 100000)
	client := DialInProc(server)
	defer client.Close()

	batch := []BatchElem{
		{Method: "foo"},
		{Method: "bar"},
		{Method: "baz"},
	}
	err := client.BatchCall(batch)
	if err != nil {
		t.Fatal("unexpected error:", err)
	}

	// Check that the first response indicates an error with batch size.
	var err0 Error
	if !errors.As(batch[0].Error, &err0) {
		t.Log("error zero:", batch[0].Error)
		t.Fatalf("batch elem 0 has wrong error type: %T", batch[0].Error)
	} else {
		if err0.ErrorCode() != -32600 || err0.Error() != errMsgBatchTooLarge {
			t.Fatalf("wrong error on batch elem zero: %v", err0)
		}
	}

	// Check that remaining response batch elements are reported as absent.
	for i, elem := range batch[1:] {
		if elem.Error != ErrMissingBatchResponse {
			t.Fatalf("batch elem %d has unexpected error: %v", i+1, elem.Error)
		}
	}
}
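
// classifyBatchErrors is an editorial sketch, not part of the original test
// file: one way a caller might triage per-element failures after BatchCall
// when the server enforces batch limits, as the two tests above exercise. The
// function name and return values are illustrative.
func classifyBatchErrors(batch []BatchElem) (missing, failed int) {
	for _, elem := range batch {
		switch {
		case elem.Error == nil:
			// Response arrived and was decoded into elem.Result.
		case errors.Is(elem.Error, ErrMissingBatchResponse):
			// The server returned no response for this element.
			missing++
		default:
			// The server answered this element with a JSON-RPC error.
			failed++
		}
	}
	return missing, failed
}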

func TestClientNotify(t *testing.T) {
	t.Parallel()

	server := newTestServer()
	defer server.Stop()
	client := DialInProc(server)
	defer client.Close()

	if err := client.Notify(context.Background(), "test_echo", "hello", 10, &echoArgs{"world"}); err != nil {
		t.Fatal(err)
	}
}

// func TestClientCancelInproc(t *testing.T) { testClientCancel("inproc", t) }
func TestClientCancelWebsocket(t *testing.T) { testClientCancel("ws", t) }
func TestClientCancelHTTP(t *testing.T)      { testClientCancel("http", t) }
func TestClientCancelIPC(t *testing.T)       { testClientCancel("ipc", t) }

// This test checks that requests made through CallContext can be canceled by canceling
// the context.
func testClientCancel(transport string, t *testing.T) {
	// These tests take a lot of time, run them all at once.
	// You probably want to run with -parallel 1 or comment out
	// the call to t.Parallel if you enable the logging.
	t.Parallel()

	server := newTestServer()
	defer server.Stop()

	// What we want to achieve is that the context gets canceled
	// at various stages of request processing. The interesting cases
	// are:
	//  - cancel during dial
	//  - cancel while performing a HTTP request
	//  - cancel while waiting for a response
	//
	// To trigger those, the times are chosen such that connections
	// are killed within the deadline for every other call (maxKillTimeout
	// is 2x maxCancelTimeout).
	//
	// Once a connection is dead, there is a fair chance it won't connect
	// successfully because the accept is delayed by 1s.
	maxContextCancelTimeout := 300 * time.Millisecond
	fl := &flakeyListener{
		maxAcceptDelay: 1 * time.Second,
		maxKillTimeout: 600 * time.Millisecond,
	}

	var client *Client
	switch transport {
	case "ws", "http":
		c, hs := httpTestClient(server, transport, fl)
		defer hs.Close()
		client = c
	case "ipc":
		c, l := ipcTestClient(server, fl)
		defer l.Close()
		client = c
	default:
		panic("unknown transport: " + transport)
	}
	defer client.Close()

	// The actual test starts here.
	var (
		wg       sync.WaitGroup
		nreqs    = 10
		ncallers = 10
	)
	caller := func(index int) {
		defer wg.Done()
		for i := 0; i < nreqs; i++ {
			var (
				ctx     context.Context
				cancel  func()
				timeout = time.Duration(rand.Int63n(int64(maxContextCancelTimeout)))
			)
			if index < ncallers/2 {
				// For half of the callers, create a context without deadline
				// and cancel it later.
				ctx, cancel = context.WithCancel(context.Background())
				time.AfterFunc(timeout, cancel)
			} else {
				// For the other half, create a context with a deadline instead. This is
				// different because the context deadline is used to set the socket write
				// deadline.
				ctx, cancel = context.WithTimeout(context.Background(), timeout)
			}

			// Now perform a call with the context.
			// The key thing here is that no call will ever complete successfully.
			err := client.CallContext(ctx, nil, "test_block")
			switch {
			case err == nil:
				_, hasDeadline := ctx.Deadline()
				t.Errorf("no error for call with %v wait time (deadline: %v)", timeout, hasDeadline)
				// default:
				// 	t.Logf("got expected error with %v wait time: %v", timeout, err)
			}
			cancel()
		}
	}
	wg.Add(ncallers)
	for i := 0; i < ncallers; i++ {
		go caller(i)
	}
	wg.Wait()
}
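
// callWithTimeout is an editorial sketch, not part of the original test file:
// the caller-side pattern exercised by testClientCancel above, bounding a
// single request with a context deadline so that cancellation aborts the
// in-flight call. The 500ms deadline is an arbitrary placeholder.
func callWithTimeout(client *Client, result interface{}, method string, args ...interface{}) error {
	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()
	return client.CallContext(ctx, result, method, args...)
}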

func TestClientSubscribeInvalidArg(t *testing.T) {
	t.Parallel()

	server := newTestServer()
	defer server.Stop()
	client := DialInProc(server)
	defer client.Close()

	check := func(shouldPanic bool, arg interface{}) {
		defer func() {
			err := recover()
			if shouldPanic && err == nil {
				t.Errorf("EthSubscribe should've panicked for %#v", arg)
			}
			if !shouldPanic && err != nil {
				t.Errorf("EthSubscribe shouldn't have panicked for %#v", arg)
				buf := make([]byte, 1024*1024)
				buf = buf[:runtime.Stack(buf, false)]
				t.Error(err)
				t.Error(string(buf))
			}
		}()
		client.EthSubscribe(context.Background(), arg, "foo_bar")
	}
	check(true, nil)
	check(true, 1)
	check(true, (chan int)(nil))
	check(true, make(<-chan int))
	check(false, make(chan int))
	check(false, make(chan<- int))
}

func TestClientSubscribe(t *testing.T) {
	t.Parallel()

	server := newTestServer()
	defer server.Stop()
	client := DialInProc(server)
	defer client.Close()

	nc := make(chan int)
	count := 10
	sub, err := client.Subscribe(context.Background(), "nftest", nc, "someSubscription", count, 0)
	if err != nil {
		t.Fatal("can't subscribe:", err)
	}
	for i := 0; i < count; i++ {
		if val := <-nc; val != i {
			t.Fatalf("value mismatch: got %d, want %d", val, i)
		}
	}

	sub.Unsubscribe()
	select {
	case v := <-nc:
		t.Fatal("received value after unsubscribe:", v)
	case err := <-sub.Err():
		if err != nil {
			t.Fatalf("Err returned a non-nil error after explicit unsubscribe: %q", err)
		}
	case <-time.After(1 * time.Second):
		t.Fatalf("subscription not closed within 1s after unsubscribe")
	}
}
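
// consumeSubscription is an editorial sketch, not part of the original test
// file: the usual consumer loop for a ClientSubscription as exercised by
// TestClientSubscribe above. It reads values until Err() yields, which happens
// with a nil value after an explicit Unsubscribe and with a non-nil error when
// the subscription or connection fails.
func consumeSubscription(sub *ClientSubscription, nc <-chan int) error {
	for {
		select {
		case v := <-nc:
			_ = v // a real consumer would handle the notification value here
		case err := <-sub.Err():
			return err
		}
	}
}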

// In this test, the connection drops while Subscribe is waiting for a response.
func TestClientSubscribeClose(t *testing.T) {
	t.Parallel()

	server := newTestServer()
	service := &notificationTestService{
		gotHangSubscriptionReq:  make(chan struct{}),
		unblockHangSubscription: make(chan struct{}),
	}
	if err := server.RegisterName("nftest2", service); err != nil {
		t.Fatal(err)
	}

	defer server.Stop()
	client := DialInProc(server)
	defer client.Close()

	var (
		nc   = make(chan int)
		errc = make(chan error, 1)
		sub  *ClientSubscription
		err  error
	)
	go func() {
		sub, err = client.Subscribe(context.Background(), "nftest2", nc, "hangSubscription", 999)
		errc <- err
	}()

	<-service.gotHangSubscriptionReq
	client.Close()
	service.unblockHangSubscription <- struct{}{}

	select {
	case err := <-errc:
		if err == nil {
			t.Errorf("Subscribe returned nil error after Close")
		}
		if sub != nil {
			t.Error("Subscribe returned non-nil subscription after Close")
		}
	case <-time.After(1 * time.Second):
		t.Fatalf("Subscribe did not return within 1s after Close")
	}
}

// This test reproduces https://github.com/ethereum/go-ethereum/issues/17837 where the
// client hangs during shutdown when Unsubscribe races with Client.Close.
func TestClientCloseUnsubscribeRace(t *testing.T) {
	t.Parallel()

	server := newTestServer()
	defer server.Stop()

	for i := 0; i < 20; i++ {
		client := DialInProc(server)
		nc := make(chan int)
		sub, err := client.Subscribe(context.Background(), "nftest", nc, "someSubscription", 3, 1)
		if err != nil {
			t.Fatal(err)
		}
		go client.Close()
		go sub.Unsubscribe()
		select {
		case <-sub.Err():
		case <-time.After(5 * time.Second):
			t.Fatal("subscription not closed within timeout")
		}
	}
}

// unsubscribeBlocker will wait for the quit channel to process an unsubscribe
// request.
type unsubscribeBlocker struct {
	ServerCodec
	quit chan struct{}
}

func (b *unsubscribeBlocker) readBatch() ([]*jsonrpcMessage, bool, error) {
	msgs, batch, err := b.ServerCodec.readBatch()
	for _, msg := range msgs {
		if msg.isUnsubscribe() {
			<-b.quit
		}
	}
	return msgs, batch, err
}

// TestUnsubscribeTimeout verifies that calling the client's Unsubscribe
// function will eventually time out and not block forever in case the server
// does not respond.
// It reproduces the issue https://github.com/ethereum/go-ethereum/issues/30156
func TestUnsubscribeTimeout(t *testing.T) {
	t.Parallel()

	srv := NewServer()
	srv.RegisterName("nftest", new(notificationTestService))

	// Setup middleware to block on unsubscribe.
	p1, p2 := net.Pipe()
	blocker := &unsubscribeBlocker{ServerCodec: NewCodec(p1), quit: make(chan struct{})}
	defer close(blocker.quit)

	// Serve the middleware.
	go srv.ServeCodec(blocker, OptionMethodInvocation|OptionSubscriptions)
	defer srv.Stop()

	// Create the client on the other end of the pipe.
	cfg := new(clientConfig)
	client, _ := newClient(context.Background(), cfg, func(context.Context) (ServerCodec, error) {
		return NewCodec(p2), nil
	})
	defer client.Close()

	// Start subscription.
	sub, err := client.Subscribe(context.Background(), "nftest", make(chan int), "someSubscription", 1, 1)
	if err != nil {
		t.Fatalf("failed to subscribe: %v", err)
	}

	// Now on a separate thread, attempt to unsubscribe. Since the middleware
	// won't return, the function will only return if it times out on the request.
	done := make(chan struct{})
	go func() {
		sub.Unsubscribe()
		done <- struct{}{}
	}()

	// Wait for the timeout. If the expected time for the timeout elapses, the
	// test is considered failed.
	select {
	case <-done:
	case <-time.After(unsubscribeTimeout + 3*time.Second):
		t.Fatalf("Unsubscribe did not return within %s", unsubscribeTimeout)
	}
}

// unsubscribeRecorder collects the subscription IDs of *_unsubscribe calls.
type unsubscribeRecorder struct {
	ServerCodec
	unsubscribes map[string]bool
}

func (r *unsubscribeRecorder) readBatch() ([]*jsonrpcMessage, bool, error) {
	if r.unsubscribes == nil {
		r.unsubscribes = make(map[string]bool)
	}

	msgs, batch, err := r.ServerCodec.readBatch()
	for _, msg := range msgs {
		if msg.isUnsubscribe() {
			var params []string
			if err := json.Unmarshal(msg.Params, &params); err != nil {
				panic("unsubscribe decode error: " + err.Error())
			}
			r.unsubscribes[params[0]] = true
		}
	}
	return msgs, batch, err
}

// This checks that Client calls the _unsubscribe method on the server when Unsubscribe is
// called on a subscription.
func TestClientSubscriptionUnsubscribeServer(t *testing.T) {
	t.Parallel()

	// Create the server.
	srv := NewServer()
	srv.RegisterName("nftest", new(notificationTestService))
	p1, p2 := net.Pipe()
	recorder := &unsubscribeRecorder{ServerCodec: NewCodec(p1)}
	go srv.ServeCodec(recorder, OptionMethodInvocation|OptionSubscriptions)
	defer srv.Stop()

	// Create the client on the other end of the pipe.
	cfg := new(clientConfig)
	client, _ := newClient(context.Background(), cfg, func(context.Context) (ServerCodec, error) {
		return NewCodec(p2), nil
	})
	defer client.Close()

	// Create the subscription.
	ch := make(chan int)
	sub, err := client.Subscribe(context.Background(), "nftest", ch, "someSubscription", 1, 1)
	if err != nil {
		t.Fatal(err)
	}

	// Unsubscribe and check that unsubscribe was called.
	sub.Unsubscribe()
	if !recorder.unsubscribes[sub.subid] {
		t.Fatal("client did not call unsubscribe method")
	}
	if _, open := <-sub.Err(); open {
		t.Fatal("subscription error channel not closed after unsubscribe")
	}
}

// This checks that the subscribed channel can be closed after Unsubscribe.
// It is the reproducer for https://github.com/ethereum/go-ethereum/issues/22322
func TestClientSubscriptionChannelClose(t *testing.T) {
	t.Parallel()

	var (
		srv     = NewServer()
		httpsrv = httptest.NewServer(srv.WebsocketHandler(nil))
		wsURL   = "ws:" + strings.TrimPrefix(httpsrv.URL, "http:")
	)
	defer srv.Stop()
	defer httpsrv.Close()

	srv.RegisterName("nftest", new(notificationTestService))
	client, _ := Dial(wsURL)
	defer client.Close()

	for i := 0; i < 100; i++ {
		ch := make(chan int, 100)
		sub, err := client.Subscribe(context.Background(), "nftest", ch, "someSubscription", 100, 1)
		if err != nil {
			t.Fatal(err)
		}
		sub.Unsubscribe()
		close(ch)
	}
}
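
// subscribeBuffered is an editorial sketch, not part of the original test
// file: one way to lower the risk of ErrSubscriptionQueueOverflow exercised by
// TestClientNotificationStorm below, by giving the notification channel some
// buffer so short bursts don't stall delivery. The consumer must still keep up
// on average; the buffer size here is an arbitrary placeholder.
func subscribeBuffered(client *Client, namespace string, args ...interface{}) (*ClientSubscription, <-chan int, error) {
	ch := make(chan int, 256) // absorbs bursts of notifications
	sub, err := client.Subscribe(context.Background(), namespace, ch, args...)
	if err != nil {
		return nil, nil, err
	}
	return sub, ch, nil
}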

// This test checks that Client doesn't lock up when a single subscriber
// doesn't read subscription events.
func TestClientNotificationStorm(t *testing.T) {
	t.Parallel()

	server := newTestServer()
	defer server.Stop()

	doTest := func(count int, wantError bool) {
		client := DialInProc(server)
		defer client.Close()
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()

		// Subscribe on the server. It will start sending many notifications
		// very quickly.
		nc := make(chan int)
		sub, err := client.Subscribe(ctx, "nftest", nc, "someSubscription", count, 0)
		if err != nil {
			t.Fatal("can't subscribe:", err)
		}
		defer sub.Unsubscribe()

		// Process each notification, try to run a call in between each of them.
		for i := 0; i < count; i++ {
			select {
			case val := <-nc:
				if val != i {
					t.Fatalf("(%d/%d) unexpected value %d", i, count, val)
				}
			case err := <-sub.Err():
				if wantError && err != ErrSubscriptionQueueOverflow {
					t.Fatalf("(%d/%d) got error %q, want %q", i, count, err, ErrSubscriptionQueueOverflow)
				} else if !wantError {
					t.Fatalf("(%d/%d) got unexpected error %q", i, count, err)
				}
				return
			}
			var r int
			err := client.CallContext(ctx, &r, "nftest_echo", i)
			if err != nil {
				if !wantError {
					t.Fatalf("(%d/%d) call error: %v", i, count, err)
				}
				return
			}
		}
		if wantError {
			t.Fatalf("didn't get expected error")
		}
	}

	doTest(8000, false)
	doTest(24000, true)
}

func TestClientSetHeader(t *testing.T) {
	t.Parallel()

	var gotHeader bool
	srv := newTestServer()
	httpsrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Header.Get("test") == "ok" {
			gotHeader = true
		}
		srv.ServeHTTP(w, r)
	}))
	defer httpsrv.Close()
	defer srv.Stop()

	client, err := Dial(httpsrv.URL)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	client.SetHeader("test", "ok")
	if _, err := client.SupportedModules(); err != nil {
		t.Fatal(err)
	}
	if !gotHeader {
		t.Fatal("client did not set custom header")
	}

	// Check that Content-Type can be replaced.
	client.SetHeader("content-type", "application/x-garbage")
	_, err = client.SupportedModules()
	if err == nil {
		t.Fatal("no error for invalid content-type header")
	} else if !strings.Contains(err.Error(), "Unsupported Media Type") {
		t.Fatalf("error is not related to content-type: %q", err)
	}
}

func TestClientHTTP(t *testing.T) {
	t.Parallel()

	server := newTestServer()
	defer server.Stop()

	client, hs := httpTestClient(server, "http", nil)
	defer hs.Close()
	defer client.Close()

	// Launch concurrent requests.
	var (
		results    = make([]echoResult, 100)
		errc       = make(chan error, len(results))
		wantResult = echoResult{"a", 1, new(echoArgs)}
	)
	for i := range results {
		go func() {
			errc <- client.Call(&results[i], "test_echo", wantResult.String, wantResult.Int, wantResult.Args)
		}()
	}

	// Wait for all of them to complete.
	timeout := time.NewTimer(5 * time.Second)
	defer timeout.Stop()
	for i := range results {
		select {
		case err := <-errc:
			if err != nil {
				t.Fatal(err)
			}
		case <-timeout.C:
			t.Fatalf("timeout (got %d/%d results)", i+1, len(results))
		}
	}

	// Check results.
	for i := range results {
		if !reflect.DeepEqual(results[i], wantResult) {
			t.Errorf("result %d mismatch: got %#v, want %#v", i, results[i], wantResult)
		}
	}
}
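
// newHeaderedHTTPClient is an editorial sketch, not part of the original test
// file: the SetHeader pattern checked by TestClientSetHeader above, attaching
// a static header to every HTTP request made by the client. The header name
// and token value are placeholders.
func newHeaderedHTTPClient(url, token string) (*Client, error) {
	client, err := Dial(url)
	if err != nil {
		return nil, err
	}
	client.SetHeader("Authorization", "Bearer "+token)
	return client, nil
}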

func TestClientReconnect(t *testing.T) {
	t.Parallel()

	startServer := func(addr string) (*Server, net.Listener) {
		srv := newTestServer()
		l, err := net.Listen("tcp", addr)
		if err != nil {
			t.Fatal("can't listen:", err)
		}
		go http.Serve(l, srv.WebsocketHandler([]string{"*"}))
		return srv, l
	}

	ctx, cancel := context.WithTimeout(context.Background(), 12*time.Second)
	defer cancel()

	// Start a server and corresponding client.
	s1, l1 := startServer("127.0.0.1:0")
	client, err := DialContext(ctx, "ws://"+l1.Addr().String())
	if err != nil {
		t.Fatal("can't dial", err)
	}
	defer client.Close()

	// Perform a call. This should work because the server is up.
	var resp echoResult
	if err := client.CallContext(ctx, &resp, "test_echo", "", 1, nil); err != nil {
		t.Fatal(err)
	}

	// Shut down the server and allow for some cool down time so we can listen on the same
	// address again.
	l1.Close()
	s1.Stop()
	time.Sleep(2 * time.Second)

	// Try calling again. It shouldn't work.
	if err := client.CallContext(ctx, &resp, "test_echo", "", 2, nil); err == nil {
		t.Error("successful call while the server is down")
		t.Logf("resp: %#v", resp)
	}

	// Start it up again and call again. The connection should be reestablished.
	// We spawn multiple calls here to check whether this hangs somehow.
	s2, l2 := startServer(l1.Addr().String())
	defer l2.Close()
	defer s2.Stop()

	start := make(chan struct{})
	errors := make(chan error, 20)
	for i := 0; i < cap(errors); i++ {
		go func() {
			<-start
			var resp echoResult
			errors <- client.CallContext(ctx, &resp, "test_echo", "", 3, nil)
		}()
	}
	close(start)
	errcount := 0
	for i := 0; i < cap(errors); i++ {
		if err = <-errors; err != nil {
			errcount++
		}
	}
	t.Logf("%d errors, last error: %v", errcount, err)
	if errcount > 1 {
		t.Errorf("expected one error after disconnect, got %d", errcount)
	}
}

func httpTestClient(srv *Server, transport string, fl *flakeyListener) (*Client, *httptest.Server) {
	// Create the HTTP server.
	var hs *httptest.Server
	switch transport {
	case "ws":
		hs = httptest.NewUnstartedServer(srv.WebsocketHandler([]string{"*"}))
	case "http":
		hs = httptest.NewUnstartedServer(srv)
	default:
		panic("unknown HTTP transport: " + transport)
	}
	// Wrap the listener if required.
	if fl != nil {
		fl.Listener = hs.Listener
		hs.Listener = fl
	}
	// Connect the client.
	hs.Start()
	client, err := Dial(transport + "://" + hs.Listener.Addr().String())
	if err != nil {
		panic(err)
	}
	return client, hs
}

func ipcTestClient(srv *Server, fl *flakeyListener) (*Client, net.Listener) {
	// Listen on a random endpoint.
	endpoint := fmt.Sprintf("go-ethereum-test-ipc-%d-%d", os.Getpid(), rand.Int63())
	if runtime.GOOS == "windows" {
		endpoint = `\\.\pipe\` + endpoint
	} else {
		endpoint = os.TempDir() + "/" + endpoint
	}
	l, err := ipcListen(endpoint)
	if err != nil {
		panic(err)
	}
	// Connect the listener to the server.
	if fl != nil {
		fl.Listener = l
		l = fl
	}
	go srv.ServeListener(l)
	// Connect the client.
	client, err := Dial(endpoint)
	if err != nil {
		panic(err)
	}
	return client, l
}

// flakeyListener kills accepted connections after a random timeout.
type flakeyListener struct {
	net.Listener
	maxKillTimeout time.Duration
	maxAcceptDelay time.Duration
}

func (l *flakeyListener) Accept() (net.Conn, error) {
	delay := time.Duration(rand.Int63n(int64(l.maxAcceptDelay)))
	time.Sleep(delay)

	c, err := l.Listener.Accept()
	if err == nil {
		timeout := time.Duration(rand.Int63n(int64(l.maxKillTimeout)))
		time.AfterFunc(timeout, func() {
			log.Debug(fmt.Sprintf("killing conn %v after %v", c.LocalAddr(), timeout))
			c.Close()
		})
	}
	return c, err
}
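
// newFlakeyWSClient is an editorial sketch, not part of the original test
// file: it shows how the helpers above compose. Wrapping the test server's
// listener in a flakeyListener makes accepted connections die after a random
// timeout, which is what testClientCancel relies on to exercise cancellation
// and reconnects. The delay values are arbitrary placeholders.
func newFlakeyWSClient(srv *Server) (*Client, *httptest.Server) {
	fl := &flakeyListener{
		maxAcceptDelay: 500 * time.Millisecond,
		maxKillTimeout: 1 * time.Second,
	}
	return httpTestClient(srv, "ws", fl)
}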