github.com/core-coin/go-core/v2@v2.1.9/node/node_test.go

// Copyright 2015 by the Authors
// This file is part of the go-core library.
//
// The go-core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.

package node

import (
	crand "crypto/rand"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"os"
	"reflect"
	"strings"
	"testing"

	"github.com/core-coin/go-core/v2/xcbdb"

	"github.com/core-coin/go-core/v2/crypto"
	"github.com/core-coin/go-core/v2/p2p"
	"github.com/core-coin/go-core/v2/rpc"

	"github.com/stretchr/testify/assert"
)

var (
	testNodeKey, _ = crypto.GenerateKey(crand.Reader)
)

func testNodeConfig() *Config {
	return &Config{
		Name: "test node",
		P2P:  p2p.Config{PrivateKey: testNodeKey},
	}
}

// Tests that an empty protocol stack can be closed more than once.
func TestNodeCloseMultipleTimes(t *testing.T) {
	stack, err := New(testNodeConfig())
	if err != nil {
		t.Fatalf("failed to create protocol stack: %v", err)
	}
	stack.Close()

	// Ensure that a stopped node can be stopped again
	for i := 0; i < 3; i++ {
		if err := stack.Close(); err != ErrNodeStopped {
			t.Fatalf("iter %d: stop failure mismatch: have %v, want %v", i, err, ErrNodeStopped)
		}
	}
}

func TestNodeStartMultipleTimes(t *testing.T) {
	stack, err := New(testNodeConfig())
	if err != nil {
		t.Fatalf("failed to create protocol stack: %v", err)
	}

	// Ensure that a node can be successfully started, but only once
	if err := stack.Start(); err != nil {
		t.Fatalf("failed to start node: %v", err)
	}
	if err := stack.Start(); err != ErrNodeRunning {
		t.Fatalf("start failure mismatch: have %v, want %v", err, ErrNodeRunning)
	}
	// Ensure that a node can be stopped, but only once
	if err := stack.Close(); err != nil {
		t.Fatalf("failed to stop node: %v", err)
	}
	if err := stack.Close(); err != ErrNodeStopped {
		t.Fatalf("stop failure mismatch: have %v, want %v", err, ErrNodeStopped)
	}
}

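// The two tests above pin down the node's start/stop contract: New returns a stopped
// stack, Start may only succeed once (ErrNodeRunning afterwards), and Close may only
// succeed once (ErrNodeStopped afterwards). The sketch below is NOT part of the
// original test suite; it is an illustrative, hedged example of how a caller outside
// the tests might drive that contract, reusing only APIs already exercised above.
// The function name nodeLifecycleSketch is hypothetical.
func nodeLifecycleSketch() error {
	key, err := crypto.GenerateKey(crand.Reader)
	if err != nil {
		return err
	}
	stack, err := New(&Config{
		Name: "sketch node",
		P2P:  p2p.Config{PrivateKey: key},
	})
	if err != nil {
		return err
	}
	// Start the stack; a second Start would return ErrNodeRunning.
	if err := stack.Start(); err != nil {
		return err
	}
	// Close the stack; a second Close would return ErrNodeStopped.
	return stack.Close()
}
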
// Tests that if the data dir is already in use, an appropriate error is returned.
func TestNodeUsedDataDir(t *testing.T) {
	// Create a temporary folder to use as the data directory
	dir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("failed to create temporary data directory: %v", err)
	}
	defer os.RemoveAll(dir)

	// Create a new node based on the data directory
	original, err := New(&Config{DataDir: dir})
	if err != nil {
		t.Fatalf("failed to create original protocol stack: %v", err)
	}
	defer original.Close()
	if err := original.Start(); err != nil {
		t.Fatalf("failed to start original protocol stack: %v", err)
	}

	// Create a second node based on the same data directory and ensure failure
	_, err = New(&Config{DataDir: dir})
	if err != ErrDatadirUsed {
		t.Fatalf("duplicate datadir failure mismatch: have %v, want %v", err, ErrDatadirUsed)
	}
}

// Tests whether a Lifecycle can be registered.
func TestLifecycleRegistry_Successful(t *testing.T) {
	stack, err := New(testNodeConfig())
	if err != nil {
		t.Fatalf("failed to create protocol stack: %v", err)
	}
	defer stack.Close()

	noop := NewNoop()
	stack.RegisterLifecycle(noop)

	if !containsLifecycle(stack.lifecycles, noop) {
		t.Fatal("lifecycle was not properly registered on the node")
	}
}

// Tests whether a service's protocols can be registered properly on the node's p2p server.
func TestRegisterProtocols(t *testing.T) {
	stack, err := New(testNodeConfig())
	if err != nil {
		t.Fatalf("failed to create protocol stack: %v", err)
	}
	defer stack.Close()

	fs, err := NewFullService(stack)
	if err != nil {
		t.Fatalf("could not create full service: %v", err)
	}

	for _, protocol := range fs.Protocols() {
		if !containsProtocol(stack.server.Protocols, protocol) {
			t.Fatalf("protocol %v was not successfully registered", protocol)
		}
	}

	for _, api := range fs.APIs() {
		if !containsAPI(stack.rpcAPIs, api) {
			t.Fatalf("api %v was not successfully registered", api)
		}
	}
}

// This test checks that open databases are closed when the node is closed.
func TestNodeCloseClosesDB(t *testing.T) {
	stack, _ := New(testNodeConfig())
	defer stack.Close()

	db, err := stack.OpenDatabase("mydb", 0, 0, "")
	if err != nil {
		t.Fatal("can't open DB:", err)
	}
	if err = db.Put([]byte{}, []byte{}); err != nil {
		t.Fatal("can't Put on open DB:", err)
	}

	stack.Close()
	if err = db.Put([]byte{}, []byte{}); err == nil {
		t.Fatal("Put succeeded after node is closed")
	}
}

// This test checks that OpenDatabase can be used from within a Lifecycle Start method.
func TestNodeOpenDatabaseFromLifecycleStart(t *testing.T) {
	stack, _ := New(testNodeConfig())
	defer stack.Close()

	var db xcbdb.Database
	var err error
	stack.RegisterLifecycle(&InstrumentedService{
		startHook: func() {
			db, err = stack.OpenDatabase("mydb", 0, 0, "")
			if err != nil {
				t.Fatal("can't open DB:", err)
			}
		},
		stopHook: func() {
			db.Close()
		},
	})

	stack.Start()
	stack.Close()
}

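// The database tests above only exercise Put and the close-with-the-node behaviour of
// stack.OpenDatabase. The sketch below is not part of the original suite; it is a
// hedged illustration of a simple round trip, assuming xcbdb.Database also exposes a
// Get method alongside Put and Close (as its go-ethereum ethdb ancestor does). The
// function name openDatabaseSketch is hypothetical.
func openDatabaseSketch(stack *Node) error {
	// Open (or create) a named key-value database under the node's data directory.
	db, err := stack.OpenDatabase("sketchdb", 0, 0, "")
	if err != nil {
		return err
	}
	defer db.Close()

	// Write a value and read it back.
	if err := db.Put([]byte("key"), []byte("value")); err != nil {
		return err
	}
	_, err = db.Get([]byte("key"))
	return err
}
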
// This test checks that OpenDatabase can be used from within a Lifecycle Stop method.
func TestNodeOpenDatabaseFromLifecycleStop(t *testing.T) {
	stack, _ := New(testNodeConfig())
	defer stack.Close()

	stack.RegisterLifecycle(&InstrumentedService{
		stopHook: func() {
			db, err := stack.OpenDatabase("mydb", 0, 0, "")
			if err != nil {
				t.Fatal("can't open DB:", err)
			}
			db.Close()
		},
	})

	stack.Start()
	stack.Close()
}

// Tests that registered Lifecycles get started and stopped correctly.
func TestLifecycleLifeCycle(t *testing.T) {
	stack, _ := New(testNodeConfig())
	defer stack.Close()

	started := make(map[string]bool)
	stopped := make(map[string]bool)

	// Create a batch of instrumented services
	lifecycles := map[string]Lifecycle{
		"A": &InstrumentedService{
			startHook: func() { started["A"] = true },
			stopHook:  func() { stopped["A"] = true },
		},
		"B": &InstrumentedService{
			startHook: func() { started["B"] = true },
			stopHook:  func() { stopped["B"] = true },
		},
		"C": &InstrumentedService{
			startHook: func() { started["C"] = true },
			stopHook:  func() { stopped["C"] = true },
		},
	}
	// register lifecycles on node
	for _, lifecycle := range lifecycles {
		stack.RegisterLifecycle(lifecycle)
	}
	// Start the node and check that all services are running
	if err := stack.Start(); err != nil {
		t.Fatalf("failed to start protocol stack: %v", err)
	}
	for id := range lifecycles {
		if !started[id] {
			t.Fatalf("service %s: freshly started service not running", id)
		}
		if stopped[id] {
			t.Fatalf("service %s: freshly started service already stopped", id)
		}
	}
	// Stop the node and check that all services have been stopped
	if err := stack.Close(); err != nil {
		t.Fatalf("failed to stop protocol stack: %v", err)
	}
	for id := range lifecycles {
		if !stopped[id] {
			t.Fatalf("service %s: freshly terminated service still running", id)
		}
	}
}

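// The lifecycle tests above rely on InstrumentedService and NewNoop, which live in a
// sibling *_test.go file of this package. As a hedged, illustrative sketch only (not
// the package's own helper), and assuming the Lifecycle interface consists of
// Start() error and Stop() error as in the go-ethereum node package this code derives
// from, a minimal implementation wired into a stack might look like this. The names
// sketchLifecycle and registerSketchLifecycle are hypothetical.
type sketchLifecycle struct {
	running bool
}

func (s *sketchLifecycle) Start() error {
	s.running = true // bring the service online when the node starts
	return nil
}

func (s *sketchLifecycle) Stop() error {
	s.running = false // release resources when the node shuts down
	return nil
}

// registerSketchLifecycle wires the sketch into a stack before Start is called,
// mirroring what the tests above do with their instrumented services.
func registerSketchLifecycle(stack *Node) *sketchLifecycle {
	l := &sketchLifecycle{}
	stack.RegisterLifecycle(l)
	return l
}
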
// Tests that if a Lifecycle fails to start, all others started before it will be
// shut down.
func TestLifecycleStartupError(t *testing.T) {
	stack, err := New(testNodeConfig())
	if err != nil {
		t.Fatalf("failed to create protocol stack: %v", err)
	}
	defer stack.Close()

	started := make(map[string]bool)
	stopped := make(map[string]bool)

	// Create a batch of instrumented services
	lifecycles := map[string]Lifecycle{
		"A": &InstrumentedService{
			startHook: func() { started["A"] = true },
			stopHook:  func() { stopped["A"] = true },
		},
		"B": &InstrumentedService{
			startHook: func() { started["B"] = true },
			stopHook:  func() { stopped["B"] = true },
		},
		"C": &InstrumentedService{
			startHook: func() { started["C"] = true },
			stopHook:  func() { stopped["C"] = true },
		},
	}
	// register lifecycles on node
	for _, lifecycle := range lifecycles {
		stack.RegisterLifecycle(lifecycle)
	}

	// Register a service that fails to start
	failure := errors.New("fail")
	failer := &InstrumentedService{start: failure}
	stack.RegisterLifecycle(failer)

	// Start the protocol stack and ensure all started services stop
	if err := stack.Start(); err != failure {
		t.Fatalf("stack startup failure mismatch: have %v, want %v", err, failure)
	}
	for id := range lifecycles {
		if started[id] && !stopped[id] {
			t.Fatalf("service %s: started but not stopped", id)
		}
		delete(started, id)
		delete(stopped, id)
	}
}

// Tests that even if a registered Lifecycle fails to shut down cleanly, it does
// not influence the rest of the shutdown invocations.
func TestLifecycleTerminationGuarantee(t *testing.T) {
	stack, err := New(testNodeConfig())
	if err != nil {
		t.Fatalf("failed to create protocol stack: %v", err)
	}
	defer stack.Close()

	started := make(map[string]bool)
	stopped := make(map[string]bool)

	// Create a batch of instrumented services
	lifecycles := map[string]Lifecycle{
		"A": &InstrumentedService{
			startHook: func() { started["A"] = true },
			stopHook:  func() { stopped["A"] = true },
		},
		"B": &InstrumentedService{
			startHook: func() { started["B"] = true },
			stopHook:  func() { stopped["B"] = true },
		},
		"C": &InstrumentedService{
			startHook: func() { started["C"] = true },
			stopHook:  func() { stopped["C"] = true },
		},
	}
	// register lifecycles on node
	for _, lifecycle := range lifecycles {
		stack.RegisterLifecycle(lifecycle)
	}

	// Register a service that fails to shut down cleanly
	failure := errors.New("fail")
	failer := &InstrumentedService{stop: failure}
	stack.RegisterLifecycle(failer)

	// Start the protocol stack and make sure all lifecycles come online
	if err := stack.Start(); err != nil {
		t.Fatalf("failed to start protocol stack: %v", err)
	}
	for id := range lifecycles {
		if !started[id] {
			t.Fatalf("service %s: service not running", id)
		}
		if stopped[id] {
			t.Fatalf("service %s: service already stopped", id)
		}
	}
	// Stop the stack, verify failure and check all terminations
	err = stack.Close()
	if err, ok := err.(*StopError); !ok {
		t.Fatalf("termination failure mismatch: have %v, want StopError", err)
	} else {
		failer := reflect.TypeOf(&InstrumentedService{})
		if err.Services[failer] != failure {
			t.Fatalf("failer termination failure mismatch: have %v, want %v", err.Services[failer], failure)
		}
		if len(err.Services) != 1 {
			t.Fatalf("failure count mismatch: have %d, want %d", len(err.Services), 1)
		}
	}
	for id := range lifecycles {
		if !stopped[id] {
			t.Fatalf("service %s: service not terminated", id)
		}
		delete(started, id)
		delete(stopped, id)
	}

	stack.server = &p2p.Server{}
	stack.server.PrivateKey = testNodeKey
}

// Tests whether a handler can be successfully mounted on the canonical HTTP server
// on the given prefix.
func TestRegisterHandler_Successful(t *testing.T) {
	node := createNode(t, 7878, 7979)

	// create and mount handler
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("success"))
	})
	node.RegisterHandler("test", "/test", handler)

	// start node
	if err := node.Start(); err != nil {
		t.Fatalf("could not start node: %v", err)
	}

	// create HTTP request
	httpReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:7878/test", nil)
	if err != nil {
		t.Fatalf("could not issue new http request: %v", err)
	}

	// check response
	resp := doHTTPRequest(t, httpReq)
	buf := make([]byte, 7)
	_, err = io.ReadFull(resp.Body, buf)
	if err != nil {
		t.Fatalf("could not read response: %v", err)
	}
	assert.Equal(t, "success", string(buf))
}

// Tests that the given handler will not be successfully mounted since no HTTP server
// is enabled for RPC.
func TestRegisterHandler_Unsuccessful(t *testing.T) {
	node, err := New(&DefaultConfig)
	if err != nil {
		t.Fatalf("could not create new node: %v", err)
	}

	// create and mount handler
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("success"))
	})
	node.RegisterHandler("test", "/test", handler)
}

// Tests whether websocket requests can be handled on the same port as a regular http server.
func TestWebsocketHTTPOnSamePort_WebsocketRequest(t *testing.T) {
	node := startHTTP(t, 0, 0)
	defer node.Close()

	ws := strings.Replace(node.HTTPEndpoint(), "http://", "ws://", 1)

	if node.WSEndpoint() != ws {
		t.Fatalf("endpoints should be the same")
	}
	if !checkRPC(ws) {
		t.Fatalf("ws request failed")
	}
	if !checkRPC(node.HTTPEndpoint()) {
		t.Fatalf("http request failed")
	}
}

func TestWebsocketHTTPOnSeparatePort_WSRequest(t *testing.T) {
	// try and get a free port
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal("can't listen:", err)
	}
	port := listener.Addr().(*net.TCPAddr).Port
	listener.Close()

	node := startHTTP(t, 0, port)
	defer node.Close()

	wsOnHTTP := strings.Replace(node.HTTPEndpoint(), "http://", "ws://", 1)
	ws := fmt.Sprintf("ws://127.0.0.1:%d", port)

	if node.WSEndpoint() == wsOnHTTP {
		t.Fatalf("endpoints should not be the same")
	}
	// ensure ws endpoint matches the expected endpoint
	if node.WSEndpoint() != ws {
		t.Fatalf("ws endpoint is incorrect: expected %s, got %s", ws, node.WSEndpoint())
	}

	if !checkRPC(ws) {
		t.Fatalf("ws request failed")
	}
	if !checkRPC(node.HTTPEndpoint()) {
		t.Fatalf("http request failed")
	}
}

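// checkRPC used above is defined elsewhere in this package. The sketch below is not
// that helper; it is a hedged illustration of an equivalent probe, assuming rpc.Dial
// accepts both http:// and ws:// URLs and that a built-in method such as rpc_modules
// is served (both hold for the go-ethereum rpc package this client derives from). The
// function name checkRPCSketch is hypothetical.
func checkRPCSketch(endpoint string) bool {
	client, err := rpc.Dial(endpoint)
	if err != nil {
		return false
	}
	defer client.Close()

	// Any successful round trip proves the endpoint is serving JSON-RPC.
	var result map[string]string
	return client.Call(&result, "rpc_modules") == nil
}
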
type rpcPrefixTest struct {
	httpPrefix, wsPrefix string
	// These lists contain the paths on which JSON-RPC should / should not be served.
	wantHTTP   []string
	wantNoHTTP []string
	wantWS     []string
	wantNoWS   []string
}

func TestNodeRPCPrefix(t *testing.T) {
	t.Parallel()

	tests := []rpcPrefixTest{
		// both off
		{
			httpPrefix: "", wsPrefix: "",
			wantHTTP:   []string{"/", "/?p=1"},
			wantNoHTTP: []string{"/test", "/test?p=1"},
			wantWS:     []string{"/", "/?p=1"},
			wantNoWS:   []string{"/test", "/test?p=1"},
		},
		// only http prefix
		{
			httpPrefix: "/testprefix", wsPrefix: "",
			wantHTTP:   []string{"/testprefix", "/testprefix?p=1", "/testprefix/x", "/testprefix/x?p=1"},
			wantNoHTTP: []string{"/", "/?p=1", "/test", "/test?p=1"},
			wantWS:     []string{"/", "/?p=1"},
			wantNoWS:   []string{"/testprefix", "/testprefix?p=1", "/test", "/test?p=1"},
		},
		// only ws prefix
		{
			httpPrefix: "", wsPrefix: "/testprefix",
			wantHTTP:   []string{"/", "/?p=1"},
			wantNoHTTP: []string{"/testprefix", "/testprefix?p=1", "/test", "/test?p=1"},
			wantWS:     []string{"/testprefix", "/testprefix?p=1", "/testprefix/x", "/testprefix/x?p=1"},
			wantNoWS:   []string{"/", "/?p=1", "/test", "/test?p=1"},
		},
		// both set
		{
			httpPrefix: "/testprefix", wsPrefix: "/testprefix",
			wantHTTP:   []string{"/testprefix", "/testprefix?p=1", "/testprefix/x", "/testprefix/x?p=1"},
			wantNoHTTP: []string{"/", "/?p=1", "/test", "/test?p=1"},
			wantWS:     []string{"/testprefix", "/testprefix?p=1", "/testprefix/x", "/testprefix/x?p=1"},
			wantNoWS:   []string{"/", "/?p=1", "/test", "/test?p=1"},
		},
	}

	for _, test := range tests {
		test := test
		name := fmt.Sprintf("http=%s ws=%s", test.httpPrefix, test.wsPrefix)
		t.Run(name, func(t *testing.T) {
			cfg := &Config{
				HTTPHost:       "127.0.0.1",
				HTTPPathPrefix: test.httpPrefix,
				WSHost:         "127.0.0.1",
				WSPathPrefix:   test.wsPrefix,
			}
			node, err := New(cfg)
			if err != nil {
				t.Fatal("can't create node:", err)
			}
			defer node.Close()
			if err := node.Start(); err != nil {
				t.Fatal("can't start node:", err)
			}
			test.check(t, node)
		})
	}
}

func (test rpcPrefixTest) check(t *testing.T, node *Node) {
	t.Helper()
	httpBase := "http://" + node.http.listenAddr()
	wsBase := "ws://" + node.http.listenAddr()

	if node.WSEndpoint() != wsBase+test.wsPrefix {
		t.Errorf("Error: node has wrong WSEndpoint %q", node.WSEndpoint())
	}

	for _, path := range test.wantHTTP {
		resp := rpcRequest(t, httpBase+path)
		if resp.StatusCode != 200 {
			t.Errorf("Error: %s: bad status code %d, want 200", path, resp.StatusCode)
		}
	}
	for _, path := range test.wantNoHTTP {
		resp := rpcRequest(t, httpBase+path)
		if resp.StatusCode != 404 {
			t.Errorf("Error: %s: bad status code %d, want 404", path, resp.StatusCode)
		}
	}
	for _, path := range test.wantWS {
		err := wsRequest(t, wsBase+path)
		if err != nil {
			t.Errorf("Error: %s: WebSocket connection failed: %v", path, err)
		}
	}
	for _, path := range test.wantNoWS {
		err := wsRequest(t, wsBase+path)
		if err == nil {
			t.Errorf("Error: %s: WebSocket connection succeeded for path in wantNoWS", path)
		}
	}
}

func createNode(t *testing.T, httpPort, wsPort int) *Node {
	conf := &Config{
		HTTPHost: "127.0.0.1",
		HTTPPort: httpPort,
		WSHost:   "127.0.0.1",
		WSPort:   wsPort,
	}
	node, err := New(conf)
	if err != nil {
		t.Fatalf("could not create a new node: %v", err)
	}
	return node
}

func startHTTP(t *testing.T, httpPort, wsPort int) *Node {
	node := createNode(t, httpPort, wsPort)
	err := node.Start()
	if err != nil {
		t.Fatalf("could not start http service on node: %v", err)
	}

	return node
}

func doHTTPRequest(t *testing.T, req *http.Request) *http.Response {
	client := http.DefaultClient
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("could not issue a GET request to the given endpoint: %v", err)
	}
	return resp
}

func containsProtocol(stackProtocols []p2p.Protocol, protocol p2p.Protocol) bool {
	for _, a := range stackProtocols {
		if reflect.DeepEqual(a, protocol) {
			return true
		}
	}
	return false
}

func containsAPI(stackAPIs []rpc.API, api rpc.API) bool {
	for _, a := range stackAPIs {
		if reflect.DeepEqual(a, api) {
			return true
		}
	}
	return false
}
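
// rpcRequest, used by the prefix checks above, is provided elsewhere in this package's
// HTTP/WS server tests. The sketch below is not that helper; it is a hedged,
// hypothetical illustration of an equivalent JSON-RPC POST built only from the helpers
// and imports already present in this file. The name rpcRequestSketch is hypothetical.
func rpcRequestSketch(t *testing.T, url string) *http.Response {
	t.Helper()
	// A minimal JSON-RPC 2.0 request body; rpc_modules is assumed to be served.
	body := strings.NewReader(`{"jsonrpc":"2.0","id":1,"method":"rpc_modules","params":[]}`)
	req, err := http.NewRequest(http.MethodPost, url, body)
	if err != nil {
		t.Fatalf("could not build request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")
	return doHTTPRequest(t, req)
}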