github.com/kisexp/xdchain@v0.0.0-20211206025815-490d6b732aa7/node/node.go (about) 1 // Copyright 2015 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package node 18 19 import ( 20 "crypto/ecdsa" 21 "errors" 22 "fmt" 23 "net/http" 24 "os" 25 "path/filepath" 26 "reflect" 27 "strings" 28 "sync" 29 30 "github.com/kisexp/xdchain/accounts" 31 "github.com/kisexp/xdchain/core/rawdb" 32 "github.com/kisexp/xdchain/core/types" 33 "github.com/kisexp/xdchain/ethdb" 34 "github.com/kisexp/xdchain/event" 35 "github.com/kisexp/xdchain/log" 36 "github.com/kisexp/xdchain/p2p" 37 "github.com/kisexp/xdchain/plugin" 38 "github.com/kisexp/xdchain/plugin/security" 39 "github.com/kisexp/xdchain/rpc" 40 "github.com/prometheus/tsdb/fileutil" 41 ) 42 43 // Node is a container on which services can be registered. 
44 type Node struct { 45 eventmux *event.TypeMux 46 config *Config 47 accman *accounts.Manager 48 log log.Logger 49 ephemKeystore string // if non-empty, the key directory that will be removed by Stop 50 dirLock fileutil.Releaser // prevents concurrent use of instance directory 51 stop chan struct{} // Channel to wait for termination notifications 52 server *p2p.Server // Currently running P2P networking layer 53 startStopLock sync.Mutex // Start/Stop are protected by an additional lock 54 state int // Tracks state of node lifecycle 55 56 lock sync.Mutex 57 lifecycles []Lifecycle // All registered backends, services, and auxiliary services that have a lifecycle 58 rpcAPIs []rpc.API // List of APIs currently provided by the node 59 http *httpServer // 60 ws *httpServer // 61 ipc *ipcServer // Stores information about the ipc http server 62 inprocHandler *rpc.Server // In-process RPC request handler to process the API requests 63 64 databases map[*closeTrackingDB]struct{} // All open databases 65 66 // Quorum 67 pluginManager *plugin.PluginManager // Manage all plugins for this node. If plugin is not enabled, an EmptyPluginManager is set. 68 // End Quorum 69 } 70 71 const ( 72 initializingState = iota 73 runningState 74 closedState 75 ) 76 77 // New creates a new P2P node, ready for protocol registration. 78 func New(conf *Config) (*Node, error) { 79 // Copy config and resolve the datadir so future changes to the current 80 // working directory don't affect the node. 81 confCopy := *conf 82 conf = &confCopy 83 if conf.DataDir != "" { 84 absdatadir, err := filepath.Abs(conf.DataDir) 85 if err != nil { 86 return nil, err 87 } 88 conf.DataDir = absdatadir 89 } 90 if conf.Logger == nil { 91 conf.Logger = log.New() 92 } 93 94 // Ensure that the instance name doesn't cause weird conflicts with 95 // other files in the data directory. 
96 if strings.ContainsAny(conf.Name, `/\`) { 97 return nil, errors.New(`Config.Name must not contain '/' or '\'`) 98 } 99 if conf.Name == datadirDefaultKeyStore { 100 return nil, errors.New(`Config.Name cannot be "` + datadirDefaultKeyStore + `"`) 101 } 102 if strings.HasSuffix(conf.Name, ".ipc") { 103 return nil, errors.New(`Config.Name cannot end in ".ipc"`) 104 } 105 106 node := &Node{ 107 config: conf, 108 inprocHandler: rpc.NewProtectedServer(nil, conf.EnableMultitenancy), 109 eventmux: new(event.TypeMux), 110 log: conf.Logger, 111 stop: make(chan struct{}), 112 server: &p2p.Server{Config: conf.P2P}, 113 databases: make(map[*closeTrackingDB]struct{}), 114 pluginManager: plugin.NewEmptyPluginManager(), 115 } 116 117 // Register built-in APIs. 118 node.rpcAPIs = append(node.rpcAPIs, node.apis()...) 119 120 // Acquire the instance directory lock. 121 if err := node.openDataDir(); err != nil { 122 return nil, err 123 } 124 // Ensure that the AccountManager method works before the node has started. We rely on 125 // this in cmd/geth. 126 am, ephemeralKeystore, err := makeAccountManager(conf) 127 if err != nil { 128 return nil, err 129 } 130 node.accman = am 131 node.ephemKeystore = ephemeralKeystore 132 133 // Initialize the p2p server. This creates the node key and discovery databases. 
134 node.server.Config.PrivateKey = node.config.NodeKey() 135 node.server.Config.Name = node.config.NodeName() 136 node.server.Config.Logger = node.log 137 if node.server.Config.StaticNodes == nil { 138 node.server.Config.StaticNodes = node.config.StaticNodes() 139 } 140 if node.server.Config.TrustedNodes == nil { 141 node.server.Config.TrustedNodes = node.config.TrustedNodes() 142 } 143 if node.server.Config.NodeDatabase == "" { 144 node.server.Config.NodeDatabase = node.config.NodeDB() 145 } 146 147 // Quorum 148 node.server.Config.EnableNodePermission = node.config.EnableNodePermission 149 node.server.Config.DataDir = node.config.DataDir 150 // End Quorum 151 152 // Configure RPC servers. 153 node.http = newHTTPServer(node.log, conf.HTTPTimeouts).withMultitenancy(node.config.EnableMultitenancy) 154 node.ws = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts).withMultitenancy(node.config.EnableMultitenancy) 155 node.ipc = newIPCServer(node.log, conf.IPCEndpoint()).withMultitenancy(node.config.EnableMultitenancy) 156 157 return node, nil 158 } 159 160 // Start starts all registered lifecycles, RPC services and p2p networking. 161 // Node can only be started once. 162 func (n *Node) Start() error { 163 n.startStopLock.Lock() 164 defer n.startStopLock.Unlock() 165 166 n.lock.Lock() 167 switch n.state { 168 case runningState: 169 n.lock.Unlock() 170 return ErrNodeRunning 171 case closedState: 172 n.lock.Unlock() 173 return ErrNodeStopped 174 } 175 n.state = runningState 176 177 // Quorum 178 // Start the plugin manager before as might be needed for TLS and Auth manager for networking/rpc. 179 if err := n.PluginManager().Start(); err != nil { 180 n.doClose(nil) 181 return err 182 } 183 // End Quorum 184 185 err := n.startNetworking() 186 lifecycles := make([]Lifecycle, len(n.lifecycles)) 187 copy(lifecycles, n.lifecycles) 188 n.lock.Unlock() 189 190 // Check if networking startup failed. 
191 if err != nil { 192 n.doClose(nil) 193 return err 194 } 195 // Start all registered lifecycles. 196 var started []Lifecycle 197 for _, lifecycle := range lifecycles { 198 if err = lifecycle.Start(); err != nil { 199 break 200 } 201 started = append(started, lifecycle) 202 } 203 // Check if any lifecycle failed to start. 204 if err != nil { 205 n.stopServices(started) 206 n.doClose(nil) 207 } 208 return err 209 } 210 211 // Close stops the Node and releases resources acquired in 212 // Node constructor New. 213 func (n *Node) Close() error { 214 n.startStopLock.Lock() 215 defer n.startStopLock.Unlock() 216 217 n.lock.Lock() 218 state := n.state 219 n.lock.Unlock() 220 switch state { 221 case initializingState: 222 // The node was never started. 223 return n.doClose(nil) 224 case runningState: 225 // The node was started, release resources acquired by Start(). 226 var errs []error 227 if err := n.stopServices(n.lifecycles); err != nil { 228 errs = append(errs, err) 229 } 230 return n.doClose(errs) 231 case closedState: 232 return ErrNodeStopped 233 default: 234 panic(fmt.Sprintf("node is in unknown state %d", state)) 235 } 236 } 237 238 // doClose releases resources acquired by New(), collecting errors. 239 func (n *Node) doClose(errs []error) error { 240 // Close databases. This needs the lock because it needs to 241 // synchronize with OpenDatabase*. 242 n.lock.Lock() 243 n.state = closedState 244 errs = append(errs, n.closeDatabases()...) 245 n.lock.Unlock() 246 247 if err := n.accman.Close(); err != nil { 248 errs = append(errs, err) 249 } 250 if n.ephemKeystore != "" { 251 if err := os.RemoveAll(n.ephemKeystore); err != nil { 252 errs = append(errs, err) 253 } 254 } 255 256 // Release instance directory lock. 257 n.closeDataDir() 258 259 // Unblock n.Wait. 260 close(n.stop) 261 262 // Report any errors that might have occurred. 
263 switch len(errs) { 264 case 0: 265 return nil 266 case 1: 267 return errs[0] 268 default: 269 return fmt.Errorf("%v", errs) 270 } 271 } 272 273 // startNetworking starts all network endpoints. 274 func (n *Node) startNetworking() error { 275 n.log.Info("Starting peer-to-peer node", "instance", n.server.Name) 276 if err := n.server.Start(); err != nil { 277 return convertFileLockError(err) 278 } 279 err := n.startRPC() 280 if err != nil { 281 n.stopRPC() 282 n.server.Stop() 283 } 284 return err 285 } 286 287 // containsLifecycle checks if 'lfs' contains 'l'. 288 func containsLifecycle(lfs []Lifecycle, l Lifecycle) bool { 289 for _, obj := range lfs { 290 if obj == l { 291 return true 292 } 293 } 294 return false 295 } 296 297 // stopServices terminates running services, RPC and p2p networking. 298 // It is the inverse of Start. 299 func (n *Node) stopServices(running []Lifecycle) error { 300 n.stopRPC() 301 302 // Stop running lifecycles in reverse order. 303 failure := &StopError{Services: make(map[reflect.Type]error)} 304 // Quorum 305 if err := n.PluginManager().Stop(); err != nil { 306 failure.Services[reflect.TypeOf(n.PluginManager())] = err 307 } 308 // End Quorum 309 for i := len(running) - 1; i >= 0; i-- { 310 if err := running[i].Stop(); err != nil { 311 failure.Services[reflect.TypeOf(running[i])] = err 312 } 313 } 314 315 // Stop p2p networking. 316 n.server.Stop() 317 318 if len(failure.Services) > 0 { 319 return failure 320 } 321 return nil 322 } 323 324 func (n *Node) openDataDir() error { 325 if n.config.DataDir == "" { 326 return nil // ephemeral 327 } 328 329 instdir := filepath.Join(n.config.DataDir, n.config.name()) 330 if err := os.MkdirAll(instdir, 0700); err != nil { 331 return err 332 } 333 // Lock the instance directory to prevent concurrent use by another instance as well as 334 // accidental use of the instance directory as a database. 
335 release, _, err := fileutil.Flock(filepath.Join(instdir, "LOCK")) 336 if err != nil { 337 return convertFileLockError(err) 338 } 339 n.dirLock = release 340 return nil 341 } 342 343 func (n *Node) closeDataDir() { 344 // Release instance directory lock. 345 if n.dirLock != nil { 346 if err := n.dirLock.Release(); err != nil { 347 n.log.Error("Can't release datadir lock", "err", err) 348 } 349 n.dirLock = nil 350 } 351 } 352 353 // configureRPC is a helper method to configure all the various RPC endpoints during node 354 // startup. It's not meant to be called at any time afterwards as it makes certain 355 // assumptions about the state of the node. 356 // Quorum 357 // 1. Inject mutlitenancy flag into rpc server when appropriate 358 func (n *Node) startRPC() error { 359 if err := n.startInProc(); err != nil { 360 return err 361 } 362 363 // Configure IPC. 364 if n.ipc.endpoint != "" { 365 if err := n.ipc.start(n.rpcAPIs); err != nil { 366 return err 367 } 368 } 369 370 tls, auth, err := n.GetSecuritySupports() 371 if err != nil { 372 return err 373 } 374 375 // Configure HTTP. 376 if n.config.HTTPHost != "" { 377 config := httpConfig{ 378 CorsAllowedOrigins: n.config.HTTPCors, 379 Vhosts: n.config.HTTPVirtualHosts, 380 Modules: n.config.HTTPModules, 381 } 382 server := n.http 383 if err := server.setListenAddr(n.config.HTTPHost, n.config.HTTPPort); err != nil { 384 return err 385 } 386 if err := server.enableRPC(n.rpcAPIs, config, auth); err != nil { 387 return err 388 } 389 } 390 391 // Configure WebSocket. 
392 if n.config.WSHost != "" { 393 server := n.wsServerForPort(n.config.WSPort) 394 config := wsConfig{ 395 Modules: n.config.WSModules, 396 Origins: n.config.WSOrigins, 397 } 398 if err := server.setListenAddr(n.config.WSHost, n.config.WSPort); err != nil { 399 return err 400 } 401 if err := server.enableWS(n.rpcAPIs, config, auth); err != nil { 402 return err 403 } 404 } 405 406 if err := n.http.start(tls); err != nil { 407 return err 408 } 409 return n.ws.start(tls) 410 } 411 412 func (n *Node) wsServerForPort(port int) *httpServer { 413 if n.config.HTTPHost == "" || n.http.port == port { 414 return n.http 415 } 416 return n.ws 417 } 418 419 func (n *Node) stopRPC() { 420 n.http.stop() 421 n.ws.stop() 422 n.ipc.stop() 423 n.stopInProc() 424 } 425 426 // startInProc registers all RPC APIs on the inproc server. 427 // Quorum 428 // 1. Inject mutlitenancy flag into rpc server 429 func (n *Node) startInProc() error { 430 for _, api := range n.rpcAPIs { 431 if err := n.inprocHandler.RegisterName(api.Namespace, api.Service); err != nil { 432 return err 433 } 434 } 435 return n.eventmux.Post(rpc.InProcServerReadyEvent{}) 436 } 437 438 // stopInProc terminates the in-process RPC endpoint. 439 func (n *Node) stopInProc() { 440 n.inprocHandler.Stop() 441 } 442 443 // Wait blocks until the node is closed. 444 func (n *Node) Wait() { 445 <-n.stop 446 } 447 448 // RegisterLifecycle registers the given Lifecycle on the node. 449 func (n *Node) RegisterLifecycle(lifecycle Lifecycle) { 450 n.lock.Lock() 451 defer n.lock.Unlock() 452 453 if n.state != initializingState { 454 panic("can't register lifecycle on running/stopped node") 455 } 456 if containsLifecycle(n.lifecycles, lifecycle) { 457 panic(fmt.Sprintf("attempt to register lifecycle %T more than once", lifecycle)) 458 } 459 n.lifecycles = append(n.lifecycles, lifecycle) 460 } 461 462 // RegisterProtocols adds backend's protocols to the node's p2p server. 
463 func (n *Node) RegisterProtocols(protocols []p2p.Protocol) { 464 n.lock.Lock() 465 defer n.lock.Unlock() 466 467 if n.state != initializingState { 468 panic("can't register protocols on running/stopped node") 469 } 470 n.server.Protocols = append(n.server.Protocols, protocols...) 471 } 472 473 // RegisterAPIs registers the APIs a service provides on the node. 474 func (n *Node) RegisterAPIs(apis []rpc.API) { 475 n.lock.Lock() 476 defer n.lock.Unlock() 477 478 if n.state != initializingState { 479 panic("can't register APIs on running/stopped node") 480 } 481 n.rpcAPIs = append(n.rpcAPIs, apis...) 482 } 483 484 // RegisterHandler mounts a handler on the given path on the canonical HTTP server. 485 // 486 // The name of the handler is shown in a log message when the HTTP server starts 487 // and should be a descriptive term for the service provided by the handler. 488 func (n *Node) RegisterHandler(name, path string, handler http.Handler) { 489 n.lock.Lock() 490 defer n.lock.Unlock() 491 492 if n.state != initializingState { 493 panic("can't register HTTP handler on running/stopped node") 494 } 495 n.http.mux.Handle(path, handler) 496 n.http.handlerNames[path] = name 497 } 498 499 // Attach creates an RPC client attached to an in-process API handler. 500 func (n *Node) Attach() (*rpc.Client, error) { 501 return rpc.DialInProc(n.inprocHandler), nil 502 } 503 504 // AttachWithPSI creates a PSI-specific RPC client attached to an in-process API handler. 505 func (n *Node) AttachWithPSI(psi types.PrivateStateIdentifier) (*rpc.Client, error) { 506 client, err := n.Attach() 507 if err != nil { 508 return nil, err 509 } 510 return client.WithPSI(psi), nil 511 } 512 513 // RPCHandler returns the in-process RPC request handler. 
// RPCHandler returns the in-process RPC request handler, or ErrNodeStopped
// once the node has been closed.
func (n *Node) RPCHandler() (*rpc.Server, error) {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.state == closedState {
		return nil, ErrNodeStopped
	}
	return n.inprocHandler, nil
}

// Config returns the configuration of node.
func (n *Node) Config() *Config {
	return n.config
}

// Server retrieves the currently running P2P network layer. This method is meant
// only to inspect fields of the currently running server. Callers should not
// start or stop the returned server.
func (n *Node) Server() *p2p.Server {
	n.lock.Lock()
	defer n.lock.Unlock()

	return n.server
}

// DataDir retrieves the current datadir used by the protocol stack.
// Deprecated: No files should be stored in this directory, use InstanceDir instead.
func (n *Node) DataDir() string {
	return n.config.DataDir
}

// InstanceDir retrieves the instance directory used by the protocol stack.
func (n *Node) InstanceDir() string {
	return n.config.instanceDir()
}

// AccountManager retrieves the account manager used by the protocol stack.
func (n *Node) AccountManager() *accounts.Manager {
	return n.accman
}

// IPCEndpoint retrieves the current IPC endpoint used by the protocol stack.
func (n *Node) IPCEndpoint() string {
	return n.ipc.endpoint
}

// HTTPEndpoint returns the URL of the HTTP server.
// NOTE(review): the URL is built from the listener address, so it is only
// meaningful once the HTTP server has been started — confirm callers respect this.
func (n *Node) HTTPEndpoint() string {
	return "http://" + n.http.listenAddr()
}

// WSEndpoint retrieves the current WS endpoint used by the protocol stack.
// When WebSocket is co-hosted on the HTTP server, that server's address is used.
func (n *Node) WSEndpoint() string {
	if n.http.wsAllowed() {
		return "ws://" + n.http.listenAddr()
	}
	return "ws://" + n.ws.listenAddr()
}

// EventMux retrieves the event multiplexer used by all the network services in
// the current protocol stack.
575 func (n *Node) EventMux() *event.TypeMux { 576 return n.eventmux 577 } 578 579 // OpenDatabase opens an existing database with the given name (or creates one if no 580 // previous can be found) from within the node's instance directory. If the node is 581 // ephemeral, a memory database is returned. 582 func (n *Node) OpenDatabase(name string, cache, handles int, namespace string) (ethdb.Database, error) { 583 n.lock.Lock() 584 defer n.lock.Unlock() 585 if n.state == closedState { 586 return nil, ErrNodeStopped 587 } 588 589 var db ethdb.Database 590 var err error 591 if n.config.DataDir == "" { 592 db = rawdb.NewMemoryDatabase() 593 } else { 594 db, err = rawdb.NewLevelDBDatabase(n.ResolvePath(name), cache, handles, namespace) 595 } 596 597 if err == nil { 598 db = n.wrapDatabase(db) 599 } 600 return db, err 601 } 602 603 // OpenDatabaseWithFreezer opens an existing database with the given name (or 604 // creates one if no previous can be found) from within the node's data directory, 605 // also attaching a chain freezer to it that moves ancient chain data from the 606 // database to immutable append-only files. If the node is an ephemeral one, a 607 // memory database is returned. 
608 func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, freezer, namespace string) (ethdb.Database, error) { 609 n.lock.Lock() 610 defer n.lock.Unlock() 611 if n.state == closedState { 612 return nil, ErrNodeStopped 613 } 614 615 var db ethdb.Database 616 var err error 617 if n.config.DataDir == "" { 618 db = rawdb.NewMemoryDatabase() 619 } else { 620 root := n.ResolvePath(name) 621 switch { 622 case freezer == "": 623 freezer = filepath.Join(root, "ancient") 624 case !filepath.IsAbs(freezer): 625 freezer = n.ResolvePath(freezer) 626 } 627 db, err = rawdb.NewLevelDBDatabaseWithFreezer(root, cache, handles, freezer, namespace) 628 } 629 630 if err == nil { 631 db = n.wrapDatabase(db) 632 } 633 return db, err 634 } 635 636 // ResolvePath returns the absolute path of a resource in the instance directory. 637 func (n *Node) ResolvePath(x string) string { 638 return n.config.ResolvePath(x) 639 } 640 641 // closeTrackingDB wraps the Close method of a database. When the database is closed by the 642 // service, the wrapper removes it from the node's database map. This ensures that Node 643 // won't auto-close the database if it is closed by the service that opened it. 644 type closeTrackingDB struct { 645 ethdb.Database 646 n *Node 647 } 648 649 func (db *closeTrackingDB) Close() error { 650 db.n.lock.Lock() 651 delete(db.n.databases, db) 652 db.n.lock.Unlock() 653 return db.Database.Close() 654 } 655 656 // wrapDatabase ensures the database will be auto-closed when Node is closed. 657 func (n *Node) wrapDatabase(db ethdb.Database) ethdb.Database { 658 wrapper := &closeTrackingDB{db, n} 659 n.databases[wrapper] = struct{}{} 660 return wrapper 661 } 662 663 // closeDatabases closes all open databases. 
664 func (n *Node) closeDatabases() (errors []error) { 665 for db := range n.databases { 666 delete(n.databases, db) 667 if err := db.Database.Close(); err != nil { 668 errors = append(errors, err) 669 } 670 } 671 return errors 672 } 673 674 // Quorum 675 func (n *Node) GetSecuritySupports() (tlsConfigSource security.TLSConfigurationSource, authManager security.AuthenticationManager, err error) { 676 if n.pluginManager.IsEnabled(plugin.SecurityPluginInterfaceName) { 677 sp := new(plugin.SecurityPluginTemplate) 678 if err = n.pluginManager.GetPluginTemplate(plugin.SecurityPluginInterfaceName, sp); err != nil { 679 return 680 } 681 if tlsConfigSource, err = sp.TLSConfigurationSource(); err != nil { 682 return 683 } 684 if authManager, err = sp.AuthenticationManager(); err != nil { 685 return 686 } 687 } else { 688 log.Info("Security Plugin is not enabled") 689 } 690 return 691 } 692 693 // Quorum 694 // 695 // delegate call to node.Config 696 func (n *Node) IsPermissionEnabled() bool { 697 return n.config.IsPermissionEnabled() 698 } 699 700 // Quorum 701 // 702 // delegate call to node.Config 703 func (n *Node) GetNodeKey() *ecdsa.PrivateKey { 704 return n.config.NodeKey() 705 } 706 707 // Quorum 708 // 709 // This can be used to inspect plugins used in the current node 710 func (n *Node) PluginManager() *plugin.PluginManager { 711 return n.pluginManager 712 } 713 714 // Quorum 715 // 716 // This can be used to set the plugin manager in the node (replacing the default Empty one) 717 func (n *Node) SetPluginManager(pm *plugin.PluginManager) { 718 n.pluginManager = pm 719 } 720 721 // Quorum 722 // 723 // Lifecycle retrieves a currently lifecycle registered of a specific type. 
724 func (n *Node) Lifecycle(lifecycle interface{}) error { 725 n.lock.Lock() 726 defer n.lock.Unlock() 727 728 // Short circuit if the node's not running 729 if n.server == nil { 730 return ErrNodeStopped 731 } 732 // Otherwise try to find the service to return 733 element := reflect.ValueOf(lifecycle).Elem() 734 for _, runningLifecycle := range n.lifecycles { 735 lElem := reflect.TypeOf(runningLifecycle) 736 if lElem == element.Type() { 737 element.Set(reflect.ValueOf(runningLifecycle)) 738 return nil 739 } 740 } 741 742 return ErrServiceUnknown 743 }