github.com/cryptotooltop/go-ethereum@v0.0.0-20231103184714-151d1922f3e5/node/node.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package node

import (
	"errors"
	"fmt"
	"net/http"
	"os"
	"path/filepath"
	"reflect"
	"strings"
	"sync"

	"github.com/prometheus/tsdb/fileutil"

	"github.com/scroll-tech/go-ethereum/accounts"
	"github.com/scroll-tech/go-ethereum/core/rawdb"
	"github.com/scroll-tech/go-ethereum/ethdb"
	"github.com/scroll-tech/go-ethereum/event"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/p2p"
	"github.com/scroll-tech/go-ethereum/rpc"
)

// Node is a container on which services can be registered.
type Node struct {
	eventmux      *event.TypeMux
	config        *Config
	accman        *accounts.Manager
	log           log.Logger
	keyDir        string            // key store directory
	keyDirTemp    bool              // If true, key directory will be removed by Close
	dirLock       fileutil.Releaser // prevents concurrent use of instance directory
	stop          chan struct{}     // Channel to wait for termination notifications
	server        *p2p.Server       // Currently running P2P networking layer
	startStopLock sync.Mutex        // Start/Stop are protected by an additional lock
	state         int               // Tracks state of node lifecycle

	lock          sync.Mutex
	lifecycles    []Lifecycle // All registered backends, services, and auxiliary services that have a lifecycle
	rpcAPIs       []rpc.API   // List of APIs currently provided by the node
	http          *httpServer // HTTP RPC server
	ws            *httpServer // WebSocket RPC server
	ipc           *ipcServer  // Stores information about the IPC RPC server
	inprocHandler *rpc.Server // In-process RPC request handler to process the API requests

	databases map[*closeTrackingDB]struct{} // All open databases
}

const (
	initializingState = iota
	runningState
	closedState
)
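// A minimal wiring sketch (illustrative, not part of this file): an embedder
// typically creates the node, registers its services while the node is still
// in the initializing state, starts the stack, and waits for shutdown.
// "myService" is a hypothetical Lifecycle implementation supplied by the caller.
//
//	stack, err := node.New(&node.Config{DataDir: "/tmp/mynode"})
//	if err != nil {
//		log.Fatal(err)
//	}
//	stack.RegisterLifecycle(myService) // see RegisterLifecycle below
//	if err := stack.Start(); err != nil {
//		log.Fatal(err)
//	}
//	defer stack.Close()
//	stack.Wait()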
// New creates a new P2P node, ready for protocol registration.
func New(conf *Config) (*Node, error) {
	// Copy config and resolve the datadir so future changes to the current
	// working directory don't affect the node.
	confCopy := *conf
	conf = &confCopy
	if conf.DataDir != "" {
		absdatadir, err := filepath.Abs(conf.DataDir)
		if err != nil {
			return nil, err
		}
		conf.DataDir = absdatadir
	}
	if conf.Logger == nil {
		conf.Logger = log.New()
	}

	// Ensure that the instance name doesn't cause weird conflicts with
	// other files in the data directory.
	if strings.ContainsAny(conf.Name, `/\`) {
		return nil, errors.New(`Config.Name must not contain '/' or '\'`)
	}
	if conf.Name == datadirDefaultKeyStore {
		return nil, errors.New(`Config.Name cannot be "` + datadirDefaultKeyStore + `"`)
	}
	if strings.HasSuffix(conf.Name, ".ipc") {
		return nil, errors.New(`Config.Name cannot end in ".ipc"`)
	}

	node := &Node{
		config:        conf,
		inprocHandler: rpc.NewServer(),
		eventmux:      new(event.TypeMux),
		log:           conf.Logger,
		stop:          make(chan struct{}),
		server:        &p2p.Server{Config: conf.P2P},
		databases:     make(map[*closeTrackingDB]struct{}),
	}

	// Register built-in APIs.
	node.rpcAPIs = append(node.rpcAPIs, node.apis()...)

	// Acquire the instance directory lock.
	if err := node.openDataDir(); err != nil {
		return nil, err
	}
	keyDir, isEphem, err := getKeyStoreDir(conf)
	if err != nil {
		return nil, err
	}
	node.keyDir = keyDir
	node.keyDirTemp = isEphem
	// Creates an empty AccountManager with no backends. Callers (e.g. cmd/geth)
	// are required to add the backends later on.
	node.accman = accounts.NewManager(&accounts.Config{InsecureUnlockAllowed: conf.InsecureUnlockAllowed})

	// Initialize the p2p server. This creates the node key and discovery databases.
	node.server.Config.PrivateKey = node.config.NodeKey()
	node.server.Config.Name = node.config.NodeName()
	node.server.Config.Logger = node.log
	if node.server.Config.StaticNodes == nil {
		node.server.Config.StaticNodes = node.config.StaticNodes()
	}
	if node.server.Config.TrustedNodes == nil {
		node.server.Config.TrustedNodes = node.config.TrustedNodes()
	}
	if node.server.Config.NodeDatabase == "" {
		node.server.Config.NodeDatabase = node.config.NodeDB()
	}

	// Check HTTP/WS prefixes are valid.
	if err := validatePrefix("HTTP", conf.HTTPPathPrefix); err != nil {
		return nil, err
	}
	if err := validatePrefix("WebSocket", conf.WSPathPrefix); err != nil {
		return nil, err
	}

	// Configure RPC servers.
	node.http = newHTTPServer(node.log, conf.HTTPTimeouts)
	node.ws = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts)
	node.ipc = newIPCServer(node.log, conf.IPCEndpoint())

	return node, nil
}

// Start starts all registered lifecycles, RPC services and p2p networking.
// Node can only be started once.
func (n *Node) Start() error {
	n.startStopLock.Lock()
	defer n.startStopLock.Unlock()

	n.lock.Lock()
	switch n.state {
	case runningState:
		n.lock.Unlock()
		return ErrNodeRunning
	case closedState:
		n.lock.Unlock()
		return ErrNodeStopped
	}
	n.state = runningState
	// open networking and RPC endpoints
	err := n.openEndpoints()
	lifecycles := make([]Lifecycle, len(n.lifecycles))
	copy(lifecycles, n.lifecycles)
	n.lock.Unlock()

	// Check if endpoint startup failed.
	if err != nil {
		n.doClose(nil)
		return err
	}
	// Start all registered lifecycles.
	var started []Lifecycle
	for _, lifecycle := range lifecycles {
		if err = lifecycle.Start(); err != nil {
			break
		}
		started = append(started, lifecycle)
	}
	// Check if any lifecycle failed to start.
	if err != nil {
		n.stopServices(started)
		n.doClose(nil)
	}
	return err
}
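// A sketch of what a registered Lifecycle might look like (illustrative only;
// the Lifecycle interface itself is defined elsewhere in this package). Start
// is invoked by Node.Start in registration order, and Stop is invoked in
// reverse order during shutdown (see stopServices below).
//
//	type myService struct{ quit chan struct{} }
//
//	func (s *myService) Start() error {
//		s.quit = make(chan struct{})
//		go s.loop() // hypothetical background loop
//		return nil
//	}
//
//	func (s *myService) Stop() error {
//		close(s.quit)
//		return nil
//	}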
// Close stops the Node and releases resources acquired in
// Node constructor New.
func (n *Node) Close() error {
	n.startStopLock.Lock()
	defer n.startStopLock.Unlock()

	n.lock.Lock()
	state := n.state
	n.lock.Unlock()
	switch state {
	case initializingState:
		// The node was never started.
		return n.doClose(nil)
	case runningState:
		// The node was started, release resources acquired by Start().
		var errs []error
		if err := n.stopServices(n.lifecycles); err != nil {
			errs = append(errs, err)
		}
		return n.doClose(errs)
	case closedState:
		return ErrNodeStopped
	default:
		panic(fmt.Sprintf("node is in unknown state %d", state))
	}
}

// doClose releases resources acquired by New(), collecting errors.
func (n *Node) doClose(errs []error) error {
	// Close databases. This needs the lock because it needs to
	// synchronize with OpenDatabase*.
	n.lock.Lock()
	n.state = closedState
	errs = append(errs, n.closeDatabases()...)
	n.lock.Unlock()

	if err := n.accman.Close(); err != nil {
		errs = append(errs, err)
	}
	if n.keyDirTemp {
		if err := os.RemoveAll(n.keyDir); err != nil {
			errs = append(errs, err)
		}
	}

	// Release instance directory lock.
	n.closeDataDir()

	// Unblock n.Wait.
	close(n.stop)

	// Report any errors that might have occurred.
	switch len(errs) {
	case 0:
		return nil
	case 1:
		return errs[0]
	default:
		return fmt.Errorf("%v", errs)
	}
}

// openEndpoints starts all network and RPC endpoints.
func (n *Node) openEndpoints() error {
	// start networking endpoints
	n.log.Info("Starting peer-to-peer node", "instance", n.server.Name)
	if err := n.server.Start(); err != nil {
		return convertFileLockError(err)
	}
	// start RPC endpoints
	err := n.startRPC()
	if err != nil {
		n.stopRPC()
		n.server.Stop()
	}
	return err
}

// containsLifecycle checks if 'lfs' contains 'l'.
func containsLifecycle(lfs []Lifecycle, l Lifecycle) bool {
	for _, obj := range lfs {
		if obj == l {
			return true
		}
	}
	return false
}

// stopServices terminates running services, RPC and p2p networking.
// It is the inverse of Start.
func (n *Node) stopServices(running []Lifecycle) error {
	n.stopRPC()

	// Stop running lifecycles in reverse order.
	failure := &StopError{Services: make(map[reflect.Type]error)}
	for i := len(running) - 1; i >= 0; i-- {
		if err := running[i].Stop(); err != nil {
			failure.Services[reflect.TypeOf(running[i])] = err
		}
	}

	// Stop p2p networking.
	n.server.Stop()

	if len(failure.Services) > 0 {
		return failure
	}
	return nil
}

func (n *Node) openDataDir() error {
	if n.config.DataDir == "" {
		return nil // ephemeral
	}

	instdir := filepath.Join(n.config.DataDir, n.config.name())
	if err := os.MkdirAll(instdir, 0700); err != nil {
		return err
	}
	// Lock the instance directory to prevent concurrent use by another instance as well as
	// accidental use of the instance directory as a database.
	release, _, err := fileutil.Flock(filepath.Join(instdir, "LOCK"))
	if err != nil {
		return convertFileLockError(err)
	}
	n.dirLock = release
	return nil
}

func (n *Node) closeDataDir() {
	// Release instance directory lock.
	if n.dirLock != nil {
		if err := n.dirLock.Release(); err != nil {
			n.log.Error("Can't release datadir lock", "err", err)
		}
		n.dirLock = nil
	}
}
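// Illustrative sketch of the instance-directory lock above: a second node
// created over the same DataDir (and instance name) fails while the first one
// holds the LOCK file. The path and error handling are assumptions for the
// example.
//
//	a, err := node.New(&node.Config{DataDir: "/tmp/shared"})
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer a.Close()
//	_, err = node.New(&node.Config{DataDir: "/tmp/shared"})
//	// err is non-nil here: the datadir is already locked by node 'a'.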
// startRPC is a helper method to configure all the various RPC endpoints during node
// startup. It's not meant to be called at any time afterwards as it makes certain
// assumptions about the state of the node.
func (n *Node) startRPC() error {
	if err := n.startInProc(); err != nil {
		return err
	}

	// Configure IPC.
	if n.ipc.endpoint != "" {
		if err := n.ipc.start(n.rpcAPIs); err != nil {
			return err
		}
	}

	// Configure HTTP.
	if n.config.HTTPHost != "" {
		config := httpConfig{
			CorsAllowedOrigins: n.config.HTTPCors,
			Vhosts:             n.config.HTTPVirtualHosts,
			Modules:            n.config.HTTPModules,
			prefix:             n.config.HTTPPathPrefix,
		}
		if err := n.http.setListenAddr(n.config.HTTPHost, n.config.HTTPPort); err != nil {
			return err
		}
		if err := n.http.enableRPC(n.rpcAPIs, config); err != nil {
			return err
		}
	}

	// Configure WebSocket.
	if n.config.WSHost != "" {
		server := n.wsServerForPort(n.config.WSPort)
		config := wsConfig{
			Modules: n.config.WSModules,
			Origins: n.config.WSOrigins,
			prefix:  n.config.WSPathPrefix,
		}
		if err := server.setListenAddr(n.config.WSHost, n.config.WSPort); err != nil {
			return err
		}
		if err := server.enableWS(n.rpcAPIs, config); err != nil {
			return err
		}
	}

	if err := n.http.start(); err != nil {
		return err
	}
	return n.ws.start()
}

func (n *Node) wsServerForPort(port int) *httpServer {
	if n.config.HTTPHost == "" || n.http.port == port {
		return n.http
	}
	return n.ws
}

func (n *Node) stopRPC() {
	n.http.stop()
	n.ws.stop()
	n.ipc.stop()
	n.stopInProc()
}

// startInProc registers all RPC APIs on the inproc server.
func (n *Node) startInProc() error {
	for _, api := range n.rpcAPIs {
		if err := n.inprocHandler.RegisterName(api.Namespace, api.Service); err != nil {
			return err
		}
	}
	return nil
}

// stopInProc terminates the in-process RPC endpoint.
func (n *Node) stopInProc() {
	n.inprocHandler.Stop()
}

// Wait blocks until the node is closed.
func (n *Node) Wait() {
	<-n.stop
}

// RegisterLifecycle registers the given Lifecycle on the node.
func (n *Node) RegisterLifecycle(lifecycle Lifecycle) {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.state != initializingState {
		panic("can't register lifecycle on running/stopped node")
	}
	if containsLifecycle(n.lifecycles, lifecycle) {
		panic(fmt.Sprintf("attempt to register lifecycle %T more than once", lifecycle))
	}
	n.lifecycles = append(n.lifecycles, lifecycle)
}

// RegisterProtocols adds backend's protocols to the node's p2p server.
func (n *Node) RegisterProtocols(protocols []p2p.Protocol) {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.state != initializingState {
		panic("can't register protocols on running/stopped node")
	}
	n.server.Protocols = append(n.server.Protocols, protocols...)
}
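// A registration sketch (illustrative): a backend constructor typically hooks
// itself into the stack while the node is still initializing. "myService" and
// its Protocols/APIs methods are hypothetical and belong to the embedder.
//
//	func NewMyService(stack *node.Node) (*myService, error) {
//		svc := &myService{}
//		stack.RegisterProtocols(svc.Protocols()) // p2p sub-protocols
//		stack.RegisterAPIs(svc.APIs())           // JSON-RPC namespaces
//		stack.RegisterLifecycle(svc)             // start/stop hooks
//		return svc, nil
//	}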
// RegisterAPIs registers the APIs a service provides on the node.
func (n *Node) RegisterAPIs(apis []rpc.API) {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.state != initializingState {
		panic("can't register APIs on running/stopped node")
	}
	n.rpcAPIs = append(n.rpcAPIs, apis...)
}

// RegisterHandler mounts a handler on the given path on the canonical HTTP server.
//
// The name of the handler is shown in a log message when the HTTP server starts
// and should be a descriptive term for the service provided by the handler.
func (n *Node) RegisterHandler(name, path string, handler http.Handler) {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.state != initializingState {
		panic("can't register HTTP handler on running/stopped node")
	}

	n.http.mux.Handle(path, handler)
	n.http.handlerNames[path] = name
}

// Attach creates an RPC client attached to an in-process API handler.
func (n *Node) Attach() (*rpc.Client, error) {
	return rpc.DialInProc(n.inprocHandler), nil
}

// RPCHandler returns the in-process RPC request handler.
func (n *Node) RPCHandler() (*rpc.Server, error) {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.state == closedState {
		return nil, ErrNodeStopped
	}
	return n.inprocHandler, nil
}

// Config returns the configuration of the node.
func (n *Node) Config() *Config {
	return n.config
}

// Server retrieves the currently running P2P network layer. This method is meant
// only to inspect fields of the currently running server. Callers should not
// start or stop the returned server.
func (n *Node) Server() *p2p.Server {
	n.lock.Lock()
	defer n.lock.Unlock()

	return n.server
}

// DataDir retrieves the current datadir used by the protocol stack.
// Deprecated: No files should be stored in this directory, use InstanceDir instead.
func (n *Node) DataDir() string {
	return n.config.DataDir
}

// InstanceDir retrieves the instance directory used by the protocol stack.
func (n *Node) InstanceDir() string {
	return n.config.instanceDir()
}

// KeyStoreDir retrieves the key store directory.
func (n *Node) KeyStoreDir() string {
	return n.keyDir
}

// AccountManager retrieves the account manager used by the protocol stack.
func (n *Node) AccountManager() *accounts.Manager {
	return n.accman
}

// IPCEndpoint retrieves the current IPC endpoint used by the protocol stack.
func (n *Node) IPCEndpoint() string {
	return n.ipc.endpoint
}

// HTTPEndpoint returns the URL of the HTTP server. Note that this URL does not
// contain the JSON-RPC path prefix set by HTTPPathPrefix.
func (n *Node) HTTPEndpoint() string {
	return "http://" + n.http.listenAddr()
}

// WSEndpoint returns the current JSON-RPC over WebSocket endpoint.
func (n *Node) WSEndpoint() string {
	if n.http.wsAllowed() {
		return "ws://" + n.http.listenAddr() + n.http.wsConfig.prefix
	}
	return "ws://" + n.ws.listenAddr() + n.ws.wsConfig.prefix
}

// EventMux retrieves the event multiplexer used by all the network services in
// the current protocol stack.
func (n *Node) EventMux() *event.TypeMux {
	return n.eventmux
}
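// An in-process client sketch (illustrative): Attach is convenient in tests
// and embedding programs for calling registered APIs without going through
// HTTP or WebSocket. The method called here is one of the node's built-in
// web3 APIs; error handling is abbreviated.
//
//	client, _ := stack.Attach() // the returned error is always nil
//	defer client.Close()
//	var version string
//	if err := client.Call(&version, "web3_clientVersion"); err != nil {
//		// handle error
//	}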
// OpenDatabase opens an existing database with the given name (or creates one if no
// previous can be found) from within the node's instance directory. If the node is
// ephemeral, a memory database is returned.
func (n *Node) OpenDatabase(name string, cache, handles int, namespace string, readonly bool) (ethdb.Database, error) {
	n.lock.Lock()
	defer n.lock.Unlock()
	if n.state == closedState {
		return nil, ErrNodeStopped
	}

	var db ethdb.Database
	var err error
	if n.config.DataDir == "" {
		db = rawdb.NewMemoryDatabase()
	} else {
		db, err = rawdb.NewLevelDBDatabase(n.ResolvePath(name), cache, handles, namespace, readonly)
	}

	if err == nil {
		db = n.wrapDatabase(db)
	}
	return db, err
}

// OpenDatabaseWithFreezer opens an existing database with the given name (or
// creates one if no previous can be found) from within the node's data directory,
// also attaching a chain freezer to it that moves ancient chain data from the
// database to immutable append-only files. If the node is an ephemeral one, a
// memory database is returned.
func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, freezer, namespace string, readonly bool) (ethdb.Database, error) {
	n.lock.Lock()
	defer n.lock.Unlock()
	if n.state == closedState {
		return nil, ErrNodeStopped
	}

	var db ethdb.Database
	var err error
	if n.config.DataDir == "" {
		db = rawdb.NewMemoryDatabase()
	} else {
		root := n.ResolvePath(name)
		switch {
		case freezer == "":
			freezer = filepath.Join(root, "ancient")
		case !filepath.IsAbs(freezer):
			freezer = n.ResolvePath(freezer)
		}
		db, err = rawdb.NewLevelDBDatabaseWithFreezer(root, cache, handles, freezer, namespace, readonly)
	}

	if err == nil {
		db = n.wrapDatabase(db)
	}
	return db, err
}

// ResolvePath returns the absolute path of a resource in the instance directory.
func (n *Node) ResolvePath(x string) string {
	return n.config.ResolvePath(x)
}

// closeTrackingDB wraps the Close method of a database. When the database is closed by the
// service, the wrapper removes it from the node's database map. This ensures that Node
// won't auto-close the database if it is closed by the service that opened it.
type closeTrackingDB struct {
	ethdb.Database
	n *Node
}

func (db *closeTrackingDB) Close() error {
	db.n.lock.Lock()
	delete(db.n.databases, db)
	db.n.lock.Unlock()
	return db.Database.Close()
}

// wrapDatabase ensures the database will be auto-closed when Node is closed.
func (n *Node) wrapDatabase(db ethdb.Database) ethdb.Database {
	wrapper := &closeTrackingDB{db, n}
	n.databases[wrapper] = struct{}{}
	return wrapper
}

// closeDatabases closes all open databases.
func (n *Node) closeDatabases() (errors []error) {
	for db := range n.databases {
		delete(n.databases, db)
		if err := db.Database.Close(); err != nil {
			errors = append(errors, err)
		}
	}
	return errors
}
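// A database usage sketch (illustrative): a service typically opens its store
// in Start and closes it in Stop; the closeTrackingDB wrapper above then drops
// it from the node's auto-close set. The service type, database name, metrics
// namespace, and cache/handle sizes are arbitrary assumptions here.
//
//	func (s *myService) Start() error {
//		db, err := s.stack.OpenDatabase("mydata", 16, 16, "mysvc/db/", false)
//		if err != nil {
//			return err
//		}
//		s.db = db
//		return nil
//	}
//
//	func (s *myService) Stop() error {
//		return s.db.Close() // unregisters the wrapper from the node
//	}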