github.com/klaytn/klaytn@v1.12.1/node/node.go

// Modifications Copyright 2018 The klaytn Authors
// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//
// This file is derived from node/node.go (2018/06/04).
// Modified and improved for the klaytn development.

package node

import (
	"errors"
	"fmt"
	"net"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/bt51/ntpclient"
	"github.com/klaytn/klaytn/accounts"
	"github.com/klaytn/klaytn/api/debug"
	"github.com/klaytn/klaytn/event"
	"github.com/klaytn/klaytn/log"
	metricutils "github.com/klaytn/klaytn/metrics/utils"
	"github.com/klaytn/klaytn/networks/grpc"
	"github.com/klaytn/klaytn/networks/p2p"
	"github.com/klaytn/klaytn/networks/rpc"
	"github.com/klaytn/klaytn/storage/database"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/util/flock"
)

var logger = log.NewModuleLogger(log.Node)
// Node is a container on which services can be registered.
type Node struct {
	eventmux *event.TypeMux
	config   *Config
	accman   *accounts.Manager

	ephemeralKeystore string
	instanceDirLock   flock.Releaser

	serverConfig p2p.Config
	server       p2p.Server

	coreServiceFuncs []ServiceConstructor
	serviceFuncs     []ServiceConstructor

	subservices map[reflect.Type]Service // Sub-services to be terminated before the core services
	services    map[reflect.Type]Service // Currently running services

	rpcAPIs       []rpc.API
	inprocHandler *rpc.Server // In-process RPC request handler to process the API requests

	ipcEndpoint string       // IPC endpoint to listen at (empty = IPC disabled)
	ipcListener net.Listener // IPC RPC listener socket to serve API requests
	ipcHandler  *rpc.Server  // IPC RPC request handler to process the API requests

	httpEndpoint  string       // HTTP endpoint (interface + port) to listen at (empty = HTTP disabled)
	httpWhitelist []string     // HTTP RPC modules to allow through this endpoint
	httpListener  net.Listener // HTTP RPC listener socket to serve API requests
	httpHandler   *rpc.Server  // HTTP RPC request handler to process the API requests

	wsEndpoint string       // Websocket endpoint (interface + port) to listen at (empty = websocket disabled)
	wsListener net.Listener // Websocket RPC listener socket to serve API requests
	wsHandler  *rpc.Server  // Websocket RPC request handler to process the API requests

	grpcEndpoint string         // gRPC endpoint (interface + port) to listen at (empty = gRPC disabled)
	grpcListener *grpc.Listener // gRPC listener socket to serve API requests
	grpcHandler  *rpc.Server    // gRPC request handler to process the API requests

	stop chan struct{} // Channel to wait for termination notifications
	lock sync.RWMutex

	logger log.Logger
}

// New creates a new P2P node, ready for protocol registration.
func New(conf *Config) (*Node, error) {
	// Copy config and resolve the datadir so future changes to the current
	// working directory don't affect the node.
	confCopy := *conf
	conf = &confCopy
	if conf.DataDir != "" {
		absdatadir, err := filepath.Abs(conf.DataDir)
		if err != nil {
			return nil, err
		}
		conf.DataDir = absdatadir
	}
	// Ensure that the instance name doesn't cause weird conflicts with
	// other files in the data directory.
	if strings.ContainsAny(conf.Name, `/\`) {
		return nil, errors.New(`Config.Name must not contain '/' or '\'`)
	}
	if conf.Name == datadirDefaultKeyStore {
		return nil, errors.New(`Config.Name cannot be "` + datadirDefaultKeyStore + `"`)
	}
	if strings.HasSuffix(conf.Name, ".ipc") {
		return nil, errors.New(`Config.Name cannot end in ".ipc"`)
	}

	// Ensure that the AccountManager method works before the node has started.
	// We rely on this in cmd/{kcn,ken,kpn}.
	am, ephemeralKeystore, err := makeAccountManager(conf)
	if err != nil {
		return nil, err
	}
	if conf.Logger == nil {
		conf.Logger = logger
	}

	// Note: any interaction with Config that would create/touch files
	// in the data directory or instance directory is delayed until Start.
	return &Node{
		accman:            am,
		ephemeralKeystore: ephemeralKeystore,
		config:            conf,
		coreServiceFuncs:  []ServiceConstructor{},
		serviceFuncs:      []ServiceConstructor{},
		ipcEndpoint:       conf.IPCEndpoint(),
		httpEndpoint:      conf.HTTPEndpoint(),
		wsEndpoint:        conf.WSEndpoint(),
		grpcEndpoint:      conf.GRPCEndpoint(),
		eventmux:          new(event.TypeMux),
		logger:            conf.Logger,
	}, nil
}
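// nodeLifecycleSketch is an illustrative sketch, not part of the original
// file: it shows the call order this package expects from commands such as
// cmd/kcn — construct the stack with New, register service constructors
// before Start, and tear everything down with Stop. The Config values and
// the constructors argument are placeholders.
func nodeLifecycleSketch(constructors []ServiceConstructor) error {
	stack, err := New(&Config{DataDir: "/tmp/klaytn-sketch", Name: "kcn"})
	if err != nil {
		return err
	}
	for _, constructor := range constructors {
		// Registration must happen before Start; afterwards Register returns ErrNodeRunning.
		if err := stack.Register(constructor); err != nil {
			return err
		}
	}
	if err := stack.Start(); err != nil { // opens the datadir lock, P2P server and RPC endpoints
		return err
	}
	return stack.Stop()
}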
// Register injects a new service into the node's stack. The service created by
// the passed constructor must be unique in its type with regard to sibling ones.
func (n *Node) Register(constructor ServiceConstructor) error {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.server != nil {
		return ErrNodeRunning
	}
	n.coreServiceFuncs = append(n.coreServiceFuncs, constructor)
	return nil
}

// RegisterSubService injects a new sub-service into the node's stack.
// Sub-services are terminated before the core services when the node stops.
func (n *Node) RegisterSubService(constructor ServiceConstructor) error {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.server != nil {
		return ErrNodeRunning
	}
	n.serviceFuncs = append(n.serviceFuncs, constructor)
	return nil
}
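// registerSketch is an illustrative sketch, not part of the original file:
// it shows how a ServiceConstructor is wired into the node before Start. It
// assumes the constructor signature func(*ServiceContext) (Service, error)
// that initService below invokes; the constructor body is a stub, since a
// real implementation would build its Service from the supplied context
// (account manager, event mux, database access).
func registerSketch(stack *Node) error {
	return stack.Register(func(ctx *ServiceContext) (Service, error) {
		// A real constructor returns a fully initialized Service here.
		return nil, errors.New("registerSketch: stub constructor, no real service")
	})
}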
// Start creates a live P2P node and starts running it.
func (n *Node) Start() error {
	n.lock.Lock()
	defer n.lock.Unlock()

	if n.server != nil {
		return ErrNodeRunning
	}
	if err := n.openDataDir(); err != nil {
		return err
	}

	n.serverConfig = n.config.P2P
	n.serverConfig.PrivateKey = n.config.NodeKey()
	n.serverConfig.Name = n.config.NodeName()
	n.serverConfig.Logger = n.logger
	if n.serverConfig.StaticNodes == nil {
		n.serverConfig.StaticNodes = n.config.StaticNodes()
	}
	if n.serverConfig.TrustedNodes == nil {
		n.serverConfig.TrustedNodes = n.config.TrustedNodes()
	}
	if n.serverConfig.NodeDatabase == "" {
		n.serverConfig.NodeDatabase = n.config.NodeDB()
	}

	p2pServer := p2p.NewServer(n.serverConfig)
	n.logger.Info("Starting peer-to-peer node", "instance", n.serverConfig.Name)

	// Construct the registered core services and sub-services
	coreservices := make(map[reflect.Type]Service)
	if err := n.initService(n.coreServiceFuncs, coreservices); err != nil {
		return err
	}

	services := make(map[reflect.Type]Service)
	if err := n.initService(n.serviceFuncs, services); err != nil {
		return err
	}

	// Gather the protocols and start the freshly assembled P2P server
	for _, service := range coreservices {
		if len(service.Protocols()) > 0 {
			p2pServer.AddProtocols(service.Protocols())
		}
		for _, s := range services {
			// TODO-Klaytn-ServiceChain call setcomponents repeatedly for same component
			s.SetComponents(service.Components())
		}
	}
	if err := p2pServer.Start(); err != nil {
		return convertFileLockError(err)
	}

	// Start each of the core services
	coreStarted := []reflect.Type{}
	for kind, service := range coreservices {
		// Start the next service, stopping all previous upon failure
		if err := service.Start(p2pServer); err != nil {
			for _, kind := range coreStarted {
				coreservices[kind].Stop()
			}
			p2pServer.Stop()

			return err
		}
		// Mark the service started for potential cleanup
		coreStarted = append(coreStarted, kind)
	}

	started := []reflect.Type{}
	for kind, service := range services {
		if err := service.Start(p2pServer); err != nil {
			for _, kind := range started {
				services[kind].Stop()
			}
			for _, kind := range coreStarted {
				coreservices[kind].Stop()
			}
			p2pServer.Stop()

			return err
		}
		// Mark the service started for potential cleanup
		started = append(started, kind)
	}

	for kind, service := range services {
		coreservices[kind] = service
	}

	// Lastly start the configured RPC interfaces
	if err := n.startRPC(coreservices); err != nil {
		for _, service := range coreservices {
			service.Stop()
		}
		p2pServer.Stop()
		return err
	}

	// Finish initializing the startup
	n.subservices = services
	n.services = coreservices
	n.server = p2pServer
	n.stop = make(chan struct{})

	// Register a labeled metric containing version and build information
	// e.g.) klaytn_build_info{version="v1.8.4+b3ab199674" cpu_arch="darwin-arm64" go_version="go1.18.2"} 1
	if metricutils.Enabled {
		buildInfo := prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace:   metricutils.MetricNamespace,
			Subsystem:   "",
			Name:        "build_info",
			Help:        "A metric with a constant '1' value labeled by version",
			ConstLabels: prometheus.Labels{"version": n.config.Version, "cpu_arch": runtime.GOARCH, "go_version": runtime.Version()},
		})
		buildInfo.Set(1.0) // dummy value
		prometheus.DefaultRegisterer.MustRegister(buildInfo)
	}
	return nil
}

func (n *Node) initService(serviceFunc []ServiceConstructor, services map[reflect.Type]Service) error {
	for _, constructor := range serviceFunc {
		// Create a new context for the particular service
		ctx := NewServiceContext(n.config, make(map[reflect.Type]Service), n.eventmux, n.accman)
		for kind, s := range services { // copy needed for threaded access
			ctx.services[kind] = s
		}
		// Construct and save the service
		service, err := constructor(ctx)
		if err != nil {
			return err
		}
		kind := reflect.TypeOf(service)
		if _, exists := services[kind]; exists {
			return &DuplicateServiceError{Kind: kind}
		}
		services[kind] = service
	}
	return nil
}

func (n *Node) openDataDir() error {
	if n.config.DataDir == "" {
		return nil // ephemeral
	}

	instdir := filepath.Join(n.config.DataDir, n.config.name())
	if err := os.MkdirAll(instdir, 0o700); err != nil {
		return err
	}

	release, _, err := flock.New(filepath.Join(instdir, "LOCK"))
	if err != nil {
		return convertFileLockError(err)
	}
	n.instanceDirLock = release
	return nil
}
// startRPC is a helper method to start all the various RPC endpoints during node
// startup. It's not meant to be called at any time afterwards as it makes certain
// assumptions about the state of the node.
func (n *Node) startRPC(services map[reflect.Type]Service) error {
	apis := n.apis()
	for _, service := range services {
		apis = append(apis, service.APIs()...)
	}
	// Start the various API endpoints, terminating all in case of errors
	if err := n.startInProc(apis); err != nil {
		return err
	}
	if err := n.startIPC(apis); err != nil {
		n.stopInProc()
		return err
	}

	if err := n.startHTTP(n.httpEndpoint, apis, n.config.HTTPModules, n.config.HTTPCors, n.config.HTTPVirtualHosts, n.config.HTTPTimeouts); err != nil {
		n.stopIPC()
		n.stopInProc()
		return err
	}
	if err := n.startWS(n.wsEndpoint, apis, n.config.WSModules, n.config.WSOrigins, n.config.WSExposeAll); err != nil {
		n.stopHTTP()
		n.stopIPC()
		n.stopInProc()
		return err
	}

	// start gRPC server
	if err := n.startgRPC(apis); err != nil {
		n.stopHTTP()
		n.stopIPC()
		n.stopInProc()
		return err
	}
	// All API endpoints started successfully
	n.rpcAPIs = apis

	return nil
}

// startInProc initializes an in-process RPC endpoint.
func (n *Node) startInProc(apis []rpc.API) error {
	// Register all the APIs exposed by the services
	handler := rpc.NewServer()
	for _, api := range apis {
		if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
			return err
		}
		n.logger.Debug("InProc registered", "service", api.Service, "namespace", api.Namespace)
	}
	n.inprocHandler = handler
	return nil
}

// stopInProc terminates the in-process RPC endpoint.
func (n *Node) stopInProc() {
	if n.inprocHandler != nil {
		n.inprocHandler.Stop()
		n.inprocHandler = nil
	}
}

// startIPC initializes and starts the IPC RPC endpoint.
func (n *Node) startIPC(apis []rpc.API) error {
	if n.ipcEndpoint == "" {
		return nil // IPC disabled.
	}
	listener, handler, err := rpc.StartIPCEndpoint(n.ipcEndpoint, apis)
	if err != nil {
		return err
	}
	n.ipcListener = listener
	n.ipcHandler = handler
	n.logger.Info("IPC endpoint opened", "url", n.ipcEndpoint)
	return nil
}

// stopIPC terminates the IPC RPC endpoint.
func (n *Node) stopIPC() {
	if n.ipcListener != nil {
		n.ipcListener.Close()
		n.ipcListener = nil

		n.logger.Info("IPC endpoint closed", "endpoint", n.ipcEndpoint)
	}
	if n.ipcHandler != nil {
		n.ipcHandler.Stop()
		n.ipcHandler = nil
	}
}

// startgRPC initializes and starts the gRPC endpoint.
func (n *Node) startgRPC(apis []rpc.API) error {
	if n.grpcEndpoint == "" {
		return nil
	}

	handler := rpc.NewServer()
	for _, api := range apis {
		if api.Public {
			if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
				return err
			}
			n.logger.Debug("gRPC registered", "namespace", api.Namespace)
		}
	}

	listener := &grpc.Listener{Addr: n.grpcEndpoint}
	n.grpcHandler = handler
	n.grpcListener = listener
	listener.SetRPCServer(handler)

	go listener.Start()
	n.logger.Info("gRPC endpoint opened", "url", n.grpcEndpoint)
	return nil
}
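// publicAPISketch is an illustrative sketch, not part of the original file:
// it shows the shape of an rpc.API entry that a registered service returns
// from its APIs() method. Only entries with Public set are registered on the
// gRPC endpoint above (see startgRPC), while the in-process handler
// (startInProc) registers every entry regardless of Public. The admin
// receiver is reused here purely as a placeholder.
func publicAPISketch(n *Node) rpc.API {
	return rpc.API{
		Namespace: "admin",
		Version:   "1.0",
		Service:   NewPublicAdminAPI(n), // placeholder receiver defined in this package
		Public:    true,                 // required for exposure via startgRPC
	}
}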
// startHTTP initializes and starts the HTTP RPC endpoint.
func (n *Node) startHTTP(endpoint string, apis []rpc.API, modules []string, cors []string, vhosts []string, timeouts rpc.HTTPTimeouts) error {
	// Short circuit if the HTTP endpoint isn't being exposed
	if endpoint == "" {
		return nil
	}
	listener, handler, err := rpc.StartHTTPEndpoint(endpoint, apis, modules, cors, vhosts, timeouts)
	if err != nil {
		return err
	}
	n.logger.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%s", endpoint), "cors", strings.Join(cors, ","), "vhosts", strings.Join(vhosts, ","))
	// All listeners booted successfully
	n.httpEndpoint = endpoint
	n.httpListener = listener
	n.httpHandler = handler

	return nil
}

// stopHTTP terminates the HTTP RPC endpoint.
func (n *Node) stopHTTP() {
	if n.httpListener != nil {
		n.httpListener.Close()
		n.httpListener = nil

		n.logger.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%s", n.httpEndpoint))
	}
	if n.httpHandler != nil {
		n.httpHandler.Stop()
		n.httpHandler = nil
	}
}

// startWS initializes and starts the websocket RPC endpoint.
func (n *Node) startWS(endpoint string, apis []rpc.API, modules []string, wsOrigins []string, exposeAll bool) error {
	// Short circuit if the WS endpoint isn't being exposed
	if endpoint == "" {
		return nil
	}
	listener, handler, err := rpc.StartWSEndpoint(endpoint, apis, modules, wsOrigins, exposeAll)
	if err != nil {
		return err
	}
	n.logger.Info("WebSocket endpoint opened", "url", fmt.Sprintf("ws://%s", listener.Addr()))
	// All listeners booted successfully
	n.wsEndpoint = endpoint
	n.wsListener = listener
	n.wsHandler = handler

	return nil
}

// stopWS terminates the websocket RPC endpoint.
func (n *Node) stopWS() {
	if n.wsListener != nil {
		n.wsListener.Close()
		n.wsListener = nil

		n.logger.Info("WebSocket endpoint closed", "url", fmt.Sprintf("ws://%s", n.wsEndpoint))
	}
	if n.wsHandler != nil {
		n.wsHandler.Stop()
		n.wsHandler = nil
	}
}

// stopgRPC terminates the gRPC endpoint.
func (n *Node) stopgRPC() {
	if n.grpcListener != nil {
		n.grpcListener.Stop()
		n.grpcListener = nil

		n.logger.Info("gRPC endpoint closed", "url", fmt.Sprintf("grpc://%s", n.grpcEndpoint))
	}

	if n.grpcHandler != nil {
		n.grpcHandler.Stop()
		n.grpcHandler = nil
	}
}

// Stop terminates a running node along with all its services. If the node was
// not started, an error is returned.
func (n *Node) Stop() error {
	n.lock.Lock()
	defer n.lock.Unlock()

	// Short circuit if the node's not running
	if n.server == nil {
		return ErrNodeStopped
	}

	// Terminate the API, services and the p2p server.
	n.stopWS()
	n.stopHTTP()
	n.stopIPC()
	n.stopgRPC()
	n.rpcAPIs = nil
	failure := &StopError{
		Services: make(map[reflect.Type]error),
	}
	// subservices are the services which should be terminated before coreservices are terminated.
	for kind, service := range n.subservices {
		if err := service.Stop(); err != nil {
			failure.Services[kind] = err
		}
		// delete the already terminated services.
		delete(n.services, kind)
	}
	for kind, service := range n.services {
		if err := service.Stop(); err != nil {
			failure.Services[kind] = err
		}
	}
	n.server.Stop()
	n.services = nil
	n.server = nil

	// Release instance directory lock.
	if n.instanceDirLock != nil {
		if err := n.instanceDirLock.Release(); err != nil {
			n.logger.Error("Can't release datadir lock", "err", err)
		}
		n.instanceDirLock = nil
	}

	// unblock n.Wait
	close(n.stop)

	// Remove the keystore if it was created ephemerally.
	var keystoreErr error
	if n.ephemeralKeystore != "" {
		keystoreErr = os.RemoveAll(n.ephemeralKeystore)
	}

	if len(failure.Services) > 0 {
		return failure
	}
	if keystoreErr != nil {
		return keystoreErr
	}
	return nil
}

// Wait blocks the thread until the node is stopped. If the node is not running
// at the time of invocation, the method immediately returns.
func (n *Node) Wait() {
	n.lock.RLock()
	if n.server == nil {
		n.lock.RUnlock()
		return
	}
	stop := n.stop
	n.lock.RUnlock()

	<-stop
}

// Restart terminates a running node and boots up a new one in its place. If the
// node isn't running, an error is returned.
func (n *Node) Restart() error {
	if err := n.Stop(); err != nil {
		return err
	}
	if err := n.Start(); err != nil {
		return err
	}
	return nil
}
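// shutdownSketch is an illustrative sketch, not part of the original file:
// it shows how Stop and Wait interact — Stop closes the internal stop
// channel, which unblocks any goroutine parked in Wait.
func shutdownSketch(stack *Node) error {
	done := make(chan struct{})
	go func() {
		stack.Wait() // returns once Stop has closed n.stop
		close(done)
	}()
	if err := stack.Stop(); err != nil {
		return err
	}
	<-done
	return nil
}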
// Attach creates an RPC client attached to an in-process API handler.
func (n *Node) Attach() (*rpc.Client, error) {
	n.lock.RLock()
	defer n.lock.RUnlock()

	if n.server == nil {
		return nil, ErrNodeStopped
	}
	return rpc.DialInProc(n.inprocHandler), nil
}

// RPCHandler returns the in-process RPC request handler.
func (n *Node) RPCHandler() (*rpc.Server, error) {
	n.lock.RLock()
	defer n.lock.RUnlock()

	if n.inprocHandler == nil {
		return nil, ErrNodeStopped
	}
	return n.inprocHandler, nil
}
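// attachSketch is an illustrative sketch, not part of the original file: it
// shows how a caller obtains an in-process rpc.Client from a running node.
// The Call and Close invocations assume the client API of the forked
// networks/rpc package (Call(result, method, args...)), and "admin_nodeInfo"
// is only an example method name.
func attachSketch(stack *Node) error {
	client, err := stack.Attach()
	if err != nil {
		return err // ErrNodeStopped if the node is not running
	}
	defer client.Close()

	var result interface{}
	return client.Call(&result, "admin_nodeInfo")
}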
// Server retrieves the currently running P2P network layer. This method is meant
// only to inspect fields of the currently running server; life cycle management
// should be left to this Node entity.
func (n *Node) Server() p2p.Server {
	n.lock.RLock()
	defer n.lock.RUnlock()

	return n.server
}

// Service retrieves a currently running service registered with a specific type.
func (n *Node) Service(service interface{}) error {
	n.lock.RLock()
	defer n.lock.RUnlock()

	// Short circuit if the node's not running
	if n.server == nil {
		return ErrNodeStopped
	}
	// Otherwise try to find the service to return
	element := reflect.ValueOf(service).Elem()
	if running, ok := n.services[element.Type()]; ok {
		element.Set(reflect.ValueOf(running))
		return nil
	}
	return ErrServiceUnknown
}

// DataDir retrieves the current datadir used by the protocol stack.
// Deprecated: No files should be stored in this directory, use InstanceDir instead.
func (n *Node) DataDir() string {
	return n.config.DataDir
}

// InstanceDir retrieves the instance directory used by the protocol stack.
func (n *Node) InstanceDir() string {
	return n.config.instanceDir()
}

// AccountManager retrieves the account manager used by the protocol stack.
func (n *Node) AccountManager() *accounts.Manager {
	return n.accman
}

// IPCEndpoint retrieves the current IPC endpoint used by the protocol stack.
func (n *Node) IPCEndpoint() string {
	return n.ipcEndpoint
}

// HTTPEndpoint retrieves the current HTTP endpoint used by the protocol stack.
func (n *Node) HTTPEndpoint() string {
	return n.httpEndpoint
}

// WSEndpoint retrieves the current WS endpoint used by the protocol stack.
func (n *Node) WSEndpoint() string {
	return n.wsEndpoint
}

// EventMux retrieves the event multiplexer used by all the network services in
// the current protocol stack.
func (n *Node) EventMux() *event.TypeMux {
	return n.eventmux
}

// OpenDatabase opens an existing database with the given name (or creates one if no
// previous one can be found) from within the node's instance directory. If the node
// is ephemeral, a memory database is returned.
func (n *Node) OpenDatabase(dbc *database.DBConfig) database.DBManager {
	if n.config.DataDir == "" {
		return database.NewMemoryDBManager()
	}
	dbc.Dir = n.config.ResolvePath(dbc.Dir)
	return database.NewDBManager(dbc)
}

// ResolvePath returns the absolute path of a resource in the instance directory.
func (n *Node) ResolvePath(x string) string {
	return n.config.ResolvePath(x)
}
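// openDatabaseSketch is an illustrative sketch, not part of the original
// file: it shows how a service obtains a DBManager rooted in the node's
// instance directory. Only the Dir field of DBConfig is populated; the other
// fields are left at their zero values for brevity, which is not a tuned
// production configuration. With an empty DataDir the node hands back an
// in-memory database instead.
func openDatabaseSketch(n *Node) database.DBManager {
	dbc := &database.DBConfig{Dir: "sketch-chaindata"} // Dir is resolved relative to the instance directory
	return n.OpenDatabase(dbc)
}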
func (n *Node) apis() []rpc.API {
	rpcApi := []rpc.API{
		{
			Namespace: "admin",
			Version:   "1.0",
			Service:   NewPrivateAdminAPI(n),
		}, {
			Namespace: "admin",
			Version:   "1.0",
			Service:   NewPublicAdminAPI(n),
			Public:    true,
		}, {
			Namespace: "debug",
			Version:   "1.0",
			Service:   NewPublicDebugAPI(n),
		}, {
			// The "web3" namespace will be deprecated soon. The same APIs in "web3" are available in the "klay" namespace.
			Namespace: "web3",
			Version:   "1.0",
			Service:   NewPublicKlayAPI(n),
			Public:    true,
		}, {
			Namespace: "klay",
			Version:   "1.0",
			Service:   NewPublicKlayAPI(n),
			Public:    true,
		}, {
			Namespace: "debug",
			Version:   "1.0",
			Service:   debug.Handler,
			IPCOnly:   n.config.DisableUnsafeDebug,
		},
	}

	return rpcApi
}

const (
	ntpTolerance = time.Second
	RFC3339Nano  = "2006-01-02T15:04:05.999999999Z07:00"
	ntpMaxRetry  = 10
)

// timeIsNear reports whether lhs and rhs differ by less than ntpTolerance.
func timeIsNear(lhs, rhs time.Time) bool {
	diff := lhs.Sub(rhs)
	// TODO: use time.Duration.Abs() after go1.19
	if diff < 0 {
		diff = -diff
	}
	return diff < ntpTolerance
}

// NtpCheckWithLocal compares the local clock against the configured remote NTP
// server and returns an error when the two drift apart by more than ntpTolerance.
func NtpCheckWithLocal(n *Node) error {
	// Skip the check if the remote server is not configured (e.g. the --ntp.disable flag is set)
	if n.config.NtpRemoteServer == "" {
		return nil
	}

	url, port, err := net.SplitHostPort(n.config.NtpRemoteServer)
	if err != nil {
		return err
	}
	portNum, err := strconv.Atoi(port)
	if err != nil {
		return err
	}

	ntpRetryTime := time.Duration(1)
	var remote *time.Time
	for i := 0; i < ntpMaxRetry; i++ {
		time.Sleep(ntpRetryTime)
		remote, err = ntpclient.GetNetworkTime(url, portNum)
		if remote != nil {
			break
		}

		logger.Warn("Cannot connect to remote ntp server", "url", url, "err", err)
		ntpRetryTime = ntpRetryTime * 2
	}

	usage := "You can use \"--ntp.disable\" or \"--ntp.server\" option to change ntp time checking config"
	if err != nil {
		logger.Warn("Failed to connect to the remote ntp server."+"\n"+usage, "url", url)
		return nil
	}

	local := time.Now()
	if !timeIsNear(local, *remote) {
		errFormat := "System time is out of sync, local:%s remote:%s"
		return fmt.Errorf(errFormat+"\n"+usage, local.UTC().Format(RFC3339Nano), remote.UTC().Format(RFC3339Nano))
	}
	logger.Info("Ntp time check", "local", local.UTC().Format(RFC3339Nano), "remote", remote.UTC().Format(RFC3339Nano))
	return nil
}
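// ntpToleranceSketch is an illustrative sketch, not part of the original
// file: it demonstrates the tolerance applied by NtpCheckWithLocal — local
// and remote clocks are considered in sync when they differ by less than
// ntpTolerance (one second).
func ntpToleranceSketch() (bool, bool) {
	now := time.Now()
	within := timeIsNear(now, now.Add(500*time.Millisecond)) // true: below ntpTolerance
	outside := timeIsNear(now, now.Add(2*time.Second))       // false: exceeds ntpTolerance
	return within, outside
}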