github.com/noirx94/tendermintmp@v0.0.1/node/node.go (about) 1 package node 2 3 import ( 4 "bytes" 5 "context" 6 "errors" 7 "fmt" 8 "net" 9 "net/http" 10 "strings" 11 "time" 12 13 "github.com/prometheus/client_golang/prometheus" 14 "github.com/prometheus/client_golang/prometheus/promhttp" 15 "github.com/rs/cors" 16 dbm "github.com/tendermint/tm-db" 17 18 abci "github.com/tendermint/tendermint/abci/types" 19 bcv0 "github.com/tendermint/tendermint/blockchain/v0" 20 bcv1 "github.com/tendermint/tendermint/blockchain/v1" 21 bcv2 "github.com/tendermint/tendermint/blockchain/v2" 22 cfg "github.com/tendermint/tendermint/config" 23 cs "github.com/tendermint/tendermint/consensus" 24 "github.com/tendermint/tendermint/crypto" 25 "github.com/tendermint/tendermint/evidence" 26 tmjson "github.com/tendermint/tendermint/libs/json" 27 "github.com/tendermint/tendermint/libs/log" 28 tmpubsub "github.com/tendermint/tendermint/libs/pubsub" 29 "github.com/tendermint/tendermint/libs/service" 30 "github.com/tendermint/tendermint/light" 31 mempl "github.com/tendermint/tendermint/mempool" 32 "github.com/tendermint/tendermint/p2p" 33 "github.com/tendermint/tendermint/p2p/pex" 34 "github.com/tendermint/tendermint/privval" 35 "github.com/tendermint/tendermint/proxy" 36 rpccore "github.com/tendermint/tendermint/rpc/core" 37 grpccore "github.com/tendermint/tendermint/rpc/grpc" 38 rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" 39 sm "github.com/tendermint/tendermint/state" 40 "github.com/tendermint/tendermint/state/indexer" 41 blockidxkv "github.com/tendermint/tendermint/state/indexer/block/kv" 42 blockidxnull "github.com/tendermint/tendermint/state/indexer/block/null" 43 "github.com/tendermint/tendermint/state/indexer/sink/psql" 44 "github.com/tendermint/tendermint/state/txindex" 45 "github.com/tendermint/tendermint/state/txindex/kv" 46 "github.com/tendermint/tendermint/state/txindex/null" 47 "github.com/tendermint/tendermint/statesync" 48 "github.com/tendermint/tendermint/store" 
49 "github.com/tendermint/tendermint/types" 50 tmtime "github.com/tendermint/tendermint/types/time" 51 "github.com/tendermint/tendermint/version" 52 53 _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port 54 55 _ "github.com/lib/pq" // provide the psql db driver 56 ) 57 58 //------------------------------------------------------------------------------ 59 60 // DBContext specifies config information for loading a new DB. 61 type DBContext struct { 62 ID string 63 Config *cfg.Config 64 } 65 66 // DBProvider takes a DBContext and returns an instantiated DB. 67 type DBProvider func(*DBContext) (dbm.DB, error) 68 69 // DefaultDBProvider returns a database using the DBBackend and DBDir 70 // specified in the ctx.Config. 71 func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { 72 dbType := dbm.BackendType(ctx.Config.DBBackend) 73 return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()) 74 } 75 76 // GenesisDocProvider returns a GenesisDoc. 77 // It allows the GenesisDoc to be pulled from sources other than the 78 // filesystem, for instance from a distributed key-value store cluster. 79 type GenesisDocProvider func() (*types.GenesisDoc, error) 80 81 // DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads 82 // the GenesisDoc from the config.GenesisFile() on the filesystem. 83 func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider { 84 return func() (*types.GenesisDoc, error) { 85 return types.GenesisDocFromFile(config.GenesisFile()) 86 } 87 } 88 89 // Provider takes a config and a logger and returns a ready to go Node. 90 type Provider func(*cfg.Config, log.Logger) (*Node, error) 91 92 // DefaultNewNode returns a Tendermint node with default settings for the 93 // PrivValidator, ClientCreator, GenesisDoc, and DBProvider. 94 // It implements NodeProvider. 
95 func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) { 96 nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile()) 97 if err != nil { 98 return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err) 99 } 100 101 return NewNode(config, 102 privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()), 103 nodeKey, 104 proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), 105 DefaultGenesisDocProviderFunc(config), 106 DefaultDBProvider, 107 DefaultMetricsProvider(config.Instrumentation), 108 logger, 109 ) 110 } 111 112 // MetricsProvider returns a consensus, p2p and mempool Metrics. 113 type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) 114 115 // DefaultMetricsProvider returns Metrics build using Prometheus client library 116 // if Prometheus is enabled. Otherwise, it returns no-op Metrics. 117 func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider { 118 return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) { 119 if config.Prometheus { 120 return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID), 121 p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID), 122 mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID), 123 sm.PrometheusMetrics(config.Namespace, "chain_id", chainID) 124 } 125 return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics() 126 } 127 } 128 129 // Option sets a parameter for the node. 130 type Option func(*Node) 131 132 // Temporary interface for switching to fast sync, we should get rid of v0 and v1 reactors. 133 // See: https://github.com/tendermint/tendermint/issues/4595 134 type fastSyncReactor interface { 135 SwitchToFastSync(sm.State) error 136 } 137 138 // CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to 139 // the node's Switch. 
140 // 141 // WARNING: using any name from the below list of the existing reactors will 142 // result in replacing it with the custom one. 143 // 144 // - MEMPOOL 145 // - BLOCKCHAIN 146 // - CONSENSUS 147 // - EVIDENCE 148 // - PEX 149 // - STATESYNC 150 func CustomReactors(reactors map[string]p2p.Reactor) Option { 151 return func(n *Node) { 152 for name, reactor := range reactors { 153 if existingReactor := n.sw.Reactor(name); existingReactor != nil { 154 n.sw.Logger.Info("Replacing existing reactor with a custom one", 155 "name", name, "existing", existingReactor, "custom", reactor) 156 n.sw.RemoveReactor(name, existingReactor) 157 } 158 n.sw.AddReactor(name, reactor) 159 // register the new channels to the nodeInfo 160 // NOTE: This is a bit messy now with the type casting but is 161 // cleaned up in the following version when NodeInfo is changed from 162 // and interface to a concrete type 163 if ni, ok := n.nodeInfo.(p2p.DefaultNodeInfo); ok { 164 for _, chDesc := range reactor.GetChannels() { 165 if !ni.HasChannel(chDesc.ID) { 166 ni.Channels = append(ni.Channels, chDesc.ID) 167 n.transport.AddChannel(chDesc.ID) 168 } 169 } 170 n.nodeInfo = ni 171 } else { 172 n.Logger.Error("Node info is not of type DefaultNodeInfo. Custom reactor channels can not be added.") 173 } 174 } 175 } 176 } 177 178 // StateProvider overrides the state provider used by state sync to retrieve trusted app hashes and 179 // build a State object for bootstrapping the node. 180 // WARNING: this interface is considered unstable and subject to change. 181 func StateProvider(stateProvider statesync.StateProvider) Option { 182 return func(n *Node) { 183 n.stateSyncProvider = stateProvider 184 } 185 } 186 187 //------------------------------------------------------------------------------ 188 189 // Node is the highest level interface to a full Tendermint node. 190 // It includes all configuration information and running services. 
191 type Node struct { 192 service.BaseService 193 194 // config 195 config *cfg.Config 196 genesisDoc *types.GenesisDoc // initial validator set 197 privValidator types.PrivValidator // local node's validator key 198 199 // network 200 transport *p2p.MultiplexTransport 201 sw *p2p.Switch // p2p connections 202 addrBook pex.AddrBook // known peers 203 nodeInfo p2p.NodeInfo 204 nodeKey *p2p.NodeKey // our node privkey 205 isListening bool 206 207 // services 208 eventBus *types.EventBus // pub/sub for services 209 stateStore sm.Store 210 blockStore *store.BlockStore // store the blockchain to disk 211 bcReactor p2p.Reactor // for fast-syncing 212 mempoolReactor *mempl.Reactor // for gossipping transactions 213 mempool mempl.Mempool 214 stateSync bool // whether the node should state sync on startup 215 stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots 216 stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node 217 stateSyncGenesis sm.State // provides the genesis state for state sync 218 consensusState *cs.State // latest consensus state 219 consensusReactor *cs.Reactor // for participating in the consensus 220 pexReactor *pex.Reactor // for exchanging peer addresses 221 evidencePool *evidence.Pool // tracking evidence 222 proxyApp proxy.AppConns // connection to the application 223 rpcListeners []net.Listener // rpc servers 224 txIndexer txindex.TxIndexer 225 blockIndexer indexer.BlockIndexer 226 indexerService *txindex.IndexerService 227 prometheusSrv *http.Server 228 } 229 230 func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { 231 var blockStoreDB dbm.DB 232 blockStoreDB, err = dbProvider(&DBContext{"blockstore", config}) 233 if err != nil { 234 return 235 } 236 blockStore = store.NewBlockStore(blockStoreDB) 237 238 stateDB, err = dbProvider(&DBContext{"state", config}) 239 if err != nil { 240 return 241 } 242 243 return 244 } 245 
246 func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) { 247 proxyApp := proxy.NewAppConns(clientCreator) 248 proxyApp.SetLogger(logger.With("module", "proxy")) 249 if err := proxyApp.Start(); err != nil { 250 return nil, fmt.Errorf("error starting proxy app connections: %v", err) 251 } 252 return proxyApp, nil 253 } 254 255 func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) { 256 eventBus := types.NewEventBus() 257 eventBus.SetLogger(logger.With("module", "events")) 258 if err := eventBus.Start(); err != nil { 259 return nil, err 260 } 261 return eventBus, nil 262 } 263 264 func createAndStartIndexerService( 265 config *cfg.Config, 266 chainID string, 267 dbProvider DBProvider, 268 eventBus *types.EventBus, 269 logger log.Logger, 270 ) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) { 271 272 var ( 273 txIndexer txindex.TxIndexer 274 blockIndexer indexer.BlockIndexer 275 ) 276 277 switch config.TxIndex.Indexer { 278 case "kv": 279 store, err := dbProvider(&DBContext{"tx_index", config}) 280 if err != nil { 281 return nil, nil, nil, err 282 } 283 284 txIndexer = kv.NewTxIndex(store) 285 blockIndexer = blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events"))) 286 287 case "psql": 288 if config.TxIndex.PsqlConn == "" { 289 return nil, nil, nil, errors.New(`no psql-conn is set for the "psql" indexer`) 290 } 291 es, err := psql.NewEventSink(config.TxIndex.PsqlConn, chainID) 292 if err != nil { 293 return nil, nil, nil, fmt.Errorf("creating psql indexer: %w", err) 294 } 295 txIndexer = es.TxIndexer() 296 blockIndexer = es.BlockIndexer() 297 298 default: 299 txIndexer = &null.TxIndex{} 300 blockIndexer = &blockidxnull.BlockerIndexer{} 301 } 302 303 indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus) 304 indexerService.SetLogger(logger.With("module", "txindex")) 305 306 if err := indexerService.Start(); err != nil { 307 return nil, 
nil, nil, err 308 } 309 310 return indexerService, txIndexer, blockIndexer, nil 311 } 312 313 func doHandshake( 314 stateStore sm.Store, 315 state sm.State, 316 blockStore sm.BlockStore, 317 genDoc *types.GenesisDoc, 318 eventBus types.BlockEventPublisher, 319 proxyApp proxy.AppConns, 320 consensusLogger log.Logger) error { 321 322 handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc) 323 handshaker.SetLogger(consensusLogger) 324 handshaker.SetEventBus(eventBus) 325 if err := handshaker.Handshake(proxyApp); err != nil { 326 return fmt.Errorf("error during handshake: %v", err) 327 } 328 return nil 329 } 330 331 func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) { 332 // Log the version info. 333 logger.Info("Version info", 334 "tendermint_version", version.TMCoreSemVer, 335 "block", version.BlockProtocol, 336 "p2p", version.P2PProtocol, 337 ) 338 339 // If the state and software differ in block version, at least log it. 340 if state.Version.Consensus.Block != version.BlockProtocol { 341 logger.Info("Software and state have different block protocols", 342 "software", version.BlockProtocol, 343 "state", state.Version.Consensus.Block, 344 ) 345 } 346 347 addr := pubKey.Address() 348 // Log whether this node is a validator or an observer 349 if state.Validators.HasAddress(addr) { 350 consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey) 351 } else { 352 consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey) 353 } 354 } 355 356 func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool { 357 if state.Validators.Size() > 1 { 358 return false 359 } 360 addr, _ := state.Validators.GetByIndex(0) 361 return bytes.Equal(pubKey.Address(), addr) 362 } 363 364 func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns, 365 state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (*mempl.Reactor, *mempl.CListMempool) { 366 367 
mempool := mempl.NewCListMempool( 368 config.Mempool, 369 proxyApp.Mempool(), 370 state.LastBlockHeight, 371 mempl.WithMetrics(memplMetrics), 372 mempl.WithPreCheck(sm.TxPreCheck(state)), 373 mempl.WithPostCheck(sm.TxPostCheck(state)), 374 ) 375 mempoolLogger := logger.With("module", "mempool") 376 mempoolReactor := mempl.NewReactor(config.Mempool, mempool) 377 mempoolReactor.SetLogger(mempoolLogger) 378 379 if config.Consensus.WaitForTxs() { 380 mempool.EnableTxsAvailable() 381 } 382 return mempoolReactor, mempool 383 } 384 385 func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider, 386 stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) { 387 388 evidenceDB, err := dbProvider(&DBContext{"evidence", config}) 389 if err != nil { 390 return nil, nil, err 391 } 392 evidenceLogger := logger.With("module", "evidence") 393 evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB), blockStore) 394 if err != nil { 395 return nil, nil, err 396 } 397 evidenceReactor := evidence.NewReactor(evidencePool) 398 evidenceReactor.SetLogger(evidenceLogger) 399 return evidenceReactor, evidencePool, nil 400 } 401 402 func createBlockchainReactor(config *cfg.Config, 403 state sm.State, 404 blockExec *sm.BlockExecutor, 405 blockStore *store.BlockStore, 406 fastSync bool, 407 logger log.Logger) (bcReactor p2p.Reactor, err error) { 408 409 switch config.FastSync.Version { 410 case "v0": 411 bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) 412 case "v1": 413 bcReactor = bcv1.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) 414 case "v2": 415 bcReactor = bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) 416 default: 417 return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version) 418 } 419 420 bcReactor.SetLogger(logger.With("module", "blockchain")) 421 return bcReactor, nil 422 } 423 424 func 
createConsensusReactor(config *cfg.Config, 425 state sm.State, 426 blockExec *sm.BlockExecutor, 427 blockStore sm.BlockStore, 428 mempool *mempl.CListMempool, 429 evidencePool *evidence.Pool, 430 privValidator types.PrivValidator, 431 csMetrics *cs.Metrics, 432 waitSync bool, 433 eventBus *types.EventBus, 434 consensusLogger log.Logger) (*cs.Reactor, *cs.State) { 435 436 consensusState := cs.NewState( 437 config.Consensus, 438 state.Copy(), 439 blockExec, 440 blockStore, 441 mempool, 442 evidencePool, 443 cs.StateMetrics(csMetrics), 444 ) 445 consensusState.SetLogger(consensusLogger) 446 if privValidator != nil { 447 consensusState.SetPrivValidator(privValidator) 448 } 449 consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics)) 450 consensusReactor.SetLogger(consensusLogger) 451 // services which will be publishing and/or subscribing for messages (events) 452 // consensusReactor will set it on consensusState and blockExecutor 453 consensusReactor.SetEventBus(eventBus) 454 return consensusReactor, consensusState 455 } 456 457 func createTransport( 458 config *cfg.Config, 459 nodeInfo p2p.NodeInfo, 460 nodeKey *p2p.NodeKey, 461 proxyApp proxy.AppConns, 462 ) ( 463 *p2p.MultiplexTransport, 464 []p2p.PeerFilterFunc, 465 ) { 466 var ( 467 mConnConfig = p2p.MConnConfig(config.P2P) 468 transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig) 469 connFilters = []p2p.ConnFilterFunc{} 470 peerFilters = []p2p.PeerFilterFunc{} 471 ) 472 473 if !config.P2P.AllowDuplicateIP { 474 connFilters = append(connFilters, p2p.ConnDuplicateIPFilter()) 475 } 476 477 // Filter peers by addr or pubkey with an ABCI query. 478 // If the query return code is OK, add peer. 479 if config.FilterPeers { 480 connFilters = append( 481 connFilters, 482 // ABCI query for address filtering. 
483 func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error { 484 res, err := proxyApp.Query().QuerySync(abci.RequestQuery{ 485 Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()), 486 }) 487 if err != nil { 488 return err 489 } 490 if res.IsErr() { 491 return fmt.Errorf("error querying abci app: %v", res) 492 } 493 494 return nil 495 }, 496 ) 497 498 peerFilters = append( 499 peerFilters, 500 // ABCI query for ID filtering. 501 func(_ p2p.IPeerSet, p p2p.Peer) error { 502 res, err := proxyApp.Query().QuerySync(abci.RequestQuery{ 503 Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()), 504 }) 505 if err != nil { 506 return err 507 } 508 if res.IsErr() { 509 return fmt.Errorf("error querying abci app: %v", res) 510 } 511 512 return nil 513 }, 514 ) 515 } 516 517 p2p.MultiplexTransportConnFilters(connFilters...)(transport) 518 519 // Limit the number of incoming connections. 520 max := config.P2P.MaxNumInboundPeers + len(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) 521 p2p.MultiplexTransportMaxIncomingConnections(max)(transport) 522 523 return transport, peerFilters 524 } 525 526 func createSwitch(config *cfg.Config, 527 transport p2p.Transport, 528 p2pMetrics *p2p.Metrics, 529 peerFilters []p2p.PeerFilterFunc, 530 mempoolReactor *mempl.Reactor, 531 bcReactor p2p.Reactor, 532 stateSyncReactor *statesync.Reactor, 533 consensusReactor *cs.Reactor, 534 evidenceReactor *evidence.Reactor, 535 nodeInfo p2p.NodeInfo, 536 nodeKey *p2p.NodeKey, 537 p2pLogger log.Logger) *p2p.Switch { 538 539 sw := p2p.NewSwitch( 540 config.P2P, 541 transport, 542 p2p.WithMetrics(p2pMetrics), 543 p2p.SwitchPeerFilters(peerFilters...), 544 ) 545 sw.SetLogger(p2pLogger) 546 sw.AddReactor("MEMPOOL", mempoolReactor) 547 sw.AddReactor("BLOCKCHAIN", bcReactor) 548 sw.AddReactor("CONSENSUS", consensusReactor) 549 sw.AddReactor("EVIDENCE", evidenceReactor) 550 sw.AddReactor("STATESYNC", stateSyncReactor) 551 552 sw.SetNodeInfo(nodeInfo) 553 sw.SetNodeKey(nodeKey) 554 555 
p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile()) 556 return sw 557 } 558 559 func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch, 560 p2pLogger log.Logger, nodeKey *p2p.NodeKey) (pex.AddrBook, error) { 561 562 addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict) 563 addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile())) 564 565 // Add ourselves to addrbook to prevent dialing ourselves 566 if config.P2P.ExternalAddress != "" { 567 addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress)) 568 if err != nil { 569 return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err) 570 } 571 addrBook.AddOurAddress(addr) 572 } 573 if config.P2P.ListenAddress != "" { 574 addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress)) 575 if err != nil { 576 return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err) 577 } 578 addrBook.AddOurAddress(addr) 579 } 580 581 sw.SetAddrBook(addrBook) 582 583 return addrBook, nil 584 } 585 586 func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, 587 sw *p2p.Switch, logger log.Logger) *pex.Reactor { 588 589 // TODO persistent peers ? so we can have their DNS addrs saved 590 pexReactor := pex.NewReactor(addrBook, 591 &pex.ReactorConfig{ 592 Seeds: splitAndTrimEmpty(config.P2P.Seeds, ",", " "), 593 SeedMode: config.P2P.SeedMode, 594 // See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000 595 // blocks assuming 10s blocks ~ 28 hours. 596 // TODO (melekes): make it dynamic based on the actual block latencies 597 // from the live network. 
598 // https://github.com/tendermint/tendermint/issues/3523 599 SeedDisconnectWaitPeriod: 28 * time.Hour, 600 PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod, 601 }) 602 pexReactor.SetLogger(logger.With("module", "pex")) 603 sw.AddReactor("PEX", pexReactor) 604 return pexReactor 605 } 606 607 // startStateSync starts an asynchronous state sync process, then switches to fast sync mode. 608 func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor, 609 stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool, 610 stateStore sm.Store, blockStore *store.BlockStore, state sm.State) error { 611 ssR.Logger.Info("Starting state sync") 612 613 if stateProvider == nil { 614 var err error 615 ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) 616 defer cancel() 617 stateProvider, err = statesync.NewLightClientStateProvider( 618 ctx, 619 state.ChainID, state.Version, state.InitialHeight, 620 config.RPCServers, light.TrustOptions{ 621 Period: config.TrustPeriod, 622 Height: config.TrustHeight, 623 Hash: config.TrustHashBytes(), 624 }, ssR.Logger.With("module", "light")) 625 if err != nil { 626 return fmt.Errorf("failed to set up light client state provider: %w", err) 627 } 628 } 629 630 go func() { 631 state, commit, err := ssR.Sync(stateProvider, config.DiscoveryTime) 632 if err != nil { 633 ssR.Logger.Error("State sync failed", "err", err) 634 return 635 } 636 err = stateStore.Bootstrap(state) 637 if err != nil { 638 ssR.Logger.Error("Failed to bootstrap node with new state", "err", err) 639 return 640 } 641 err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit) 642 if err != nil { 643 ssR.Logger.Error("Failed to store last seen commit", "err", err) 644 return 645 } 646 647 if fastSync { 648 // FIXME Very ugly to have these metrics bleed through here. 
649 conR.Metrics.StateSyncing.Set(0) 650 conR.Metrics.FastSyncing.Set(1) 651 err = bcR.SwitchToFastSync(state) 652 if err != nil { 653 ssR.Logger.Error("Failed to switch to fast sync", "err", err) 654 return 655 } 656 } else { 657 conR.SwitchToConsensus(state, true) 658 } 659 }() 660 return nil 661 } 662 663 // NewNode returns a new, ready to go, Tendermint Node. 664 func NewNode(config *cfg.Config, 665 privValidator types.PrivValidator, 666 nodeKey *p2p.NodeKey, 667 clientCreator proxy.ClientCreator, 668 genesisDocProvider GenesisDocProvider, 669 dbProvider DBProvider, 670 metricsProvider MetricsProvider, 671 logger log.Logger, 672 options ...Option) (*Node, error) { 673 674 blockStore, stateDB, err := initDBs(config, dbProvider) 675 if err != nil { 676 return nil, err 677 } 678 679 stateStore := sm.NewStore(stateDB) 680 681 state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider) 682 if err != nil { 683 return nil, err 684 } 685 686 // Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query). 687 proxyApp, err := createAndStartProxyAppConns(clientCreator, logger) 688 if err != nil { 689 return nil, err 690 } 691 692 // EventBus and IndexerService must be started before the handshake because 693 // we might need to index the txs of the replayed block as this might not have happened 694 // when the node stopped last time (i.e. the node stopped after it saved the block 695 // but before it indexed the txs, or, endblocker panicked) 696 eventBus, err := createAndStartEventBus(logger) 697 if err != nil { 698 return nil, err 699 } 700 701 indexerService, txIndexer, blockIndexer, err := createAndStartIndexerService(config, 702 genDoc.ChainID, dbProvider, eventBus, logger) 703 if err != nil { 704 return nil, err 705 } 706 707 // If an address is provided, listen on the socket for a connection from an 708 // external signing process. 
709 if config.PrivValidatorListenAddr != "" { 710 // FIXME: we should start services inside OnStart 711 privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, genDoc.ChainID, logger) 712 if err != nil { 713 return nil, fmt.Errorf("error with private validator socket client: %w", err) 714 } 715 } 716 717 pubKey, err := privValidator.GetPubKey() 718 if err != nil { 719 return nil, fmt.Errorf("can't get pubkey: %w", err) 720 } 721 722 // Determine whether we should attempt state sync. 723 stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey) 724 if stateSync && state.LastBlockHeight > 0 { 725 logger.Info("Found local state with non-zero height, skipping state sync") 726 stateSync = false 727 } 728 729 // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, 730 // and replays any blocks as necessary to sync tendermint with the app. 731 consensusLogger := logger.With("module", "consensus") 732 if !stateSync { 733 if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { 734 return nil, err 735 } 736 737 // Reload the state. It will have the Version.Consensus.App set by the 738 // Handshake, and may have other modifications as well (ie. depending on 739 // what happened during block replay). 740 state, err = stateStore.Load() 741 if err != nil { 742 return nil, fmt.Errorf("cannot load state: %w", err) 743 } 744 } 745 746 // Determine whether we should do fast sync. This must happen after the handshake, since the 747 // app may modify the validator set, specifying ourself as the only validator. 
748 fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey) 749 750 logNodeStartupInfo(state, pubKey, logger, consensusLogger) 751 752 csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID) 753 754 // Make MempoolReactor 755 mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger) 756 757 // Make Evidence Reactor 758 evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger) 759 if err != nil { 760 return nil, err 761 } 762 763 // make block executor for consensus and blockchain reactors to execute blocks 764 blockExec := sm.NewBlockExecutor( 765 stateStore, 766 logger.With("module", "state"), 767 proxyApp.Consensus(), 768 mempool, 769 evidencePool, 770 sm.BlockExecutorWithMetrics(smMetrics), 771 ) 772 773 // Make BlockchainReactor. Don't start fast sync if we're doing a state sync first. 774 bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync && !stateSync, logger) 775 if err != nil { 776 return nil, fmt.Errorf("could not create blockchain reactor: %w", err) 777 } 778 779 // Make ConsensusReactor. Don't enable fully if doing a state sync and/or fast sync first. 780 // FIXME We need to update metrics here, since other reactors don't have access to them. 781 if stateSync { 782 csMetrics.StateSyncing.Set(1) 783 } else if fastSync { 784 csMetrics.FastSyncing.Set(1) 785 } 786 consensusReactor, consensusState := createConsensusReactor( 787 config, state, blockExec, blockStore, mempool, evidencePool, 788 privValidator, csMetrics, stateSync || fastSync, eventBus, consensusLogger, 789 ) 790 791 // Set up state sync reactor, and schedule a sync if requested. 792 // FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy, 793 // we should clean this whole thing up. 
See: 794 // https://github.com/tendermint/tendermint/issues/4644 795 stateSyncReactor := statesync.NewReactor( 796 *config.StateSync, 797 proxyApp.Snapshot(), 798 proxyApp.Query(), 799 config.StateSync.TempDir, 800 ) 801 stateSyncReactor.SetLogger(logger.With("module", "statesync")) 802 803 nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state) 804 if err != nil { 805 return nil, err 806 } 807 808 // Setup Transport. 809 transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp) 810 811 // Setup Switch. 812 p2pLogger := logger.With("module", "p2p") 813 sw := createSwitch( 814 config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor, 815 stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger, 816 ) 817 818 err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) 819 if err != nil { 820 return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err) 821 } 822 823 err = sw.AddUnconditionalPeerIDs(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) 824 if err != nil { 825 return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) 826 } 827 828 addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey) 829 if err != nil { 830 return nil, fmt.Errorf("could not create addrbook: %w", err) 831 } 832 833 // Optionally, start the pex reactor 834 // 835 // TODO: 836 // 837 // We need to set Seeds and PersistentPeers on the switch, 838 // since it needs to be able to use these (and their DNS names) 839 // even if the PEX is off. We can include the DNS name in the NetAddress, 840 // but it would still be nice to have a clear list of the current "PersistentPeers" 841 // somewhere that we can return with net_info. 842 // 843 // If PEX is on, it should handle dialing the seeds. Otherwise the switch does it. 
// Note we currently use the addrBook regardless at least for AddOurAddress
var pexReactor *pex.Reactor
if config.P2P.PexReactor {
	pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
}

// Optionally serve pprof debugging endpoints in the background.
if config.RPC.PprofListenAddress != "" {
	go func() {
		logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress)
		logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil))
	}()
}

// All sub-services have been constructed; assemble them into the Node.
node := &Node{
	config:        config,
	genesisDoc:    genDoc,
	privValidator: privValidator,

	transport: transport,
	sw:        sw,
	addrBook:  addrBook,
	nodeInfo:  nodeInfo,
	nodeKey:   nodeKey,

	stateStore:       stateStore,
	blockStore:       blockStore,
	bcReactor:        bcReactor,
	mempoolReactor:   mempoolReactor,
	mempool:          mempool,
	consensusState:   consensusState,
	consensusReactor: consensusReactor,
	stateSyncReactor: stateSyncReactor,
	stateSync:        stateSync,
	stateSyncGenesis: state, // Shouldn't be necessary, but need a way to pass the genesis state
	pexReactor:       pexReactor,
	evidencePool:     evidencePool,
	proxyApp:         proxyApp,
	txIndexer:        txIndexer,
	indexerService:   indexerService,
	blockIndexer:     blockIndexer,
	eventBus:         eventBus,
}
node.BaseService = *service.NewBaseService(logger, "Node", node)

// Apply caller-supplied options last so they can override default wiring.
for _, option := range options {
	option(node)
}

return node, nil
}

// OnStart starts the Node. It implements service.Service.
//
// It waits for genesis time if it is in the future, then starts (in order)
// the RPC servers, the optional Prometheus server, the p2p transport, the
// mempool WAL, and the p2p switch, and finally kicks off state sync when
// enabled.
func (n *Node) OnStart() error {
	now := tmtime.Now()
	genTime := n.genesisDoc.GenesisTime
	if genTime.After(now) {
		n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime)
		time.Sleep(genTime.Sub(now))
	}

	// Add private IDs to addrbook to block those peers being added
	n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))

	// Start the RPC server before the P2P server
	// so we can eg. receive txs for the first block
	if n.config.RPC.ListenAddress != "" {
		listeners, err := n.startRPC()
		if err != nil {
			return err
		}
		// Keep the listeners so OnStop can close them.
		n.rpcListeners = listeners
	}

	if n.config.Instrumentation.Prometheus &&
		n.config.Instrumentation.PrometheusListenAddr != "" {
		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
	}

	// Start the transport.
	addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
	if err != nil {
		return err
	}
	if err := n.transport.Listen(*addr); err != nil {
		return err
	}

	n.isListening = true

	if n.config.Mempool.WalEnabled() {
		err = n.mempool.InitWAL()
		if err != nil {
			return fmt.Errorf("init mempool WAL: %w", err)
		}
	}

	// Start the switch (the P2P server).
	err = n.sw.Start()
	if err != nil {
		return err
	}

	// Always connect to persistent peers
	err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
	if err != nil {
		return fmt.Errorf("could not dial peers from persistent_peers field: %w", err)
	}

	// Run state sync
	if n.stateSync {
		bcR, ok := n.bcReactor.(fastSyncReactor)
		if !ok {
			return fmt.Errorf("this blockchain reactor does not support switching from state sync")
		}
		err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider,
			n.config.StateSync, n.config.FastSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis)
		if err != nil {
			return fmt.Errorf("failed to start state sync: %w", err)
		}
	}

	return nil
}

// OnStop stops the Node. It implements service.Service.
//
// Shutdown is roughly the reverse of OnStart: non-reactor services first,
// then the reactors (via the switch), the mempool WAL, the transport, the
// RPC listeners, the private validator connection, and the Prometheus
// server.
func (n *Node) OnStop() {
	n.BaseService.OnStop()

	n.Logger.Info("Stopping Node")

	// first stop the non-reactor services
	if err := n.eventBus.Stop(); err != nil {
		n.Logger.Error("Error closing eventBus", "err", err)
	}
	if err := n.indexerService.Stop(); err != nil {
		n.Logger.Error("Error closing indexerService", "err", err)
	}

	// now stop the reactors
	if err := n.sw.Stop(); err != nil {
		n.Logger.Error("Error closing switch", "err", err)
	}

	// stop mempool WAL
	if n.config.Mempool.WalEnabled() {
		n.mempool.CloseWAL()
	}

	if err := n.transport.Close(); err != nil {
		n.Logger.Error("Error closing transport", "err", err)
	}

	n.isListening = false

	// finally stop the listeners / external services
	for _, l := range n.rpcListeners {
		n.Logger.Info("Closing rpc listener", "listener", l)
		if err := l.Close(); err != nil {
			n.Logger.Error("Error closing listener", "listener", l, "err", err)
		}
	}

	// If the private validator is itself a service (e.g. a socket client),
	// stop it as well.
	if pvsc, ok := n.privValidator.(service.Service); ok {
		if err :=
pvsc.Stop(); err != nil {
			n.Logger.Error("Error closing private validator", "err", err)
		}
	}

	if n.prometheusSrv != nil {
		// Background context: no shutdown deadline is imposed here.
		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
			// Error from closing listeners, or context timeout:
			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
		}
	}
}

// ConfigureRPC makes sure RPC has all the objects it needs to operate.
func (n *Node) ConfigureRPC() error {
	pubKey, err := n.privValidator.GetPubKey()
	if err != nil {
		return fmt.Errorf("can't get pubkey: %w", err)
	}
	rpccore.SetEnvironment(&rpccore.Environment{
		ProxyAppQuery:   n.proxyApp.Query(),
		ProxyAppMempool: n.proxyApp.Mempool(),

		StateStore:     n.stateStore,
		BlockStore:     n.blockStore,
		EvidencePool:   n.evidencePool,
		ConsensusState: n.consensusState,
		P2PPeers:       n.sw,
		P2PTransport:   n,

		PubKey:           pubKey,
		GenDoc:           n.genesisDoc,
		TxIndexer:        n.txIndexer,
		BlockIndexer:     n.blockIndexer,
		ConsensusReactor: n.consensusReactor,
		EventBus:         n.eventBus,
		Mempool:          n.mempool,

		Logger: n.Logger.With("module", "rpc"),

		Config: *n.config.RPC,
	})
	if err := rpccore.InitGenesisChunks(); err != nil {
		return err
	}

	return nil
}

// startRPC configures the RPC environment and starts an HTTP+WebSocket RPC
// server on every configured listen address (and, optionally, a gRPC server).
// It returns the open listeners so OnStop can close them.
func (n *Node) startRPC() ([]net.Listener, error) {
	err := n.ConfigureRPC()
	if err != nil {
		return nil, err
	}

	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")

	if n.config.RPC.Unsafe {
		rpccore.AddUnsafeRoutes()
	}

	config := rpcserver.DefaultConfig()
	config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
	config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
	config.MaxOpenConnections = n.config.RPC.MaxOpenConnections
	// If necessary adjust global WriteTimeout to ensure it's greater than
	// TimeoutBroadcastTxCommit.
	// See https://github.com/tendermint/tendermint/issues/3435
	if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
		config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
	}

	// we may expose the rpc over both a unix and tcp socket
	listeners := make([]net.Listener, len(listenAddrs))
	for i, listenAddr := range listenAddrs {
		mux := http.NewServeMux()
		rpcLogger := n.Logger.With("module", "rpc-server")
		wmLogger := rpcLogger.With("protocol", "websocket")
		wm := rpcserver.NewWebsocketManager(rpccore.Routes,
			// Drop all event subscriptions of a client when it disconnects.
			rpcserver.OnDisconnect(func(remoteAddr string) {
				err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
				if err != nil && err != tmpubsub.ErrSubscriptionNotFound {
					wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err)
				}
			}),
			rpcserver.ReadLimit(config.MaxBodyBytes),
		)
		wm.SetLogger(wmLogger)
		mux.HandleFunc("/websocket", wm.WebsocketHandler)
		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger)
		listener, err := rpcserver.Listen(
			listenAddr,
			config,
		)
		if err != nil {
			return nil, err
		}

		var rootHandler http.Handler = mux
		if n.config.RPC.IsCorsEnabled() {
			corsMiddleware := cors.New(cors.Options{
				AllowedOrigins: n.config.RPC.CORSAllowedOrigins,
				AllowedMethods: n.config.RPC.CORSAllowedMethods,
				AllowedHeaders: n.config.RPC.CORSAllowedHeaders,
			})
			rootHandler = corsMiddleware.Handler(mux)
		}
		// Serve in the background; errors are logged rather than returned.
		if n.config.RPC.IsTLSEnabled() {
			go func() {
				if err := rpcserver.ServeTLS(
					listener,
					rootHandler,
					n.config.RPC.CertFile(),
					n.config.RPC.KeyFile(),
					rpcLogger,
					config,
				); err != nil {
					n.Logger.Error("Error serving server with TLS", "err", err)
				}
			}()
		} else {
			go func() {
				if err := rpcserver.Serve(
					listener,
					rootHandler,
					rpcLogger,
					config,
				); err != nil {
					n.Logger.Error("Error serving server", "err", err)
				}
			}()
		}

		listeners[i] = listener
	}

	// we expose a simplified api over grpc for convenience to app devs
	grpcListenAddr := n.config.RPC.GRPCListenAddress
	if grpcListenAddr != "" {
		config := rpcserver.DefaultConfig()
		config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
		config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
		// NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections
		config.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections
		// If necessary adjust global WriteTimeout to ensure it's greater than
		// TimeoutBroadcastTxCommit.
		// See https://github.com/tendermint/tendermint/issues/3435
		if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
			config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
		}
		listener, err := rpcserver.Listen(grpcListenAddr, config)
		if err != nil {
			return nil, err
		}
		go func() {
			if err := grpccore.StartGRPCServer(listener); err != nil {
				n.Logger.Error("Error starting gRPC server", "err", err)
			}
		}()
		listeners = append(listeners, listener)

	}

	return listeners, nil

}

// startPrometheusServer starts a Prometheus HTTP server, listening for metrics
// collectors on addr.
1176 func (n *Node) startPrometheusServer(addr string) *http.Server { 1177 srv := &http.Server{ 1178 Addr: addr, 1179 Handler: promhttp.InstrumentMetricHandler( 1180 prometheus.DefaultRegisterer, promhttp.HandlerFor( 1181 prometheus.DefaultGatherer, 1182 promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections}, 1183 ), 1184 ), 1185 } 1186 go func() { 1187 if err := srv.ListenAndServe(); err != http.ErrServerClosed { 1188 // Error starting or closing listener: 1189 n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err) 1190 } 1191 }() 1192 return srv 1193 } 1194 1195 // Switch returns the Node's Switch. 1196 func (n *Node) Switch() *p2p.Switch { 1197 return n.sw 1198 } 1199 1200 // BlockStore returns the Node's BlockStore. 1201 func (n *Node) BlockStore() *store.BlockStore { 1202 return n.blockStore 1203 } 1204 1205 // ConsensusState returns the Node's ConsensusState. 1206 func (n *Node) ConsensusState() *cs.State { 1207 return n.consensusState 1208 } 1209 1210 // ConsensusReactor returns the Node's ConsensusReactor. 1211 func (n *Node) ConsensusReactor() *cs.Reactor { 1212 return n.consensusReactor 1213 } 1214 1215 // MempoolReactor returns the Node's mempool reactor. 1216 func (n *Node) MempoolReactor() *mempl.Reactor { 1217 return n.mempoolReactor 1218 } 1219 1220 // Mempool returns the Node's mempool. 1221 func (n *Node) Mempool() mempl.Mempool { 1222 return n.mempool 1223 } 1224 1225 // PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled. 1226 func (n *Node) PEXReactor() *pex.Reactor { 1227 return n.pexReactor 1228 } 1229 1230 // EvidencePool returns the Node's EvidencePool. 1231 func (n *Node) EvidencePool() *evidence.Pool { 1232 return n.evidencePool 1233 } 1234 1235 // EventBus returns the Node's EventBus. 1236 func (n *Node) EventBus() *types.EventBus { 1237 return n.eventBus 1238 } 1239 1240 // PrivValidator returns the Node's PrivValidator. 1241 // XXX: for convenience only! 
func (n *Node) PrivValidator() types.PrivValidator {
	return n.privValidator
}

// GenesisDoc returns the Node's GenesisDoc.
func (n *Node) GenesisDoc() *types.GenesisDoc {
	return n.genesisDoc
}

// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
func (n *Node) ProxyApp() proxy.AppConns {
	return n.proxyApp
}

// Config returns the Node's config.
func (n *Node) Config() *cfg.Config {
	return n.config
}

//------------------------------------------------------------------------------

// Listeners returns a human-readable description of the node's external
// p2p address.
func (n *Node) Listeners() []string {
	return []string{
		fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress),
	}
}

// IsListening reports whether the p2p transport is listening for peers.
func (n *Node) IsListening() bool {
	return n.isListening
}

// NodeInfo returns the Node's Info from the Switch.
func (n *Node) NodeInfo() p2p.NodeInfo {
	return n.nodeInfo
}

// makeNodeInfo builds the p2p.DefaultNodeInfo this node advertises to peers:
// protocol versions, chain ID, reactor channels (depending on the configured
// fastsync version and whether PEX is enabled), moniker, and listen address.
// It returns an error for an unknown fastsync version or if the resulting
// info fails validation.
func makeNodeInfo(
	config *cfg.Config,
	nodeKey *p2p.NodeKey,
	txIndexer txindex.TxIndexer,
	genDoc *types.GenesisDoc,
	state sm.State,
) (p2p.DefaultNodeInfo, error) {
	txIndexerStatus := "on"
	if _, ok := txIndexer.(*null.TxIndex); ok {
		txIndexerStatus = "off"
	}

	// Pick the blockchain channel matching the configured fastsync version.
	var bcChannel byte
	switch config.FastSync.Version {
	case "v0":
		bcChannel = bcv0.BlockchainChannel
	case "v1":
		bcChannel = bcv1.BlockchainChannel
	case "v2":
		bcChannel = bcv2.BlockchainChannel
	default:
		return p2p.DefaultNodeInfo{}, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
	}

	nodeInfo := p2p.DefaultNodeInfo{
		ProtocolVersion: p2p.NewProtocolVersion(
			version.P2PProtocol, // global
			state.Version.Consensus.Block,
			state.Version.Consensus.App,
		),
		DefaultNodeID: nodeKey.ID(),
		Network:       genDoc.ChainID,
		Version:       version.TMCoreSemVer,
		Channels: []byte{
			bcChannel,
			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
			mempl.MempoolChannel,
			evidence.EvidenceChannel,
			statesync.SnapshotChannel, statesync.ChunkChannel,
		},
		Moniker: config.Moniker,
		Other: p2p.DefaultNodeInfoOther{
			TxIndex:    txIndexerStatus,
			RPCAddress: config.RPC.ListenAddress,
		},
	}

	if config.P2P.PexReactor {
		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
	}

	// Advertise the external address when set, the listen address otherwise.
	lAddr := config.P2P.ExternalAddress

	if lAddr == "" {
		lAddr = config.P2P.ListenAddress
	}

	nodeInfo.ListenAddr = lAddr

	err := nodeInfo.Validate()
	return nodeInfo, err
}

//------------------------------------------------------------------------------

var (
	// genesisDocKey is the database key under which the genesis doc is stored.
	genesisDocKey = []byte("genesisDoc")
)

// LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
// database, or creates one using the given genesisDocProvider. On success this also
// returns the genesis doc loaded through the given provider.
func LoadStateFromDBOrGenesisDocProvider(
	stateDB dbm.DB,
	genesisDocProvider GenesisDocProvider,
) (sm.State, *types.GenesisDoc, error) {
	// Get genesis doc
	genDoc, err := loadGenesisDoc(stateDB)
	if err != nil {
		genDoc, err = genesisDocProvider()
		if err != nil {
			return sm.State{}, nil, err
		}
		// save genesis doc to prevent a certain class of user errors (e.g. when it
		// was changed, accidentally or not). Also good for audit trail.
		if err := saveGenesisDoc(stateDB, genDoc); err != nil {
			return sm.State{}, nil, err
		}
	}
	stateStore := sm.NewStore(stateDB)
	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
	if err != nil {
		return sm.State{}, nil, err
	}
	return state, genDoc, nil
}

// loadGenesisDoc reads the persisted genesis doc from db. It returns an
// error when no genesis doc is stored; it panics on a database read failure
// or if the stored bytes fail to unmarshal.
func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
	b, err := db.Get(genesisDocKey)
	if err != nil {
		panic(err)
	}
	if len(b) == 0 {
		return nil, errors.New("genesis doc not found")
	}
	var genDoc *types.GenesisDoc
	err = tmjson.Unmarshal(b, &genDoc)
	if err != nil {
		panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b))
	}
	return genDoc, nil
}

// saveGenesisDoc marshals genDoc and writes it to db under genesisDocKey,
// returning an error if marshaling or the synchronous write fails.
func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) error {
	b, err := tmjson.Marshal(genDoc)
	if err != nil {
		return fmt.Errorf("failed to save genesis doc due to marshaling error: %w", err)
	}
	if err := db.SetSync(genesisDocKey, b); err != nil {
		return err
	}

	return nil
}

// createAndStartPrivValidatorSocketClient connects to an external private
// validator process listening at listenAddr, verifies it can serve a public
// key, and wraps the signer client with retries (50 attempts, 100ms apart).
func createAndStartPrivValidatorSocketClient(
	listenAddr,
	chainID string,
	logger log.Logger,
) (types.PrivValidator, error) {
	pve, err := privval.NewSignerListener(listenAddr, logger)
	if err != nil {
		return nil, fmt.Errorf("failed to start private validator: %w", err)
	}

	pvsc, err := privval.NewSignerClient(pve, chainID)
	if err != nil {
		return nil, fmt.Errorf("failed to start private validator: %w", err)
	}

	// try to get a pubkey from the private validator first time
	_, err = pvsc.GetPubKey()
	if err != nil {
		return nil, fmt.Errorf("can't get pubkey: %w", err)
	}

	const (
		retries = 50 // 50 * 100ms = 5s total
		timeout = 100 * time.Millisecond
	)
	pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout)

	return pvscWithRetries, nil
}

// splitAndTrimEmpty slices s into all subslices separated by sep and returns a
// slice of the string s with all leading and trailing Unicode code points
// contained in cutset removed. If sep is empty, SplitAndTrim splits after each
// UTF-8 sequence. First part is equivalent to strings.SplitN with a count of
// -1. It also filters out empty strings, returning only non-empty elements.
func splitAndTrimEmpty(s, sep, cutset string) []string {
	if s == "" {
		return []string{}
	}

	spl := strings.Split(s, sep)
	nonEmptyStrings := make([]string, 0, len(spl))
	for i := 0; i < len(spl); i++ {
		element := strings.Trim(spl[i], cutset)
		if element != "" {
			nonEmptyStrings = append(nonEmptyStrings, element)
		}
	}
	return nonEmptyStrings
}