github.com/evdatsion/aphelion-dpos-bft@v0.32.1/node/node.go (about) 1 package node 2 3 import ( 4 "bytes" 5 "context" 6 "fmt" 7 "net" 8 "net/http" 9 _ "net/http/pprof" 10 "os" 11 "strings" 12 "time" 13 14 "github.com/pkg/errors" 15 "github.com/prometheus/client_golang/prometheus" 16 "github.com/prometheus/client_golang/prometheus/promhttp" 17 "github.com/rs/cors" 18 19 amino "github.com/evdatsion/go-amino" 20 abci "github.com/evdatsion/aphelion-dpos-bft/abci/types" 21 "github.com/evdatsion/aphelion-dpos-bft/blockchain" 22 bc "github.com/evdatsion/aphelion-dpos-bft/blockchain" 23 cfg "github.com/evdatsion/aphelion-dpos-bft/config" 24 "github.com/evdatsion/aphelion-dpos-bft/consensus" 25 cs "github.com/evdatsion/aphelion-dpos-bft/consensus" 26 "github.com/evdatsion/aphelion-dpos-bft/crypto/ed25519" 27 "github.com/evdatsion/aphelion-dpos-bft/evidence" 28 cmn "github.com/evdatsion/aphelion-dpos-bft/libs/common" 29 dbm "github.com/evdatsion/aphelion-dpos-bft/libs/db" 30 "github.com/evdatsion/aphelion-dpos-bft/libs/log" 31 tmpubsub "github.com/evdatsion/aphelion-dpos-bft/libs/pubsub" 32 mempl "github.com/evdatsion/aphelion-dpos-bft/mempool" 33 "github.com/evdatsion/aphelion-dpos-bft/p2p" 34 "github.com/evdatsion/aphelion-dpos-bft/p2p/pex" 35 "github.com/evdatsion/aphelion-dpos-bft/privval" 36 "github.com/evdatsion/aphelion-dpos-bft/proxy" 37 rpccore "github.com/evdatsion/aphelion-dpos-bft/rpc/core" 38 ctypes "github.com/evdatsion/aphelion-dpos-bft/rpc/core/types" 39 grpccore "github.com/evdatsion/aphelion-dpos-bft/rpc/grpc" 40 rpcserver "github.com/evdatsion/aphelion-dpos-bft/rpc/lib/server" 41 sm "github.com/evdatsion/aphelion-dpos-bft/state" 42 "github.com/evdatsion/aphelion-dpos-bft/state/txindex" 43 "github.com/evdatsion/aphelion-dpos-bft/state/txindex/kv" 44 "github.com/evdatsion/aphelion-dpos-bft/state/txindex/null" 45 "github.com/evdatsion/aphelion-dpos-bft/types" 46 tmtime "github.com/evdatsion/aphelion-dpos-bft/types/time" 47 
	"github.com/evdatsion/aphelion-dpos-bft/version"
)

// CustomReactorNamePrefix is a prefix for all custom reactors to prevent
// clashes with built-in reactors.
const CustomReactorNamePrefix = "CUSTOM_"

//------------------------------------------------------------------------------

// DBContext specifies config information for loading a new DB.
type DBContext struct {
	ID     string      // name of the database (e.g. "blockstore", "state")
	Config *cfg.Config // node configuration used to resolve backend and directory
}

// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (dbm.DB, error)

// DefaultDBProvider returns a database using the DBBackend and DBDir
// specified in the ctx.Config.
//
// NOTE: dbm.NewDB panics internally on failure, so the error returned
// here is always nil.
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
	dbType := dbm.DBBackendType(ctx.Config.DBBackend)
	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()), nil
}

// GenesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
type GenesisDocProvider func() (*types.GenesisDoc, error)

// DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
// the GenesisDoc from the config.GenesisFile() on the filesystem.
func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
	return func() (*types.GenesisDoc, error) {
		return types.GenesisDocFromFile(config.GenesisFile())
	}
}

// NodeProvider takes a config and a logger and returns a ready to go Node.
type NodeProvider func(*cfg.Config, log.Logger) (*Node, error)
91 func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) { 92 // Generate node PrivKey 93 nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile()) 94 if err != nil { 95 return nil, err 96 } 97 98 // Convert old PrivValidator if it exists. 99 oldPrivVal := config.OldPrivValidatorFile() 100 newPrivValKey := config.PrivValidatorKeyFile() 101 newPrivValState := config.PrivValidatorStateFile() 102 if _, err := os.Stat(oldPrivVal); !os.IsNotExist(err) { 103 oldPV, err := privval.LoadOldFilePV(oldPrivVal) 104 if err != nil { 105 return nil, fmt.Errorf("error reading OldPrivValidator from %v: %v\n", oldPrivVal, err) 106 } 107 logger.Info("Upgrading PrivValidator file", 108 "old", oldPrivVal, 109 "newKey", newPrivValKey, 110 "newState", newPrivValState, 111 ) 112 oldPV.Upgrade(newPrivValKey, newPrivValState) 113 } 114 115 return NewNode(config, 116 privval.LoadOrGenFilePV(newPrivValKey, newPrivValState), 117 nodeKey, 118 proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), 119 DefaultGenesisDocProviderFunc(config), 120 DefaultDBProvider, 121 DefaultMetricsProvider(config.Instrumentation), 122 logger, 123 ) 124 } 125 126 // MetricsProvider returns a consensus, p2p and mempool Metrics. 127 type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) 128 129 // DefaultMetricsProvider returns Metrics build using Prometheus client library 130 // if Prometheus is enabled. Otherwise, it returns no-op Metrics. 
// DefaultMetricsProvider returns Metrics built using the Prometheus client
// library if Prometheus is enabled. Otherwise, it returns no-op Metrics.
func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
	return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) {
		if config.Prometheus {
			// Every metric is labelled with the chain ID so multiple chains
			// can be scraped by a single Prometheus instance.
			return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID),
				p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID),
				mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID),
				sm.PrometheusMetrics(config.Namespace, "chain_id", chainID)
		}
		return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics()
	}
}

// Option sets a parameter for the node.
type Option func(*Node)

// CustomReactors allows you to add custom reactors to the node's Switch.
func CustomReactors(reactors map[string]p2p.Reactor) Option {
	return func(n *Node) {
		for name, reactor := range reactors {
			// The prefix guarantees no clash with the built-in
			// MEMPOOL/BLOCKCHAIN/CONSENSUS/EVIDENCE/PEX reactor names.
			n.sw.AddReactor(CustomReactorNamePrefix+name, reactor)
		}
	}
}
// Node is the highest level interface to a full Tendermint node.
// It includes all configuration information and running services.
type Node struct {
	cmn.BaseService

	// config
	config        *cfg.Config
	genesisDoc    *types.GenesisDoc   // initial validator set
	privValidator types.PrivValidator // local node's validator key

	// network
	transport   *p2p.MultiplexTransport
	sw          *p2p.Switch  // p2p connections
	addrBook    pex.AddrBook // known peers
	nodeInfo    p2p.NodeInfo
	nodeKey     *p2p.NodeKey // our node privkey
	isListening bool

	// services
	eventBus         *types.EventBus // pub/sub for services
	stateDB          dbm.DB
	blockStore       *bc.BlockStore         // store the blockchain to disk
	bcReactor        *bc.BlockchainReactor  // for fast-syncing
	mempoolReactor   *mempl.Reactor         // for gossipping transactions
	mempool          mempl.Mempool
	consensusState   *cs.ConsensusState     // latest consensus state
	consensusReactor *cs.ConsensusReactor   // for participating in the consensus
	pexReactor       *pex.PEXReactor        // for exchanging peer addresses
	evidencePool     *evidence.EvidencePool // tracking evidence
	proxyApp         proxy.AppConns         // connection to the application
	rpcListeners     []net.Listener         // rpc servers
	txIndexer        txindex.TxIndexer
	indexerService   *txindex.IndexerService
	prometheusSrv    *http.Server
}

// initDBs opens (or creates) the blockstore and state databases through the
// given DBProvider and returns a BlockStore wrapped around the former.
func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *bc.BlockStore, stateDB dbm.DB, err error) {
	var blockStoreDB dbm.DB
	blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
	if err != nil {
		return
	}
	blockStore = bc.NewBlockStore(blockStoreDB)

	stateDB, err = dbProvider(&DBContext{"state", config})
	if err != nil {
		return
	}

	return
}

// createAndStartProxyAppConns builds the multi-connection proxy to the ABCI
// application (consensus, mempool, query) and starts it.
func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
	proxyApp := proxy.NewAppConns(clientCreator)
	proxyApp.SetLogger(logger.With("module", "proxy"))
	if err := proxyApp.Start(); err != nil {
		return nil, fmt.Errorf("error starting proxy app connections: %v", err)
	}
	return proxyApp, nil
}

// createAndStartEventBus builds and starts the node-wide pub/sub event bus.
func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
	eventBus := types.NewEventBus()
	eventBus.SetLogger(logger.With("module", "events"))
	if err := eventBus.Start(); err != nil {
		return nil, err
	}
	return eventBus, nil
}

// createAndStartIndexerService selects a transaction indexer based on the
// config ("kv" backed by a DB, anything else is a no-op null indexer),
// wires it to the event bus, and starts the indexing service.
func createAndStartIndexerService(config *cfg.Config, dbProvider DBProvider,
	eventBus *types.EventBus, logger log.Logger) (*txindex.IndexerService, txindex.TxIndexer, error) {

	var txIndexer txindex.TxIndexer
	switch config.TxIndex.Indexer {
	case "kv":
		store, err := dbProvider(&DBContext{"tx_index", config})
		if err != nil {
			return nil, nil, err
		}
		if config.TxIndex.IndexTags != "" {
			// Index only the explicitly whitelisted tags.
			txIndexer = kv.NewTxIndex(store, kv.IndexTags(splitAndTrimEmpty(config.TxIndex.IndexTags, ",", " ")))
		} else if config.TxIndex.IndexAllTags {
			txIndexer = kv.NewTxIndex(store, kv.IndexAllTags())
		} else {
			txIndexer = kv.NewTxIndex(store)
		}
	default:
		txIndexer = &null.TxIndex{}
	}

	indexerService := txindex.NewIndexerService(txIndexer, eventBus)
	indexerService.SetLogger(logger.With("module", "txindex"))
	if err := indexerService.Start(); err != nil {
		return nil, nil, err
	}
	return indexerService, txIndexer, nil
}

// doHandshake replays blocks against the ABCI app (via the Handshaker) so
// that Tendermint and the application agree on the latest state.
func doHandshake(stateDB dbm.DB, state sm.State, blockStore sm.BlockStore,
	genDoc *types.GenesisDoc, eventBus *types.EventBus, proxyApp proxy.AppConns, consensusLogger log.Logger) error {

	handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc)
	handshaker.SetLogger(consensusLogger)
	handshaker.SetEventBus(eventBus)
	if err := handshaker.Handshake(proxyApp); err != nil {
		return fmt.Errorf("error during handshake: %v", err)
	}
	return nil
}

// logNodeStartupInfo logs version information and whether this node is a
// validator in the current validator set.
func logNodeStartupInfo(state sm.State, privValidator types.PrivValidator, logger,
	consensusLogger log.Logger) {

	// Log the version info.
	logger.Info("Version info",
		"software", version.TMCoreSemVer,
		"block", version.BlockProtocol,
		"p2p", version.P2PProtocol,
	)

	// If the state and software differ in block version, at least log it.
	if state.Version.Consensus.Block != version.BlockProtocol {
		logger.Info("Software and state have different block protocols",
			"software", version.BlockProtocol,
			"state", state.Version.Consensus.Block,
		)
	}

	pubKey := privValidator.GetPubKey()
	addr := pubKey.Address()
	// Log whether this node is a validator or an observer
	if state.Validators.HasAddress(addr) {
		consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey)
	} else {
		consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
	}
}

// onlyValidatorIsUs reports whether the validator set consists solely of
// this node. Used to decide that fast-sync is pointless.
func onlyValidatorIsUs(state sm.State, privVal types.PrivValidator) bool {
	if state.Validators.Size() > 1 {
		return false
	}
	addr, _ := state.Validators.GetByIndex(0)
	return bytes.Equal(privVal.GetPubKey().Address(), addr)
}

// createMempoolAndMempoolReactor builds the CList-backed mempool (with
// pre/post-check filters derived from the current state) and its reactor.
func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns,
	state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (*mempl.Reactor, *mempl.CListMempool) {

	mempool := mempl.NewCListMempool(
		config.Mempool,
		proxyApp.Mempool(),
		state.LastBlockHeight,
		mempl.WithMetrics(memplMetrics),
		mempl.WithPreCheck(sm.TxPreCheck(state)),
		mempl.WithPostCheck(sm.TxPostCheck(state)),
	)
	mempoolLogger := logger.With("module", "mempool")
	mempoolReactor := mempl.NewReactor(config.Mempool, mempool)
	mempoolReactor.SetLogger(mempoolLogger)

	if config.Consensus.WaitForTxs() {
		// Consensus will wait for a TxsAvailable signal before proposing
		// an empty block.
		mempool.EnableTxsAvailable()
	}
	return mempoolReactor, mempool
}
// createEvidenceReactor opens the evidence DB and builds the evidence pool
// and its gossip reactor.
func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
	stateDB dbm.DB, logger log.Logger) (*evidence.EvidenceReactor, *evidence.EvidencePool, error) {

	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
	if err != nil {
		return nil, nil, err
	}
	evidenceLogger := logger.With("module", "evidence")
	evidencePool := evidence.NewEvidencePool(stateDB, evidenceDB)
	evidencePool.SetLogger(evidenceLogger)
	evidenceReactor := evidence.NewEvidenceReactor(evidencePool)
	evidenceReactor.SetLogger(evidenceLogger)
	return evidenceReactor, evidencePool, nil
}

// createConsensusReactor builds the consensus state machine and its reactor.
// privValidator may be nil for a non-validating (observer) node.
func createConsensusReactor(config *cfg.Config,
	state sm.State,
	blockExec *sm.BlockExecutor,
	blockStore sm.BlockStore,
	mempool *mempl.CListMempool,
	evidencePool *evidence.EvidencePool,
	privValidator types.PrivValidator,
	csMetrics *cs.Metrics,
	fastSync bool,
	eventBus *types.EventBus,
	consensusLogger log.Logger) (*consensus.ConsensusReactor, *consensus.ConsensusState) {

	consensusState := cs.NewConsensusState(
		config.Consensus,
		state.Copy(),
		blockExec,
		blockStore,
		mempool,
		evidencePool,
		cs.StateMetrics(csMetrics),
	)
	consensusState.SetLogger(consensusLogger)
	if privValidator != nil {
		consensusState.SetPrivValidator(privValidator)
	}
	consensusReactor := cs.NewConsensusReactor(consensusState, fastSync, cs.ReactorMetrics(csMetrics))
	consensusReactor.SetLogger(consensusLogger)
	// services which will be publishing and/or subscribing for messages (events)
	// consensusReactor will set it on consensusState and blockExecutor
	consensusReactor.SetEventBus(eventBus)
	return consensusReactor, consensusState
}

// createTransport builds the multiplex transport together with connection
// and peer filters. When FilterPeers is enabled, peers are vetted by ABCI
// queries against /p2p/filter/addr/... and /p2p/filter/id/...
func createTransport(config *cfg.Config, nodeInfo p2p.NodeInfo, nodeKey *p2p.NodeKey, proxyApp proxy.AppConns) (*p2p.MultiplexTransport, []p2p.PeerFilterFunc) {
	var (
		mConnConfig = p2p.MConnConfig(config.P2P)
		transport   = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig)
		connFilters = []p2p.ConnFilterFunc{}
		peerFilters = []p2p.PeerFilterFunc{}
	)

	if !config.P2P.AllowDuplicateIP {
		connFilters = append(connFilters, p2p.ConnDuplicateIPFilter())
	}

	// Filter peers by addr or pubkey with an ABCI query.
	// If the query return code is OK, add peer.
	if config.FilterPeers {
		connFilters = append(
			connFilters,
			// ABCI query for address filtering.
			func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
					Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
				})
				if err != nil {
					return err
				}
				if res.IsErr() {
					return fmt.Errorf("error querying abci app: %v", res)
				}

				return nil
			},
		)

		peerFilters = append(
			peerFilters,
			// ABCI query for ID filtering.
			func(_ p2p.IPeerSet, p p2p.Peer) error {
				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
					Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
				})
				if err != nil {
					return err
				}
				if res.IsErr() {
					return fmt.Errorf("error querying abci app: %v", res)
				}

				return nil
			},
		)
	}

	// Connection filters are installed on the transport itself; peer filters
	// are returned for the Switch to apply after the handshake.
	p2p.MultiplexTransportConnFilters(connFilters...)(transport)
	return transport, peerFilters
}

// createSwitch builds the p2p Switch and registers the four built-in
// reactors (mempool, blockchain, consensus, evidence) on it.
func createSwitch(config *cfg.Config,
	transport *p2p.MultiplexTransport,
	p2pMetrics *p2p.Metrics,
	peerFilters []p2p.PeerFilterFunc,
	mempoolReactor *mempl.Reactor,
	bcReactor *blockchain.BlockchainReactor,
	consensusReactor *consensus.ConsensusReactor,
	evidenceReactor *evidence.EvidenceReactor,
	nodeInfo p2p.NodeInfo,
	nodeKey *p2p.NodeKey,
	p2pLogger log.Logger) *p2p.Switch {

	sw := p2p.NewSwitch(
		config.P2P,
		transport,
		p2p.WithMetrics(p2pMetrics),
		p2p.SwitchPeerFilters(peerFilters...),
	)
	sw.SetLogger(p2pLogger)
	sw.AddReactor("MEMPOOL", mempoolReactor)
	sw.AddReactor("BLOCKCHAIN", bcReactor)
	sw.AddReactor("CONSENSUS", consensusReactor)
	sw.AddReactor("EVIDENCE", evidenceReactor)

	sw.SetNodeInfo(nodeInfo)
	sw.SetNodeKey(nodeKey)

	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())
	return sw
}

// createAddrBookAndSetOnSwitch builds the address book, registers our own
// external/listen addresses in it (to prevent self-dialing), and attaches
// it to the Switch.
func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
	p2pLogger log.Logger, nodeKey *p2p.NodeKey) (pex.AddrBook, error) {

	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))

	// Add ourselves to addrbook to prevent dialing ourselves
	if config.P2P.ExternalAddress != "" {
		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress))
		if err != nil {
			return nil, errors.Wrap(err, "p2p.external_address is incorrect")
		}
		addrBook.AddOurAddress(addr)
	}
	if config.P2P.ListenAddress != "" {
		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress))
		if err != nil {
			return nil, errors.Wrap(err, "p2p.laddr is incorrect")
		}
		addrBook.AddOurAddress(addr)
	}

	sw.SetAddrBook(addrBook)

	return addrBook, nil
}
// createPEXReactorAndAddToSwitch builds the peer-exchange reactor (seeded
// from config) and registers it on the Switch under "PEX".
func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
	sw *p2p.Switch, logger log.Logger) *pex.PEXReactor {

	// TODO persistent peers ? so we can have their DNS addrs saved
	pexReactor := pex.NewPEXReactor(addrBook,
		&pex.PEXReactorConfig{
			Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
			SeedMode: config.P2P.SeedMode,
			// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
			// blocks assuming 10s blocks ~ 28 hours.
			// TODO (melekes): make it dynamic based on the actual block latencies
			// from the live network.
			// https://github.com/evdatsion/aphelion-dpos-bft/issues/3523
			SeedDisconnectWaitPeriod: 28 * time.Hour,
		})
	pexReactor.SetLogger(logger.With("module", "pex"))
	sw.AddReactor("PEX", pexReactor)
	return pexReactor
}

// NewNode returns a new, ready to go, Tendermint Node.
//
// The initialization order below matters: databases -> state -> proxy app
// -> event bus / indexer -> ABCI handshake (block replay) -> reactors ->
// p2p transport/switch. Do not reorder without understanding the
// dependencies between these steps.
func NewNode(config *cfg.Config,
	privValidator types.PrivValidator,
	nodeKey *p2p.NodeKey,
	clientCreator proxy.ClientCreator,
	genesisDocProvider GenesisDocProvider,
	dbProvider DBProvider,
	metricsProvider MetricsProvider,
	logger log.Logger,
	options ...Option) (*Node, error) {

	blockStore, stateDB, err := initDBs(config, dbProvider)
	if err != nil {
		return nil, err
	}

	state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
	if err != nil {
		return nil, err
	}

	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger)
	if err != nil {
		return nil, err
	}

	// EventBus and IndexerService must be started before the handshake because
	// we might need to index the txs of the replayed block as this might not have happened
	// when the node stopped last time (i.e. the node stopped after it saved the block
	// but before it indexed the txs, or, endblocker panicked)
	eventBus, err := createAndStartEventBus(logger)
	if err != nil {
		return nil, err
	}

	// Transaction indexing
	indexerService, txIndexer, err := createAndStartIndexerService(config, dbProvider, eventBus, logger)
	if err != nil {
		return nil, err
	}

	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
	// and replays any blocks as necessary to sync tendermint with the app.
	consensusLogger := logger.With("module", "consensus")
	if err := doHandshake(stateDB, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil {
		return nil, err
	}

	// Reload the state. It will have the Version.Consensus.App set by the
	// Handshake, and may have other modifications as well (ie. depending on
	// what happened during block replay).
	state = sm.LoadState(stateDB)

	// If an address is provided, listen on the socket for a connection from an
	// external signing process.
	if config.PrivValidatorListenAddr != "" {
		// FIXME: we should start services inside OnStart
		privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, logger)
		if err != nil {
			return nil, errors.Wrap(err, "error with private validator socket client")
		}
	}

	logNodeStartupInfo(state, privValidator, logger, consensusLogger)

	// Decide whether to fast-sync or not
	// We don't fast-sync when the only validator is us.
	fastSync := config.FastSync && !onlyValidatorIsUs(state, privValidator)

	csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID)

	// Make MempoolReactor
	mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)

	// Make Evidence Reactor
	evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, logger)
	if err != nil {
		return nil, err
	}

	// make block executor for consensus and blockchain reactors to execute blocks
	blockExec := sm.NewBlockExecutor(
		stateDB,
		logger.With("module", "state"),
		proxyApp.Consensus(),
		mempool,
		evidencePool,
		sm.BlockExecutorWithMetrics(smMetrics),
	)

	// Make BlockchainReactor
	bcReactor := bc.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
	bcReactor.SetLogger(logger.With("module", "blockchain"))

	// Make ConsensusReactor
	consensusReactor, consensusState := createConsensusReactor(
		config, state, blockExec, blockStore, mempool, evidencePool,
		privValidator, csMetrics, fastSync, eventBus, consensusLogger,
	)

	nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state)
	if err != nil {
		return nil, err
	}

	// Setup Transport.
	transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp)

	// Setup Switch.
	p2pLogger := logger.With("module", "p2p")
	sw := createSwitch(
		config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor,
		consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger,
	)

	err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
	if err != nil {
		return nil, errors.Wrap(err, "could not add peers from persistent_peers field")
	}

	addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
	if err != nil {
		return nil, errors.Wrap(err, "could not create addrbook")
	}

	// Optionally, start the pex reactor
	//
	// TODO:
	//
	// We need to set Seeds and PersistentPeers on the switch,
	// since it needs to be able to use these (and their DNS names)
	// even if the PEX is off. We can include the DNS name in the NetAddress,
	// but it would still be nice to have a clear list of the current "PersistentPeers"
	// somewhere that we can return with net_info.
	//
	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
	// Note we currently use the addrBook regardless at least for AddOurAddress
	var pexReactor *pex.PEXReactor
	if config.P2P.PexReactor {
		pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
	}

	if config.ProfListenAddress != "" {
		// pprof endpoints are registered by the blank import of net/http/pprof.
		go func() {
			logger.Error("Profile server", "err", http.ListenAndServe(config.ProfListenAddress, nil))
		}()
	}

	node := &Node{
		config:        config,
		genesisDoc:    genDoc,
		privValidator: privValidator,

		transport: transport,
		sw:        sw,
		addrBook:  addrBook,
		nodeInfo:  nodeInfo,
		nodeKey:   nodeKey,

		stateDB:          stateDB,
		blockStore:       blockStore,
		bcReactor:        bcReactor,
		mempoolReactor:   mempoolReactor,
		mempool:          mempool,
		consensusState:   consensusState,
		consensusReactor: consensusReactor,
		pexReactor:       pexReactor,
		evidencePool:     evidencePool,
		proxyApp:         proxyApp,
		txIndexer:        txIndexer,
		indexerService:   indexerService,
		eventBus:         eventBus,
	}
	node.BaseService = *cmn.NewBaseService(logger, "Node", node)

	for _, option := range options {
		option(node)
	}

	return node, nil
}

// OnStart starts the Node. It implements cmn.Service.
func (n *Node) OnStart() error {
	// Wait until genesis time if it is in the future; producing blocks
	// before genesis would be invalid.
	now := tmtime.Now()
	genTime := n.genesisDoc.GenesisTime
	if genTime.After(now) {
		n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime)
		time.Sleep(genTime.Sub(now))
	}

	// Add private IDs to addrbook to block those peers being added
	n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))

	// Start the RPC server before the P2P server
	// so we can eg. receive txs for the first block
	if n.config.RPC.ListenAddress != "" {
		listeners, err := n.startRPC()
		if err != nil {
			return err
		}
		n.rpcListeners = listeners
	}

	if n.config.Instrumentation.Prometheus &&
		n.config.Instrumentation.PrometheusListenAddr != "" {
		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
	}

	// Start the transport.
	addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
	if err != nil {
		return err
	}
	if err := n.transport.Listen(*addr); err != nil {
		return err
	}

	n.isListening = true

	if n.config.Mempool.WalEnabled() {
		n.mempool.InitWAL() // no need to have the mempool wal during tests
	}

	// Start the switch (the P2P server).
	err = n.sw.Start()
	if err != nil {
		return err
	}

	// Always connect to persistent peers
	err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
	if err != nil {
		return errors.Wrap(err, "could not dial peers from persistent_peers field")
	}

	return nil
}
// OnStop stops the Node. It implements cmn.Service.
// Shutdown order mirrors startup in reverse: non-reactor services first,
// then reactors (switch), then transport and external listeners.
func (n *Node) OnStop() {
	n.BaseService.OnStop()

	n.Logger.Info("Stopping Node")

	// first stop the non-reactor services
	n.eventBus.Stop()
	n.indexerService.Stop()

	// now stop the reactors
	n.sw.Stop()

	// stop mempool WAL
	if n.config.Mempool.WalEnabled() {
		n.mempool.CloseWAL()
	}

	if err := n.transport.Close(); err != nil {
		n.Logger.Error("Error closing transport", "err", err)
	}

	n.isListening = false

	// finally stop the listeners / external services
	for _, l := range n.rpcListeners {
		n.Logger.Info("Closing rpc listener", "listener", l)
		if err := l.Close(); err != nil {
			n.Logger.Error("Error closing listener", "listener", l, "err", err)
		}
	}

	// The private validator may be a socket client running as a service.
	if pvsc, ok := n.privValidator.(cmn.Service); ok {
		pvsc.Stop()
	}

	if n.prometheusSrv != nil {
		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
			// Error from closing listeners, or context timeout:
			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
		}
	}
}

// ConfigureRPC sets all variables in rpccore so they will serve
// rpc calls from this node
func (n *Node) ConfigureRPC() {
	rpccore.SetStateDB(n.stateDB)
	rpccore.SetBlockStore(n.blockStore)
	rpccore.SetConsensusState(n.consensusState)
	rpccore.SetMempool(n.mempool)
	rpccore.SetEvidencePool(n.evidencePool)
	rpccore.SetP2PPeers(n.sw)
	rpccore.SetP2PTransport(n)
	pubKey := n.privValidator.GetPubKey()
	rpccore.SetPubKey(pubKey)
	rpccore.SetGenesisDoc(n.genesisDoc)
	rpccore.SetAddrBook(n.addrBook)
	rpccore.SetProxyAppQuery(n.proxyApp.Query())
	rpccore.SetTxIndexer(n.txIndexer)
	rpccore.SetConsensusReactor(n.consensusReactor)
	rpccore.SetEventBus(n.eventBus)
	rpccore.SetLogger(n.Logger.With("module", "rpc"))
	rpccore.SetConfig(*n.config.RPC)
}

// startRPC starts the JSON-RPC/WebSocket servers (one per configured
// listen address — possibly both a unix and a tcp socket) and, if
// configured, the gRPC server. It returns all listeners so OnStop can
// close them.
func (n *Node) startRPC() ([]net.Listener, error) {
	n.ConfigureRPC()
	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")
	coreCodec := amino.NewCodec()
	ctypes.RegisterAmino(coreCodec)

	if n.config.RPC.Unsafe {
		rpccore.AddUnsafeRoutes()
	}

	// we may expose the rpc over both a unix and tcp socket
	listeners := make([]net.Listener, len(listenAddrs))
	for i, listenAddr := range listenAddrs {
		mux := http.NewServeMux()
		rpcLogger := n.Logger.With("module", "rpc-server")
		wmLogger := rpcLogger.With("protocol", "websocket")
		wm := rpcserver.NewWebsocketManager(rpccore.Routes, coreCodec,
			rpcserver.OnDisconnect(func(remoteAddr string) {
				// Clean up all event subscriptions held by the departed client.
				err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
				if err != nil && err != tmpubsub.ErrSubscriptionNotFound {
					wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err)
				}
			}))
		wm.SetLogger(wmLogger)
		mux.HandleFunc("/websocket", wm.WebsocketHandler)
		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, coreCodec, rpcLogger)

		config := rpcserver.DefaultConfig()
		config.MaxOpenConnections = n.config.RPC.MaxOpenConnections
		// If necessary adjust global WriteTimeout to ensure it's greater than
		// TimeoutBroadcastTxCommit.
		// See https://github.com/evdatsion/aphelion-dpos-bft/issues/3435
		if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
			config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
		}

		listener, err := rpcserver.Listen(
			listenAddr,
			config,
		)
		if err != nil {
			return nil, err
		}

		var rootHandler http.Handler = mux
		if n.config.RPC.IsCorsEnabled() {
			corsMiddleware := cors.New(cors.Options{
				AllowedOrigins: n.config.RPC.CORSAllowedOrigins,
				AllowedMethods: n.config.RPC.CORSAllowedMethods,
				AllowedHeaders: n.config.RPC.CORSAllowedHeaders,
			})
			rootHandler = corsMiddleware.Handler(mux)
		}
		if n.config.RPC.IsTLSEnabled() {
			go rpcserver.StartHTTPAndTLSServer(
				listener,
				rootHandler,
				n.config.RPC.CertFile(),
				n.config.RPC.KeyFile(),
				rpcLogger,
				config,
			)
		} else {
			go rpcserver.StartHTTPServer(
				listener,
				rootHandler,
				rpcLogger,
				config,
			)
		}

		listeners[i] = listener
	}

	// we expose a simplified api over grpc for convenience to app devs
	grpcListenAddr := n.config.RPC.GRPCListenAddress
	if grpcListenAddr != "" {
		config := rpcserver.DefaultConfig()
		config.MaxOpenConnections = n.config.RPC.MaxOpenConnections
		listener, err := rpcserver.Listen(grpcListenAddr, config)
		if err != nil {
			return nil, err
		}
		go grpccore.StartGRPCServer(listener)
		listeners = append(listeners, listener)
	}

	return listeners, nil
}
// startPrometheusServer starts a Prometheus HTTP server, listening for metrics
// collectors on addr. The returned server is shut down in OnStop.
func (n *Node) startPrometheusServer(addr string) *http.Server {
	srv := &http.Server{
		Addr: addr,
		Handler: promhttp.InstrumentMetricHandler(
			prometheus.DefaultRegisterer, promhttp.HandlerFor(
				prometheus.DefaultGatherer,
				promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections},
			),
		),
	}
	go func() {
		// ErrServerClosed is the expected result of a graceful Shutdown.
		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
			// Error starting or closing listener:
			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
		}
	}()
	return srv
}

// Switch returns the Node's Switch.
func (n *Node) Switch() *p2p.Switch {
	return n.sw
}

// BlockStore returns the Node's BlockStore.
func (n *Node) BlockStore() *bc.BlockStore {
	return n.blockStore
}

// ConsensusState returns the Node's ConsensusState.
func (n *Node) ConsensusState() *cs.ConsensusState {
	return n.consensusState
}

// ConsensusReactor returns the Node's ConsensusReactor.
func (n *Node) ConsensusReactor() *cs.ConsensusReactor {
	return n.consensusReactor
}

// MempoolReactor returns the Node's mempool reactor.
func (n *Node) MempoolReactor() *mempl.Reactor {
	return n.mempoolReactor
}

// Mempool returns the Node's mempool.
func (n *Node) Mempool() mempl.Mempool {
	return n.mempool
}

// PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled.
func (n *Node) PEXReactor() *pex.PEXReactor {
	return n.pexReactor
}

// EvidencePool returns the Node's EvidencePool.
func (n *Node) EvidencePool() *evidence.EvidencePool {
	return n.evidencePool
}

// EventBus returns the Node's EventBus.
func (n *Node) EventBus() *types.EventBus {
	return n.eventBus
}

// PrivValidator returns the Node's PrivValidator.
// XXX: for convenience only!
func (n *Node) PrivValidator() types.PrivValidator {
	return n.privValidator
}

// GenesisDoc returns the Node's GenesisDoc.
func (n *Node) GenesisDoc() *types.GenesisDoc {
	return n.genesisDoc
}

// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
func (n *Node) ProxyApp() proxy.AppConns {
	return n.proxyApp
}

// Config returns the Node's config.
func (n *Node) Config() *cfg.Config {
	return n.config
}

//------------------------------------------------------------------------------

// Listeners returns a human-readable description of the addresses this
// node listens on (part of the p2p transport interface).
func (n *Node) Listeners() []string {
	return []string{
		fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress),
	}
}

// IsListening reports whether the p2p transport has been started.
func (n *Node) IsListening() bool {
	return n.isListening
}

// NodeInfo returns the Node's Info from the Switch.
func (n *Node) NodeInfo() p2p.NodeInfo {
	return n.nodeInfo
}

// makeNodeInfo assembles the DefaultNodeInfo advertised to peers during the
// p2p handshake: protocol versions, chain ID, supported channels, moniker,
// and the advertised listen address.
func makeNodeInfo(
	config *cfg.Config,
	nodeKey *p2p.NodeKey,
	txIndexer txindex.TxIndexer,
	genDoc *types.GenesisDoc,
	state sm.State,
) (p2p.NodeInfo, error) {
	txIndexerStatus := "on"
	if _, ok := txIndexer.(*null.TxIndex); ok {
		txIndexerStatus = "off"
	}
	nodeInfo := p2p.DefaultNodeInfo{
		ProtocolVersion: p2p.NewProtocolVersion(
			version.P2PProtocol, // global
			state.Version.Consensus.Block,
			state.Version.Consensus.App,
		),
		ID_:     nodeKey.ID(),
		Network: genDoc.ChainID,
		Version: version.TMCoreSemVer,
		Channels: []byte{
			bc.BlockchainChannel,
			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
			mempl.MempoolChannel,
			evidence.EvidenceChannel,
		},
		Moniker: config.Moniker,
		Other: p2p.DefaultNodeInfoOther{
			TxIndex:    txIndexerStatus,
			RPCAddress: config.RPC.ListenAddress,
		},
	}

	if config.P2P.PexReactor {
		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
	}

	lAddr := config.P2P.ExternalAddress

	if lAddr == "" {
		lAddr = config.P2P.ListenAddress
	}

	nodeInfo.ListenAddr = lAddr

	err := nodeInfo.Validate()
	return nodeInfo, err
}
append(nodeInfo.Channels, pex.PexChannel) 1042 } 1043 1044 lAddr := config.P2P.ExternalAddress 1045 1046 if lAddr == "" { 1047 lAddr = config.P2P.ListenAddress 1048 } 1049 1050 nodeInfo.ListenAddr = lAddr 1051 1052 err := nodeInfo.Validate() 1053 return nodeInfo, err 1054 } 1055 1056 //------------------------------------------------------------------------------ 1057 1058 var ( 1059 genesisDocKey = []byte("genesisDoc") 1060 ) 1061 1062 // LoadStateFromDBOrGenesisDocProvider attempts to load the state from the 1063 // database, or creates one using the given genesisDocProvider and persists the 1064 // result to the database. On success this also returns the genesis doc loaded 1065 // through the given provider. 1066 func LoadStateFromDBOrGenesisDocProvider(stateDB dbm.DB, genesisDocProvider GenesisDocProvider) (sm.State, *types.GenesisDoc, error) { 1067 // Get genesis doc 1068 genDoc, err := loadGenesisDoc(stateDB) 1069 if err != nil { 1070 genDoc, err = genesisDocProvider() 1071 if err != nil { 1072 return sm.State{}, nil, err 1073 } 1074 // save genesis doc to prevent a certain class of user errors (e.g. when it 1075 // was changed, accidentally or not). Also good for audit trail. 
1076 saveGenesisDoc(stateDB, genDoc) 1077 } 1078 state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) 1079 if err != nil { 1080 return sm.State{}, nil, err 1081 } 1082 return state, genDoc, nil 1083 } 1084 1085 // panics if failed to unmarshal bytes 1086 func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) { 1087 b := db.Get(genesisDocKey) 1088 if len(b) == 0 { 1089 return nil, errors.New("Genesis doc not found") 1090 } 1091 var genDoc *types.GenesisDoc 1092 err := cdc.UnmarshalJSON(b, &genDoc) 1093 if err != nil { 1094 panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b)) 1095 } 1096 return genDoc, nil 1097 } 1098 1099 // panics if failed to marshal the given genesis document 1100 func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) { 1101 b, err := cdc.MarshalJSON(genDoc) 1102 if err != nil { 1103 panic(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err)) 1104 } 1105 db.SetSync(genesisDocKey, b) 1106 } 1107 1108 func createAndStartPrivValidatorSocketClient( 1109 listenAddr string, 1110 logger log.Logger, 1111 ) (types.PrivValidator, error) { 1112 var listener net.Listener 1113 1114 protocol, address := cmn.ProtocolAndAddress(listenAddr) 1115 ln, err := net.Listen(protocol, address) 1116 if err != nil { 1117 return nil, err 1118 } 1119 switch protocol { 1120 case "unix": 1121 listener = privval.NewUnixListener(ln) 1122 case "tcp": 1123 // TODO: persist this key so external signer 1124 // can actually authenticate us 1125 listener = privval.NewTCPListener(ln, ed25519.GenPrivKey()) 1126 default: 1127 return nil, fmt.Errorf( 1128 "wrong listen address: expected either 'tcp' or 'unix' protocols, got %s", 1129 protocol, 1130 ) 1131 } 1132 1133 pvsc := privval.NewSignerValidatorEndpoint(logger.With("module", "privval"), listener) 1134 if err := pvsc.Start(); err != nil { 1135 return nil, errors.Wrap(err, "failed to start private validator") 1136 } 1137 1138 return pvsc, nil 1139 } 1140 
// splitAndTrimEmpty splits s around each instance of sep, trims every piece of
// all leading and trailing Unicode code points contained in cutset, and drops
// pieces that are empty after trimming. An empty s yields an empty (non-nil)
// slice. If sep is empty, the split happens after each UTF-8 sequence, as in
// strings.Split.
func splitAndTrimEmpty(s, sep, cutset string) []string {
	if s == "" {
		return []string{}
	}

	parts := strings.Split(s, sep)
	out := make([]string, 0, len(parts))
	for _, part := range parts {
		if trimmed := strings.Trim(part, cutset); trimmed != "" {
			out = append(out, trimmed)
		}
	}
	return out
}