github.com/516108736/tendermint@v0.36.0/node/node.go (about)

     1  package node
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"errors"
     7  	"fmt"
     8  	"net"
     9  	"net/http"
    10  	_ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
    11  	"strings"
    12  	"time"
    13  
    14  	"github.com/prometheus/client_golang/prometheus"
    15  	"github.com/prometheus/client_golang/prometheus/promhttp"
    16  	"github.com/rs/cors"
    17  
    18  	dbm "github.com/tendermint/tm-db"
    19  
    20  	abci "github.com/tendermint/tendermint/abci/types"
    21  	bcv0 "github.com/tendermint/tendermint/blockchain/v0"
    22  	bcv1 "github.com/tendermint/tendermint/blockchain/v1"
    23  	bcv2 "github.com/tendermint/tendermint/blockchain/v2"
    24  	cfg "github.com/tendermint/tendermint/config"
    25  	cs "github.com/tendermint/tendermint/consensus"
    26  	"github.com/tendermint/tendermint/crypto"
    27  	"github.com/tendermint/tendermint/evidence"
    28  	tmjson "github.com/tendermint/tendermint/libs/json"
    29  	"github.com/tendermint/tendermint/libs/log"
    30  	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
    31  	"github.com/tendermint/tendermint/libs/service"
    32  	"github.com/tendermint/tendermint/light"
    33  	mempl "github.com/tendermint/tendermint/mempool"
    34  	"github.com/tendermint/tendermint/p2p"
    35  	"github.com/tendermint/tendermint/p2p/pex"
    36  	"github.com/tendermint/tendermint/privval"
    37  	"github.com/tendermint/tendermint/proxy"
    38  	rpccore "github.com/tendermint/tendermint/rpc/core"
    39  	grpccore "github.com/tendermint/tendermint/rpc/grpc"
    40  	rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server"
    41  	sm "github.com/tendermint/tendermint/state"
    42  	"github.com/tendermint/tendermint/state/txindex"
    43  	"github.com/tendermint/tendermint/state/txindex/kv"
    44  	"github.com/tendermint/tendermint/state/txindex/null"
    45  	"github.com/tendermint/tendermint/statesync"
    46  	"github.com/tendermint/tendermint/store"
    47  	"github.com/tendermint/tendermint/types"
    48  	tmtime "github.com/tendermint/tendermint/types/time"
    49  	"github.com/tendermint/tendermint/version"
    50  )
    51  
    52  //------------------------------------------------------------------------------
    53  
// DBContext specifies config information for loading a new DB.
// It carries the database identifier (e.g. "blockstore", "state",
// "tx_index", "evidence") together with the node config, from which a
// DBProvider derives the backend type and directory.
type DBContext struct {
	ID     string      // name of the database to open
	Config *cfg.Config // node configuration (supplies DBBackend and DBDir)
}
    59  
// DBProvider takes a DBContext and returns an instantiated DB.
// It is the injection point that lets callers substitute alternative
// database backends (e.g. in-memory DBs for tests); see DefaultDBProvider.
type DBProvider func(*DBContext) (dbm.DB, error)
    62  
    63  // DefaultDBProvider returns a database using the DBBackend and DBDir
    64  // specified in the ctx.Config.
    65  func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
    66  	dbType := dbm.BackendType(ctx.Config.DBBackend)
    67  	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
    68  }
    69  
// GenesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
// See DefaultGenesisDocProviderFunc for the filesystem-backed default.
type GenesisDocProvider func() (*types.GenesisDoc, error)
    74  
    75  // DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
    76  // the GenesisDoc from the config.GenesisFile() on the filesystem.
    77  func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
    78  	return func() (*types.GenesisDoc, error) {
    79  		return types.GenesisDocFromFile(config.GenesisFile())
    80  	}
    81  }
    82  
// Provider takes a config and a logger and returns a ready to go Node.
// DefaultNewNode is the standard implementation.
type Provider func(*cfg.Config, log.Logger) (*Node, error)
    85  
    86  // DefaultNewNode returns a Tendermint node with default settings for the
    87  // PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
    88  // It implements NodeProvider.
    89  func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
    90  	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
    91  	if err != nil {
    92  		return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err)
    93  	}
    94  
    95  	return NewNode(config,
    96  		privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
    97  		nodeKey,
    98  		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
    99  		DefaultGenesisDocProviderFunc(config),
   100  		DefaultDBProvider,
   101  		DefaultMetricsProvider(config.Instrumentation),
   102  		logger,
   103  	)
   104  }
   105  
// MetricsProvider returns a consensus, p2p and mempool Metrics.
// The chainID is attached as a label to every metric; see
// DefaultMetricsProvider for the Prometheus-backed implementation.
type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics)
   108  
   109  // DefaultMetricsProvider returns Metrics build using Prometheus client library
   110  // if Prometheus is enabled. Otherwise, it returns no-op Metrics.
   111  func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
   112  	return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) {
   113  		if config.Prometheus {
   114  			return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   115  				p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   116  				mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   117  				sm.PrometheusMetrics(config.Namespace, "chain_id", chainID)
   118  		}
   119  		return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics()
   120  	}
   121  }
   122  
// Option sets a parameter for the node.
// Options are accepted by NewNode (variadic `options ...Option`); see
// CustomReactors and StateProvider for examples.
type Option func(*Node)
   125  
// Temporary interface for switching to fast sync, we should get rid of v0 and v1 reactors.
// See: https://github.com/tendermint/tendermint/issues/4595
//
// Used by startStateSync to hand control over to fast sync once a
// state-sync snapshot has been restored.
type fastSyncReactor interface {
	// SwitchToFastSync transitions the reactor into fast-sync mode,
	// starting from the given (freshly bootstrapped) state.
	SwitchToFastSync(sm.State) error
}
   131  
   132  // CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to
   133  // the node's Switch.
   134  //
   135  // WARNING: using any name from the below list of the existing reactors will
   136  // result in replacing it with the custom one.
   137  //
   138  //  - MEMPOOL
   139  //  - BLOCKCHAIN
   140  //  - CONSENSUS
   141  //  - EVIDENCE
   142  //  - PEX
   143  //  - STATESYNC
   144  func CustomReactors(reactors map[string]p2p.Reactor) Option {
   145  	return func(n *Node) {
   146  		for name, reactor := range reactors {
   147  			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
   148  				n.sw.Logger.Info("Replacing existing reactor with a custom one",
   149  					"name", name, "existing", existingReactor, "custom", reactor)
   150  				n.sw.RemoveReactor(name, existingReactor)
   151  			}
   152  			n.sw.AddReactor(name, reactor)
   153  		}
   154  	}
   155  }
   156  
   157  // StateProvider overrides the state provider used by state sync to retrieve trusted app hashes and
   158  // build a State object for bootstrapping the node.
   159  // WARNING: this interface is considered unstable and subject to change.
   160  func StateProvider(stateProvider statesync.StateProvider) Option {
   161  	return func(n *Node) {
   162  		n.stateSyncProvider = stateProvider
   163  	}
   164  }
   165  
   166  //------------------------------------------------------------------------------
   167  
// Node is the highest level interface to a full Tendermint node.
// It includes all configuration information and running services.
type Node struct {
	service.BaseService

	// config
	config        *cfg.Config
	genesisDoc    *types.GenesisDoc   // initial validator set
	privValidator types.PrivValidator // local node's validator key

	// network
	transport   *p2p.MultiplexTransport
	sw          *p2p.Switch  // p2p connections
	addrBook    pex.AddrBook // known peers
	nodeInfo    p2p.NodeInfo // this node's identity and capabilities, advertised to peers
	nodeKey     *p2p.NodeKey // our node privkey
	isListening bool

	// services
	eventBus          *types.EventBus // pub/sub for services
	stateStore        sm.Store        // persistent consensus state (loaded/bootstrapped on startup)
	blockStore        *store.BlockStore // store the blockchain to disk
	bcReactor         p2p.Reactor       // for fast-syncing
	mempoolReactor    *mempl.Reactor    // for gossipping transactions
	mempool           mempl.Mempool     // pending transactions awaiting inclusion in a block
	stateSync         bool                    // whether the node should state sync on startup
	stateSyncReactor  *statesync.Reactor      // for hosting and restoring state sync snapshots
	stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node
	stateSyncGenesis  sm.State                // provides the genesis state for state sync
	consensusState    *cs.State               // latest consensus state
	consensusReactor  *cs.Reactor             // for participating in the consensus
	pexReactor        *pex.Reactor            // for exchanging peer addresses
	evidencePool      *evidence.Pool          // tracking evidence
	proxyApp          proxy.AppConns          // connection to the application
	rpcListeners      []net.Listener          // rpc servers
	txIndexer         txindex.TxIndexer       // indexes transactions (kv or null, per config)
	indexerService    *txindex.IndexerService // feeds eventBus events into txIndexer
	prometheusSrv     *http.Server            // NOTE(review): presumably the Prometheus metrics endpoint — set outside this view
}
   207  
   208  func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
   209  	var blockStoreDB dbm.DB
   210  	blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
   211  	if err != nil {
   212  		return
   213  	}
   214  	blockStore = store.NewBlockStore(blockStoreDB)
   215  
   216  	stateDB, err = dbProvider(&DBContext{"state", config})
   217  	if err != nil {
   218  		return
   219  	}
   220  
   221  	return
   222  }
   223  
   224  func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
   225  	proxyApp := proxy.NewAppConns(clientCreator)
   226  	proxyApp.SetLogger(logger.With("module", "proxy"))
   227  	if err := proxyApp.Start(); err != nil {
   228  		return nil, fmt.Errorf("error starting proxy app connections: %v", err)
   229  	}
   230  	return proxyApp, nil
   231  }
   232  
   233  func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
   234  	eventBus := types.NewEventBus()
   235  	eventBus.SetLogger(logger.With("module", "events"))
   236  	if err := eventBus.Start(); err != nil {
   237  		return nil, err
   238  	}
   239  	return eventBus, nil
   240  }
   241  
   242  func createAndStartIndexerService(config *cfg.Config, dbProvider DBProvider,
   243  	eventBus *types.EventBus, logger log.Logger) (*txindex.IndexerService, txindex.TxIndexer, error) {
   244  
   245  	var txIndexer txindex.TxIndexer
   246  	switch config.TxIndex.Indexer {
   247  	case "kv":
   248  		store, err := dbProvider(&DBContext{"tx_index", config})
   249  		if err != nil {
   250  			return nil, nil, err
   251  		}
   252  		txIndexer = kv.NewTxIndex(store)
   253  	default:
   254  		txIndexer = &null.TxIndex{}
   255  	}
   256  
   257  	indexerService := txindex.NewIndexerService(txIndexer, eventBus)
   258  	indexerService.SetLogger(logger.With("module", "txindex"))
   259  	if err := indexerService.Start(); err != nil {
   260  		return nil, nil, err
   261  	}
   262  	return indexerService, txIndexer, nil
   263  }
   264  
   265  func doHandshake(
   266  	stateStore sm.Store,
   267  	state sm.State,
   268  	blockStore sm.BlockStore,
   269  	genDoc *types.GenesisDoc,
   270  	eventBus types.BlockEventPublisher,
   271  	proxyApp proxy.AppConns,
   272  	consensusLogger log.Logger) error {
   273  
   274  	handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc)
   275  	handshaker.SetLogger(consensusLogger)
   276  	handshaker.SetEventBus(eventBus)
   277  	if err := handshaker.Handshake(proxyApp); err != nil {
   278  		return fmt.Errorf("error during handshake: %v", err)
   279  	}
   280  	return nil
   281  }
   282  
   283  func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) {
   284  	// Log the version info.
   285  	logger.Info("Version info",
   286  		"software", version.TMCoreSemVer,
   287  		"block", version.BlockProtocol,
   288  		"p2p", version.P2PProtocol,
   289  	)
   290  
   291  	// If the state and software differ in block version, at least log it.
   292  	if state.Version.Consensus.Block != version.BlockProtocol {
   293  		logger.Info("Software and state have different block protocols",
   294  			"software", version.BlockProtocol,
   295  			"state", state.Version.Consensus.Block,
   296  		)
   297  	}
   298  
   299  	addr := pubKey.Address()
   300  	// Log whether this node is a validator or an observer
   301  	if state.Validators.HasAddress(addr) {
   302  		consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey)
   303  	} else {
   304  		consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
   305  	}
   306  }
   307  
   308  func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
   309  	if state.Validators.Size() > 1 {
   310  		return false
   311  	}
   312  	addr, _ := state.Validators.GetByIndex(0)
   313  	return bytes.Equal(pubKey.Address(), addr)
   314  }
   315  
   316  func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns,
   317  	state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (*mempl.Reactor, *mempl.CListMempool) {
   318  
   319  	mempool := mempl.NewCListMempool(
   320  		config.Mempool,
   321  		proxyApp.Mempool(),
   322  		state.LastBlockHeight,
   323  		mempl.WithMetrics(memplMetrics),
   324  		mempl.WithPreCheck(sm.TxPreCheck(state)),
   325  		mempl.WithPostCheck(sm.TxPostCheck(state)),
   326  	)
   327  	mempoolLogger := logger.With("module", "mempool")
   328  	mempoolReactor := mempl.NewReactor(config.Mempool, mempool)
   329  	mempoolReactor.SetLogger(mempoolLogger)
   330  
   331  	if config.Consensus.WaitForTxs() {
   332  		mempool.EnableTxsAvailable()
   333  	}
   334  	return mempoolReactor, mempool
   335  }
   336  
   337  func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
   338  	stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) {
   339  
   340  	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
   341  	if err != nil {
   342  		return nil, nil, err
   343  	}
   344  	evidenceLogger := logger.With("module", "evidence")
   345  	evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB), blockStore)
   346  	if err != nil {
   347  		return nil, nil, err
   348  	}
   349  	evidenceReactor := evidence.NewReactor(evidencePool)
   350  	evidenceReactor.SetLogger(evidenceLogger)
   351  	return evidenceReactor, evidencePool, nil
   352  }
   353  
   354  func createBlockchainReactor(config *cfg.Config,
   355  	state sm.State,
   356  	blockExec *sm.BlockExecutor,
   357  	blockStore *store.BlockStore,
   358  	fastSync bool,
   359  	logger log.Logger) (bcReactor p2p.Reactor, err error) {
   360  
   361  	switch config.FastSync.Version {
   362  	case "v0":
   363  		bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   364  	case "v1":
   365  		bcReactor = bcv1.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   366  	case "v2":
   367  		bcReactor = bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   368  	default:
   369  		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
   370  	}
   371  
   372  	bcReactor.SetLogger(logger.With("module", "blockchain"))
   373  	return bcReactor, nil
   374  }
   375  
   376  func createConsensusReactor(config *cfg.Config,
   377  	state sm.State,
   378  	blockExec *sm.BlockExecutor,
   379  	blockStore sm.BlockStore,
   380  	mempool *mempl.CListMempool,
   381  	evidencePool *evidence.Pool,
   382  	privValidator types.PrivValidator,
   383  	csMetrics *cs.Metrics,
   384  	waitSync bool,
   385  	eventBus *types.EventBus,
   386  	consensusLogger log.Logger) (*cs.Reactor, *cs.State) {
   387  
   388  	consensusState := cs.NewState(
   389  		config.Consensus,
   390  		state.Copy(),
   391  		blockExec,
   392  		blockStore,
   393  		mempool,
   394  		evidencePool,
   395  		cs.StateMetrics(csMetrics),
   396  	)
   397  	consensusState.SetLogger(consensusLogger)
   398  	if privValidator != nil {
   399  		consensusState.SetPrivValidator(privValidator)
   400  	}
   401  	consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics))
   402  	consensusReactor.SetLogger(consensusLogger)
   403  	// services which will be publishing and/or subscribing for messages (events)
   404  	// consensusReactor will set it on consensusState and blockExecutor
   405  	consensusReactor.SetEventBus(eventBus)
   406  	return consensusReactor, consensusState
   407  }
   408  
   409  func createTransport(
   410  	config *cfg.Config,
   411  	nodeInfo p2p.NodeInfo,
   412  	nodeKey *p2p.NodeKey,
   413  	proxyApp proxy.AppConns,
   414  ) (
   415  	*p2p.MultiplexTransport,
   416  	[]p2p.PeerFilterFunc,
   417  ) {
   418  	var (
   419  		mConnConfig = p2p.MConnConfig(config.P2P)
   420  		transport   = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig)
   421  		connFilters = []p2p.ConnFilterFunc{}
   422  		peerFilters = []p2p.PeerFilterFunc{}
   423  	)
   424  
   425  	if !config.P2P.AllowDuplicateIP {
   426  		connFilters = append(connFilters, p2p.ConnDuplicateIPFilter())
   427  	}
   428  
   429  	// Filter peers by addr or pubkey with an ABCI query.
   430  	// If the query return code is OK, add peer.
   431  	if config.FilterPeers {
   432  		connFilters = append(
   433  			connFilters,
   434  			// ABCI query for address filtering.
   435  			func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
   436  				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
   437  					Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
   438  				})
   439  				if err != nil {
   440  					return err
   441  				}
   442  				if res.IsErr() {
   443  					return fmt.Errorf("error querying abci app: %v", res)
   444  				}
   445  
   446  				return nil
   447  			},
   448  		)
   449  
   450  		peerFilters = append(
   451  			peerFilters,
   452  			// ABCI query for ID filtering.
   453  			func(_ p2p.IPeerSet, p p2p.Peer) error {
   454  				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
   455  					Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
   456  				})
   457  				if err != nil {
   458  					return err
   459  				}
   460  				if res.IsErr() {
   461  					return fmt.Errorf("error querying abci app: %v", res)
   462  				}
   463  
   464  				return nil
   465  			},
   466  		)
   467  	}
   468  
   469  	p2p.MultiplexTransportConnFilters(connFilters...)(transport)
   470  
   471  	// Limit the number of incoming connections.
   472  	max := config.P2P.MaxNumInboundPeers + len(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
   473  	p2p.MultiplexTransportMaxIncomingConnections(max)(transport)
   474  
   475  	return transport, peerFilters
   476  }
   477  
   478  func createSwitch(config *cfg.Config,
   479  	transport p2p.Transport,
   480  	p2pMetrics *p2p.Metrics,
   481  	peerFilters []p2p.PeerFilterFunc,
   482  	mempoolReactor *mempl.Reactor,
   483  	bcReactor p2p.Reactor,
   484  	stateSyncReactor *statesync.Reactor,
   485  	consensusReactor *cs.Reactor,
   486  	evidenceReactor *evidence.Reactor,
   487  	nodeInfo p2p.NodeInfo,
   488  	nodeKey *p2p.NodeKey,
   489  	p2pLogger log.Logger) *p2p.Switch {
   490  
   491  	sw := p2p.NewSwitch(
   492  		config.P2P,
   493  		transport,
   494  		p2p.WithMetrics(p2pMetrics),
   495  		p2p.SwitchPeerFilters(peerFilters...),
   496  	)
   497  	sw.SetLogger(p2pLogger)
   498  	sw.AddReactor("MEMPOOL", mempoolReactor)
   499  	sw.AddReactor("BLOCKCHAIN", bcReactor)
   500  	sw.AddReactor("CONSENSUS", consensusReactor)
   501  	sw.AddReactor("EVIDENCE", evidenceReactor)
   502  	sw.AddReactor("STATESYNC", stateSyncReactor)
   503  
   504  	sw.SetNodeInfo(nodeInfo)
   505  	sw.SetNodeKey(nodeKey)
   506  
   507  	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())
   508  	return sw
   509  }
   510  
   511  func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
   512  	p2pLogger log.Logger, nodeKey *p2p.NodeKey) (pex.AddrBook, error) {
   513  
   514  	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
   515  	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
   516  
   517  	// Add ourselves to addrbook to prevent dialing ourselves
   518  	if config.P2P.ExternalAddress != "" {
   519  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress))
   520  		if err != nil {
   521  			return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err)
   522  		}
   523  		addrBook.AddOurAddress(addr)
   524  	}
   525  	if config.P2P.ListenAddress != "" {
   526  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress))
   527  		if err != nil {
   528  			return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err)
   529  		}
   530  		addrBook.AddOurAddress(addr)
   531  	}
   532  
   533  	sw.SetAddrBook(addrBook)
   534  
   535  	return addrBook, nil
   536  }
   537  
   538  func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
   539  	sw *p2p.Switch, logger log.Logger) *pex.Reactor {
   540  
   541  	// TODO persistent peers ? so we can have their DNS addrs saved
   542  	pexReactor := pex.NewReactor(addrBook,
   543  		&pex.ReactorConfig{
   544  			Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
   545  			SeedMode: config.P2P.SeedMode,
   546  			// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
   547  			// blocks assuming 10s blocks ~ 28 hours.
   548  			// TODO (melekes): make it dynamic based on the actual block latencies
   549  			// from the live network.
   550  			// https://github.com/tendermint/tendermint/issues/3523
   551  			SeedDisconnectWaitPeriod:     28 * time.Hour,
   552  			PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
   553  		})
   554  	pexReactor.SetLogger(logger.With("module", "pex"))
   555  	sw.AddReactor("PEX", pexReactor)
   556  	return pexReactor
   557  }
   558  
// startStateSync starts an asynchronous state sync process, then switches to fast sync mode.
//
// If no stateProvider is injected, a light-client-backed provider is built
// from the configured RPC servers and trust options (setup bounded by a
// 10-second timeout). The sync itself runs in a background goroutine:
// errors there are logged, not returned, since this function only kicks the
// process off. On success the sequence is: bootstrap the state store, save
// the last seen commit, then hand off to fast sync (if requested) or
// directly to consensus — this ordering matters for restart safety.
func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor,
	stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool,
	stateStore sm.Store, blockStore *store.BlockStore, state sm.State) error {
	ssR.Logger.Info("Starting state sync")

	if stateProvider == nil {
		var err error
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		stateProvider, err = statesync.NewLightClientStateProvider(
			ctx,
			state.ChainID, state.Version, state.InitialHeight,
			config.RPCServers, light.TrustOptions{
				Period: config.TrustPeriod,
				Height: config.TrustHeight,
				Hash:   config.TrustHashBytes(),
			}, ssR.Logger.With("module", "light"))
		if err != nil {
			return fmt.Errorf("failed to set up light client state provider: %w", err)
		}
	}

	go func() {
		// Note: this `state` shadows the outer parameter — it is the fresh
		// state restored from the snapshot, not the local genesis state.
		state, commit, err := ssR.Sync(stateProvider, config.DiscoveryTime)
		if err != nil {
			ssR.Logger.Error("State sync failed", "err", err)
			return
		}
		err = stateStore.Bootstrap(state)
		if err != nil {
			ssR.Logger.Error("Failed to bootstrap node with new state", "err", err)
			return
		}
		err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit)
		if err != nil {
			ssR.Logger.Error("Failed to store last seen commit", "err", err)
			return
		}

		if fastSync {
			// FIXME Very ugly to have these metrics bleed through here.
			conR.Metrics.StateSyncing.Set(0)
			conR.Metrics.FastSyncing.Set(1)
			err = bcR.SwitchToFastSync(state)
			if err != nil {
				ssR.Logger.Error("Failed to switch to fast sync", "err", err)
				return
			}
		} else {
			conR.SwitchToConsensus(state, true)
		}
	}()
	return nil
}
   614  
   615  // NewNode returns a new, ready to go, Tendermint Node.
   616  func NewNode(config *cfg.Config,
   617  	privValidator types.PrivValidator,
   618  	nodeKey *p2p.NodeKey,
   619  	clientCreator proxy.ClientCreator,
   620  	genesisDocProvider GenesisDocProvider,
   621  	dbProvider DBProvider,
   622  	metricsProvider MetricsProvider,
   623  	logger log.Logger,
   624  	options ...Option) (*Node, error) {
   625  
   626  	blockStore, stateDB, err := initDBs(config, dbProvider)
   627  	if err != nil {
   628  		return nil, err
   629  	}
   630  
   631  	stateStore := sm.NewStore(stateDB)
   632  
   633  	state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
   634  	if err != nil {
   635  		return nil, err
   636  	}
   637  
   638  	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
   639  	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger)
   640  	if err != nil {
   641  		return nil, err
   642  	}
   643  
   644  	// EventBus and IndexerService must be started before the handshake because
   645  	// we might need to index the txs of the replayed block as this might not have happened
   646  	// when the node stopped last time (i.e. the node stopped after it saved the block
   647  	// but before it indexed the txs, or, endblocker panicked)
   648  	eventBus, err := createAndStartEventBus(logger)
   649  	if err != nil {
   650  		return nil, err
   651  	}
   652  
   653  	// Transaction indexing
   654  	indexerService, txIndexer, err := createAndStartIndexerService(config, dbProvider, eventBus, logger)
   655  	if err != nil {
   656  		return nil, err
   657  	}
   658  
   659  	// If an address is provided, listen on the socket for a connection from an
   660  	// external signing process.
   661  	if config.PrivValidatorListenAddr != "" {
   662  		// FIXME: we should start services inside OnStart
   663  		privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, genDoc.ChainID, logger)
   664  		if err != nil {
   665  			return nil, fmt.Errorf("error with private validator socket client: %w", err)
   666  		}
   667  	}
   668  
   669  	pubKey, err := privValidator.GetPubKey()
   670  	if err != nil {
   671  		return nil, fmt.Errorf("can't get pubkey: %w", err)
   672  	}
   673  
   674  	// Determine whether we should attempt state sync.
   675  	stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey)
   676  	if stateSync && state.LastBlockHeight > 0 {
   677  		logger.Info("Found local state with non-zero height, skipping state sync")
   678  		stateSync = false
   679  	}
   680  
   681  	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
   682  	// and replays any blocks as necessary to sync tendermint with the app.
   683  	consensusLogger := logger.With("module", "consensus")
   684  	if !stateSync {
   685  		if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil {
   686  			return nil, err
   687  		}
   688  
   689  		// Reload the state. It will have the Version.Consensus.App set by the
   690  		// Handshake, and may have other modifications as well (ie. depending on
   691  		// what happened during block replay).
   692  		state, err = stateStore.Load()
   693  		if err != nil {
   694  			return nil, fmt.Errorf("cannot load state: %w", err)
   695  		}
   696  	}
   697  
   698  	// Determine whether we should do fast sync. This must happen after the handshake, since the
   699  	// app may modify the validator set, specifying ourself as the only validator.
   700  	fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey)
   701  
   702  	logNodeStartupInfo(state, pubKey, logger, consensusLogger)
   703  
   704  	csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID)
   705  
   706  	// Make MempoolReactor
   707  	mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)
   708  
   709  	// Make Evidence Reactor
   710  	evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger)
   711  	if err != nil {
   712  		return nil, err
   713  	}
   714  
   715  	// make block executor for consensus and blockchain reactors to execute blocks
   716  	blockExec := sm.NewBlockExecutor(
   717  		stateStore,
   718  		logger.With("module", "state"),
   719  		proxyApp.Consensus(),
   720  		mempool,
   721  		evidencePool,
   722  		sm.BlockExecutorWithMetrics(smMetrics),
   723  	)
   724  
   725  	// Make BlockchainReactor. Don't start fast sync if we're doing a state sync first.
   726  	bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync && !stateSync, logger)
   727  	if err != nil {
   728  		return nil, fmt.Errorf("could not create blockchain reactor: %w", err)
   729  	}
   730  
   731  	// Make ConsensusReactor. Don't enable fully if doing a state sync and/or fast sync first.
   732  	// FIXME We need to update metrics here, since other reactors don't have access to them.
   733  	if stateSync {
   734  		csMetrics.StateSyncing.Set(1)
   735  	} else if fastSync {
   736  		csMetrics.FastSyncing.Set(1)
   737  	}
   738  	consensusReactor, consensusState := createConsensusReactor(
   739  		config, state, blockExec, blockStore, mempool, evidencePool,
   740  		privValidator, csMetrics, stateSync || fastSync, eventBus, consensusLogger,
   741  	)
   742  
   743  	// Set up state sync reactor, and schedule a sync if requested.
   744  	// FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy,
   745  	// we should clean this whole thing up. See:
   746  	// https://github.com/tendermint/tendermint/issues/4644
   747  	stateSyncReactor := statesync.NewReactor(proxyApp.Snapshot(), proxyApp.Query(),
   748  		config.StateSync.TempDir)
   749  	stateSyncReactor.SetLogger(logger.With("module", "statesync"))
   750  
   751  	nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state)
   752  	if err != nil {
   753  		return nil, err
   754  	}
   755  
   756  	// Setup Transport.
   757  	transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp)
   758  
   759  	// Setup Switch.
   760  	p2pLogger := logger.With("module", "p2p")
   761  	sw := createSwitch(
   762  		config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor,
   763  		stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger,
   764  	)
   765  
   766  	err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
   767  	if err != nil {
   768  		return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err)
   769  	}
   770  
   771  	err = sw.AddUnconditionalPeerIDs(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
   772  	if err != nil {
   773  		return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
   774  	}
   775  
   776  	addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
   777  	if err != nil {
   778  		return nil, fmt.Errorf("could not create addrbook: %w", err)
   779  	}
   780  
   781  	// Optionally, start the pex reactor
   782  	//
   783  	// TODO:
   784  	//
   785  	// We need to set Seeds and PersistentPeers on the switch,
   786  	// since it needs to be able to use these (and their DNS names)
   787  	// even if the PEX is off. We can include the DNS name in the NetAddress,
   788  	// but it would still be nice to have a clear list of the current "PersistentPeers"
   789  	// somewhere that we can return with net_info.
   790  	//
   791  	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
   792  	// Note we currently use the addrBook regardless at least for AddOurAddress
   793  	var pexReactor *pex.Reactor
   794  	if config.P2P.PexReactor {
   795  		pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
   796  	}
   797  
   798  	if config.RPC.PprofListenAddress != "" {
   799  		go func() {
   800  			logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress)
   801  			logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil))
   802  		}()
   803  	}
   804  
   805  	node := &Node{
   806  		config:        config,
   807  		genesisDoc:    genDoc,
   808  		privValidator: privValidator,
   809  
   810  		transport: transport,
   811  		sw:        sw,
   812  		addrBook:  addrBook,
   813  		nodeInfo:  nodeInfo,
   814  		nodeKey:   nodeKey,
   815  
   816  		stateStore:       stateStore,
   817  		blockStore:       blockStore,
   818  		bcReactor:        bcReactor,
   819  		mempoolReactor:   mempoolReactor,
   820  		mempool:          mempool,
   821  		consensusState:   consensusState,
   822  		consensusReactor: consensusReactor,
   823  		stateSyncReactor: stateSyncReactor,
   824  		stateSync:        stateSync,
   825  		stateSyncGenesis: state, // Shouldn't be necessary, but need a way to pass the genesis state
   826  		pexReactor:       pexReactor,
   827  		evidencePool:     evidencePool,
   828  		proxyApp:         proxyApp,
   829  		txIndexer:        txIndexer,
   830  		indexerService:   indexerService,
   831  		eventBus:         eventBus,
   832  	}
   833  	node.BaseService = *service.NewBaseService(logger, "Node", node)
   834  
   835  	for _, option := range options {
   836  		option(node)
   837  	}
   838  
   839  	return node, nil
   840  }
   841  
// OnStart starts the Node. It implements service.Service.
//
// Startup order matters here: the RPC server comes up before the P2P layer
// (so txs for the first block can be received), the mempool WAL is opened
// before the switch starts, and state sync (if enabled) runs last, after
// persistent peers have begun dialing.
func (n *Node) OnStart() error {
	// If genesis time is in the future, block until it arrives.
	now := tmtime.Now()
	genTime := n.genesisDoc.GenesisTime
	if genTime.After(now) {
		n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime)
		time.Sleep(genTime.Sub(now))
	}

	// Add private IDs to addrbook to block those peers being added
	n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))

	// Start the RPC server before the P2P server
	// so we can eg. receive txs for the first block
	if n.config.RPC.ListenAddress != "" {
		listeners, err := n.startRPC()
		if err != nil {
			return err
		}
		n.rpcListeners = listeners
	}

	// Expose Prometheus metrics only when both the flag and a listen address are configured.
	if n.config.Instrumentation.Prometheus &&
		n.config.Instrumentation.PrometheusListenAddr != "" {
		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
	}

	// Start the transport.
	addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
	if err != nil {
		return err
	}
	if err := n.transport.Listen(*addr); err != nil {
		return err
	}

	n.isListening = true

	// Open the mempool write-ahead log before the switch starts feeding it txs.
	if n.config.Mempool.WalEnabled() {
		err = n.mempool.InitWAL()
		if err != nil {
			return fmt.Errorf("init mempool WAL: %w", err)
		}
	}

	// Start the switch (the P2P server).
	err = n.sw.Start()
	if err != nil {
		return err
	}

	// Always connect to persistent peers
	err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
	if err != nil {
		return fmt.Errorf("could not dial peers from persistent_peers field: %w", err)
	}

	// Run state sync
	if n.stateSync {
		// State sync hands control over to a blockchain reactor afterwards, so the
		// configured reactor must implement the fastSyncReactor interface.
		bcR, ok := n.bcReactor.(fastSyncReactor)
		if !ok {
			return fmt.Errorf("this blockchain reactor does not support switching from state sync")
		}
		err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider,
			n.config.StateSync, n.config.FastSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis)
		if err != nil {
			return fmt.Errorf("failed to start state sync: %w", err)
		}
	}

	return nil
}
   914  
// OnStop stops the Node. It implements service.Service.
//
// Shutdown proceeds roughly in reverse startup order: non-reactor services
// first, then the reactors (via the switch), the mempool WAL and transport,
// and finally the external listeners (RPC, private validator, Prometheus).
// Errors are logged rather than returned so that shutdown always completes.
func (n *Node) OnStop() {
	n.BaseService.OnStop()

	n.Logger.Info("Stopping Node")

	// first stop the non-reactor services
	if err := n.eventBus.Stop(); err != nil {
		n.Logger.Error("Error closing eventBus", "err", err)
	}
	if err := n.indexerService.Stop(); err != nil {
		n.Logger.Error("Error closing indexerService", "err", err)
	}

	// now stop the reactors
	if err := n.sw.Stop(); err != nil {
		n.Logger.Error("Error closing switch", "err", err)
	}

	// stop mempool WAL
	if n.config.Mempool.WalEnabled() {
		n.mempool.CloseWAL()
	}

	if err := n.transport.Close(); err != nil {
		n.Logger.Error("Error closing transport", "err", err)
	}

	n.isListening = false

	// finally stop the listeners / external services
	for _, l := range n.rpcListeners {
		n.Logger.Info("Closing rpc listener", "listener", l)
		if err := l.Close(); err != nil {
			n.Logger.Error("Error closing listener", "listener", l, "err", err)
		}
	}

	// If the private validator is itself a service (e.g. a socket client), stop it too.
	if pvsc, ok := n.privValidator.(service.Service); ok {
		if err := pvsc.Stop(); err != nil {
			n.Logger.Error("Error closing private validator", "err", err)
		}
	}

	if n.prometheusSrv != nil {
		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
			// Error from closing listeners, or context timeout:
			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
		}
	}
}
   966  
// ConfigureRPC makes sure RPC has all the objects it needs to operate.
//
// It populates the package-global rpccore environment with this node's
// stores, reactors, event bus, and config. It returns an error only if the
// private validator's public key cannot be fetched.
func (n *Node) ConfigureRPC() error {
	pubKey, err := n.privValidator.GetPubKey()
	if err != nil {
		return fmt.Errorf("can't get pubkey: %w", err)
	}
	rpccore.SetEnvironment(&rpccore.Environment{
		ProxyAppQuery:   n.proxyApp.Query(),
		ProxyAppMempool: n.proxyApp.Mempool(),

		StateStore:     n.stateStore,
		BlockStore:     n.blockStore,
		EvidencePool:   n.evidencePool,
		ConsensusState: n.consensusState,
		P2PPeers:       n.sw,
		P2PTransport:   n,

		PubKey:           pubKey,
		GenDoc:           n.genesisDoc,
		TxIndexer:        n.txIndexer,
		ConsensusReactor: n.consensusReactor,
		EventBus:         n.eventBus,
		Mempool:          n.mempool,

		Logger: n.Logger.With("module", "rpc"),

		Config: *n.config.RPC,
	})
	return nil
}
   997  
// startRPC configures and starts the JSON-RPC/WebSocket servers on every
// configured listen address (plus an optional gRPC server), returning the
// listeners so OnStop can close them. Each server runs in its own goroutine;
// serve errors are logged, not returned.
func (n *Node) startRPC() ([]net.Listener, error) {
	err := n.ConfigureRPC()
	if err != nil {
		return nil, err
	}

	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")

	// Unsafe routes (e.g. debug endpoints) are only registered when explicitly enabled.
	if n.config.RPC.Unsafe {
		rpccore.AddUnsafeRoutes()
	}

	config := rpcserver.DefaultConfig()
	config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
	config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
	config.MaxOpenConnections = n.config.RPC.MaxOpenConnections
	// If necessary adjust global WriteTimeout to ensure it's greater than
	// TimeoutBroadcastTxCommit.
	// See https://github.com/tendermint/tendermint/issues/3435
	if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
		config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
	}

	// we may expose the rpc over both a unix and tcp socket
	listeners := make([]net.Listener, len(listenAddrs))
	for i, listenAddr := range listenAddrs {
		mux := http.NewServeMux()
		rpcLogger := n.Logger.With("module", "rpc-server")
		wmLogger := rpcLogger.With("protocol", "websocket")
		// On websocket disconnect, drop all of that client's event subscriptions.
		wm := rpcserver.NewWebsocketManager(rpccore.Routes,
			rpcserver.OnDisconnect(func(remoteAddr string) {
				err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
				if err != nil && err != tmpubsub.ErrSubscriptionNotFound {
					wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err)
				}
			}),
			rpcserver.ReadLimit(config.MaxBodyBytes),
		)
		wm.SetLogger(wmLogger)
		mux.HandleFunc("/websocket", wm.WebsocketHandler)
		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger)
		listener, err := rpcserver.Listen(
			listenAddr,
			config,
		)
		if err != nil {
			return nil, err
		}

		// Optionally wrap the mux with CORS middleware before serving.
		var rootHandler http.Handler = mux
		if n.config.RPC.IsCorsEnabled() {
			corsMiddleware := cors.New(cors.Options{
				AllowedOrigins: n.config.RPC.CORSAllowedOrigins,
				AllowedMethods: n.config.RPC.CORSAllowedMethods,
				AllowedHeaders: n.config.RPC.CORSAllowedHeaders,
			})
			rootHandler = corsMiddleware.Handler(mux)
		}
		if n.config.RPC.IsTLSEnabled() {
			go func() {
				if err := rpcserver.ServeTLS(
					listener,
					rootHandler,
					n.config.RPC.CertFile(),
					n.config.RPC.KeyFile(),
					rpcLogger,
					config,
				); err != nil {
					n.Logger.Error("Error serving server with TLS", "err", err)
				}
			}()
		} else {
			go func() {
				if err := rpcserver.Serve(
					listener,
					rootHandler,
					rpcLogger,
					config,
				); err != nil {
					n.Logger.Error("Error serving server", "err", err)
				}
			}()
		}

		listeners[i] = listener
	}

	// we expose a simplified api over grpc for convenience to app devs
	grpcListenAddr := n.config.RPC.GRPCListenAddress
	if grpcListenAddr != "" {
		// The gRPC server gets its own config, sized by its own connection limit.
		config := rpcserver.DefaultConfig()
		config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
		config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
		// NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections
		config.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections
		// If necessary adjust global WriteTimeout to ensure it's greater than
		// TimeoutBroadcastTxCommit.
		// See https://github.com/tendermint/tendermint/issues/3435
		if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
			config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
		}
		listener, err := rpcserver.Listen(grpcListenAddr, config)
		if err != nil {
			return nil, err
		}
		go func() {
			if err := grpccore.StartGRPCServer(listener); err != nil {
				n.Logger.Error("Error starting gRPC server", "err", err)
			}
		}()
		listeners = append(listeners, listener)

	}

	return listeners, nil

}
  1115  
  1116  // startPrometheusServer starts a Prometheus HTTP server, listening for metrics
  1117  // collectors on addr.
  1118  func (n *Node) startPrometheusServer(addr string) *http.Server {
  1119  	srv := &http.Server{
  1120  		Addr: addr,
  1121  		Handler: promhttp.InstrumentMetricHandler(
  1122  			prometheus.DefaultRegisterer, promhttp.HandlerFor(
  1123  				prometheus.DefaultGatherer,
  1124  				promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections},
  1125  			),
  1126  		),
  1127  	}
  1128  	go func() {
  1129  		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
  1130  			// Error starting or closing listener:
  1131  			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
  1132  		}
  1133  	}()
  1134  	return srv
  1135  }
  1136  
// Switch returns the Node's p2p Switch, which manages peer connections and reactors.
func (n *Node) Switch() *p2p.Switch {
	return n.sw
}
  1141  
// BlockStore returns the Node's BlockStore (persistent store of blocks).
func (n *Node) BlockStore() *store.BlockStore {
	return n.blockStore
}
  1146  
// ConsensusState returns the Node's ConsensusState (the consensus state machine).
func (n *Node) ConsensusState() *cs.State {
	return n.consensusState
}
  1151  
// ConsensusReactor returns the Node's ConsensusReactor (consensus p2p gossip).
func (n *Node) ConsensusReactor() *cs.Reactor {
	return n.consensusReactor
}
  1156  
// MempoolReactor returns the Node's mempool reactor (tx gossip over p2p).
func (n *Node) MempoolReactor() *mempl.Reactor {
	return n.mempoolReactor
}
  1161  
// Mempool returns the Node's mempool (pending transactions awaiting inclusion).
func (n *Node) Mempool() mempl.Mempool {
	return n.mempool
}
  1166  
// PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled
// in the config (see the node construction above, which only creates it when
// config.P2P.PexReactor is true).
func (n *Node) PEXReactor() *pex.Reactor {
	return n.pexReactor
}
  1171  
// EvidencePool returns the Node's EvidencePool (misbehavior evidence storage).
func (n *Node) EvidencePool() *evidence.Pool {
	return n.evidencePool
}
  1176  
// EventBus returns the Node's EventBus (pub/sub for consensus and tx events).
func (n *Node) EventBus() *types.EventBus {
	return n.eventBus
}
  1181  
// PrivValidator returns the Node's PrivValidator (the key used for signing).
// XXX: for convenience only!
func (n *Node) PrivValidator() types.PrivValidator {
	return n.privValidator
}
  1187  
// GenesisDoc returns the Node's GenesisDoc (the chain's genesis document).
func (n *Node) GenesisDoc() *types.GenesisDoc {
	return n.genesisDoc
}
  1192  
// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
func (n *Node) ProxyApp() proxy.AppConns {
	return n.proxyApp
}
  1197  
// Config returns the Node's full configuration.
func (n *Node) Config() *cfg.Config {
	return n.config
}
  1202  
  1203  //------------------------------------------------------------------------------
  1204  
  1205  func (n *Node) Listeners() []string {
  1206  	return []string{
  1207  		fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress),
  1208  	}
  1209  }
  1210  
// IsListening reports whether the node's P2P transport is listening
// (set true in OnStart once the transport listens, cleared in OnStop).
func (n *Node) IsListening() bool {
	return n.isListening
}
  1214  
// NodeInfo returns the Node's p2p.NodeInfo (built by makeNodeInfo at
// construction time).
func (n *Node) NodeInfo() p2p.NodeInfo {
	return n.nodeInfo
}
  1219  
// makeNodeInfo assembles the p2p.NodeInfo advertised to peers during the
// handshake: protocol versions, chain ID, supported channels (which depend on
// the fast-sync version and whether PEX is enabled), moniker, tx-index status,
// and the listen address. It returns an error for an unknown fast-sync
// version or if the resulting NodeInfo fails validation.
func makeNodeInfo(
	config *cfg.Config,
	nodeKey *p2p.NodeKey,
	txIndexer txindex.TxIndexer,
	genDoc *types.GenesisDoc,
	state sm.State,
) (p2p.NodeInfo, error) {
	// Advertise whether tx indexing is active (a null indexer means "off").
	txIndexerStatus := "on"
	if _, ok := txIndexer.(*null.TxIndex); ok {
		txIndexerStatus = "off"
	}

	// The blockchain channel ID differs per fast-sync reactor version.
	var bcChannel byte
	switch config.FastSync.Version {
	case "v0":
		bcChannel = bcv0.BlockchainChannel
	case "v1":
		bcChannel = bcv1.BlockchainChannel
	case "v2":
		bcChannel = bcv2.BlockchainChannel
	default:
		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
	}

	nodeInfo := p2p.DefaultNodeInfo{
		ProtocolVersion: p2p.NewProtocolVersion(
			version.P2PProtocol, // global
			state.Version.Consensus.Block,
			state.Version.Consensus.App,
		),
		DefaultNodeID: nodeKey.ID(),
		Network:       genDoc.ChainID,
		Version:       version.TMCoreSemVer,
		Channels: []byte{
			bcChannel,
			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
			mempl.MempoolChannel,
			evidence.EvidenceChannel,
			statesync.SnapshotChannel, statesync.ChunkChannel,
		},
		Moniker: config.Moniker,
		Other: p2p.DefaultNodeInfoOther{
			TxIndex:    txIndexerStatus,
			RPCAddress: config.RPC.ListenAddress,
		},
	}

	// The PEX channel is only advertised when the PEX reactor is enabled.
	if config.P2P.PexReactor {
		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
	}

	// Prefer the configured external address; fall back to the listen address.
	lAddr := config.P2P.ExternalAddress

	if lAddr == "" {
		lAddr = config.P2P.ListenAddress
	}

	nodeInfo.ListenAddr = lAddr

	err := nodeInfo.Validate()
	return nodeInfo, err
}
  1282  
  1283  //------------------------------------------------------------------------------
  1284  
var (
	// genesisDocKey is the database key under which the genesis document is persisted.
	genesisDocKey = []byte("genesisDoc")
)
  1288  
  1289  // LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
  1290  // database, or creates one using the given genesisDocProvider. On success this also
  1291  // returns the genesis doc loaded through the given provider.
  1292  func LoadStateFromDBOrGenesisDocProvider(
  1293  	stateDB dbm.DB,
  1294  	genesisDocProvider GenesisDocProvider,
  1295  ) (sm.State, *types.GenesisDoc, error) {
  1296  	// Get genesis doc
  1297  	genDoc, err := loadGenesisDoc(stateDB)
  1298  	if err != nil {
  1299  		genDoc, err = genesisDocProvider()
  1300  		if err != nil {
  1301  			return sm.State{}, nil, err
  1302  		}
  1303  		// save genesis doc to prevent a certain class of user errors (e.g. when it
  1304  		// was changed, accidentally or not). Also good for audit trail.
  1305  		if err := saveGenesisDoc(stateDB, genDoc); err != nil {
  1306  			return sm.State{}, nil, err
  1307  		}
  1308  	}
  1309  	stateStore := sm.NewStore(stateDB)
  1310  	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
  1311  	if err != nil {
  1312  		return sm.State{}, nil, err
  1313  	}
  1314  	return state, genDoc, nil
  1315  }
  1316  
  1317  // panics if failed to unmarshal bytes
  1318  func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
  1319  	b, err := db.Get(genesisDocKey)
  1320  	if err != nil {
  1321  		panic(err)
  1322  	}
  1323  	if len(b) == 0 {
  1324  		return nil, errors.New("genesis doc not found")
  1325  	}
  1326  	var genDoc *types.GenesisDoc
  1327  	err = tmjson.Unmarshal(b, &genDoc)
  1328  	if err != nil {
  1329  		panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b))
  1330  	}
  1331  	return genDoc, nil
  1332  }
  1333  
  1334  // panics if failed to marshal the given genesis document
  1335  func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) error {
  1336  	b, err := tmjson.Marshal(genDoc)
  1337  	if err != nil {
  1338  		return fmt.Errorf("failed to save genesis doc due to marshaling error: %w", err)
  1339  	}
  1340  	if err := db.SetSync(genesisDocKey, b); err != nil {
  1341  		return err
  1342  	}
  1343  
  1344  	return nil
  1345  }
  1346  
// createAndStartPrivValidatorSocketClient connects to an external private
// validator process listening on listenAddr and returns a PrivValidator
// backed by that connection, wrapped in a retrying client. The initial
// GetPubKey call acts as a connectivity check before the node proceeds.
func createAndStartPrivValidatorSocketClient(
	listenAddr,
	chainID string,
	logger log.Logger,
) (types.PrivValidator, error) {
	pve, err := privval.NewSignerListener(listenAddr, logger)
	if err != nil {
		return nil, fmt.Errorf("failed to start private validator: %w", err)
	}

	pvsc, err := privval.NewSignerClient(pve, chainID)
	if err != nil {
		return nil, fmt.Errorf("failed to start private validator: %w", err)
	}

	// try to get a pubkey from the private validator first time
	_, err = pvsc.GetPubKey()
	if err != nil {
		return nil, fmt.Errorf("can't get pubkey: %w", err)
	}

	// Wrap in a retrying client so transient signer disconnects are retried
	// instead of failing the caller immediately.
	const (
		retries = 50 // 50 * 100ms = 5s total
		timeout = 100 * time.Millisecond
	)
	pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout)

	return pvscWithRetries, nil
}
  1376  
  1377  // splitAndTrimEmpty slices s into all subslices separated by sep and returns a
  1378  // slice of the string s with all leading and trailing Unicode code points
  1379  // contained in cutset removed. If sep is empty, SplitAndTrim splits after each
  1380  // UTF-8 sequence. First part is equivalent to strings.SplitN with a count of
  1381  // -1.  also filter out empty strings, only return non-empty strings.
  1382  func splitAndTrimEmpty(s, sep, cutset string) []string {
  1383  	if s == "" {
  1384  		return []string{}
  1385  	}
  1386  
  1387  	spl := strings.Split(s, sep)
  1388  	nonEmptyStrings := make([]string, 0, len(spl))
  1389  	for i := 0; i < len(spl); i++ {
  1390  		element := strings.Trim(spl[i], cutset)
  1391  		if element != "" {
  1392  			nonEmptyStrings = append(nonEmptyStrings, element)
  1393  		}
  1394  	}
  1395  	return nonEmptyStrings
  1396  }