github.com/vipernet-xyz/tendermint-core@v0.32.0/node/node.go (about)

     1  package node
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"fmt"
     7  	"net"
     8  	"net/http"
     9  	_ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
    10  	"strings"
    11  	"time"
    12  
    13  	"github.com/pkg/errors"
    14  	"github.com/prometheus/client_golang/prometheus"
    15  	"github.com/prometheus/client_golang/prometheus/promhttp"
    16  	"github.com/rs/cors"
    17  
    18  	"github.com/tendermint/go-amino"
    19  	dbm "github.com/tendermint/tm-db"
    20  
    21  	abci "github.com/tendermint/tendermint/abci/types"
    22  	bcv0 "github.com/tendermint/tendermint/blockchain/v0"
    23  	bcv1 "github.com/tendermint/tendermint/blockchain/v1"
    24  	bcv2 "github.com/tendermint/tendermint/blockchain/v2"
    25  	cfg "github.com/tendermint/tendermint/config"
    26  	"github.com/tendermint/tendermint/consensus"
    27  	cs "github.com/tendermint/tendermint/consensus"
    28  	"github.com/tendermint/tendermint/crypto"
    29  	"github.com/tendermint/tendermint/evidence"
    30  	"github.com/tendermint/tendermint/libs/log"
    31  	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
    32  	"github.com/tendermint/tendermint/libs/service"
    33  	mempl "github.com/tendermint/tendermint/mempool"
    34  	"github.com/tendermint/tendermint/p2p"
    35  	"github.com/tendermint/tendermint/p2p/pex"
    36  	"github.com/tendermint/tendermint/privval"
    37  	"github.com/tendermint/tendermint/proxy"
    38  	rpccore "github.com/tendermint/tendermint/rpc/core"
    39  	ctypes "github.com/tendermint/tendermint/rpc/core/types"
    40  	grpccore "github.com/tendermint/tendermint/rpc/grpc"
    41  	rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server"
    42  	sm "github.com/tendermint/tendermint/state"
    43  	"github.com/tendermint/tendermint/state/txindex"
    44  	"github.com/tendermint/tendermint/state/txindex/kv"
    45  	"github.com/tendermint/tendermint/state/txindex/null"
    46  	"github.com/tendermint/tendermint/store"
    47  	"github.com/tendermint/tendermint/types"
    48  	tmtime "github.com/tendermint/tendermint/types/time"
    49  	"github.com/tendermint/tendermint/version"
    50  )
    51  
    52  //------------------------------------------------------------------------------
    53  
// DBContext specifies config information for loading a new DB.
type DBContext struct {
	ID     string      // database name, e.g. "blockstore", "state", "tx_index", "evidence"
	Config *cfg.Config // node config; supplies the backend type and data directories
}

// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (dbm.DB, error)
    62  
    63  // DefaultDBProvider returns a database using the DBBackend and DBDir
    64  // specified in the ctx.Config.
    65  func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
    66  	if ctx.Config.DBBackend == string(dbm.MemDBBackend) {
    67  		dbType := dbm.BackendType(ctx.Config.DBBackend)
    68  		return dbm.NewDB(ctx.ID, dbType, ctx.Config.RootDir), nil
    69  	}
    70  	return dbm.NewGoLevelDBWithOpts(ctx.ID, ctx.Config.DBDir(), ctx.Config.LevelDBOptions.ToGoLevelDBOpts())
    71  }
    72  
// GenesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
// It is invoked only when no state is found in the state DB.
type GenesisDocProvider func() (*types.GenesisDoc, error)
    77  
    78  // DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
    79  // the GenesisDoc from the config.GenesisFile() on the filesystem.
    80  func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
    81  	return func() (*types.GenesisDoc, error) {
    82  		return types.GenesisDocFromFile(config.GenesisFile())
    83  	}
    84  }
    85  
// Provider takes a config and a logger and returns a ready to go Node.
// DefaultNewNode is the canonical implementation.
type Provider func(*cfg.Config, log.Logger) (*Node, error)
    88  
    89  // DefaultNewNode returns a Tendermint node with default settings for the
    90  // PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
    91  // It implements NodeProvider.
    92  func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
    93  	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
    94  	if err != nil {
    95  		return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err)
    96  	}
    97  
    98  	return NewNode(nil, config, 0,
    99  		privval.LoadOrGenFilePVLean(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
   100  		nodeKey,
   101  		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
   102  		nil,
   103  		DefaultGenesisDocProviderFunc(config),
   104  		DefaultDBProvider,
   105  		DefaultMetricsProvider(config.Instrumentation),
   106  		logger,
   107  	)
   108  }
   109  
// MetricsProvider returns consensus, p2p, mempool and state Metrics for the
// given chain ID.
type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics)
   112  
   113  // DefaultMetricsProvider returns Metrics build using Prometheus client library
   114  // if Prometheus is enabled. Otherwise, it returns no-op Metrics.
   115  func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
   116  	return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) {
   117  		if config.Prometheus {
   118  			return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   119  				p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   120  				mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   121  				sm.PrometheusMetrics(config.Namespace, "chain_id", chainID)
   122  		}
   123  		return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics()
   124  	}
   125  }
   126  
// Option sets a parameter for the node. Options are applied by NewNode after
// the node is fully constructed but before it is returned.
type Option func(*Node)
   129  
   130  // CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to
   131  // the node's Switch.
   132  //
   133  // WARNING: using any name from the below list of the existing reactors will
   134  // result in replacing it with the custom one.
   135  //
   136  //  - MEMPOOL
   137  //  - BLOCKCHAIN
   138  //  - CONSENSUS
   139  //  - EVIDENCE
   140  //  - PEX
   141  func CustomReactors(reactors map[string]p2p.Reactor) Option {
   142  	return func(n *Node) {
   143  		for name, reactor := range reactors {
   144  			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
   145  				n.sw.Logger.Info("Replacing existing reactor with a custom one",
   146  					"name", name, "existing", existingReactor, "custom", reactor)
   147  				n.sw.RemoveReactor(name, existingReactor)
   148  			}
   149  			n.sw.AddReactor(name, reactor)
   150  		}
   151  	}
   152  }
   153  
   154  //------------------------------------------------------------------------------
   155  
// Node is the highest level interface to a full Tendermint node.
// It includes all configuration information and running services.
type Node struct {
	service.BaseService

	// config
	config        *cfg.Config
	genesisDoc    *types.GenesisDoc    // initial validator set
	privValidator types.PrivValidators // local node's validator key

	// network
	transport   *p2p.MultiplexTransport
	sw          *p2p.Switch  // p2p connections
	addrBook    pex.AddrBook // known peers
	nodeInfo    p2p.NodeInfo // our node info, shared with the transport and switch
	nodeKey     *p2p.NodeKey // our node privkey
	isListening bool         // set once the transport is listening (OnStart/OnStop)

	// services
	eventBus         *types.EventBus // pub/sub for services
	stateDB          dbm.DB          // state database; also used by the evidence pool and handshake
	blockStore       *store.BlockStore // store the blockchain to disk
	bcReactor        p2p.Reactor       // for fast-syncing
	mempoolReactor   *mempl.Reactor    // for gossipping transactions
	mempool          mempl.Mempool     // the mempool behind mempoolReactor
	consensusState   *cs.State      // latest consensus state
	consensusReactor *cs.Reactor    // for participating in the consensus
	pexReactor       *pex.Reactor   // for exchanging peer addresses; nil when PEX is disabled
	evidencePool     *evidence.Pool // tracking evidence
	proxyApp         proxy.AppConns // connection to the application
	rpcListeners     []net.Listener // rpc servers
	txIndexer        txindex.TxIndexer       // indexes txs (kv, null, or caller-supplied)
	indexerService   *txindex.IndexerService // feeds eventBus events into txIndexer
	prometheusSrv    *http.Server            // serves Prometheus metrics when instrumentation is enabled
}
   191  
   192  func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
   193  	var blockStoreDB dbm.DB
   194  	blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
   195  	if err != nil {
   196  		return
   197  	}
   198  	blockStore = store.NewBlockStore(blockStoreDB)
   199  
   200  	stateDB, err = dbProvider(&DBContext{"state", config})
   201  	if err != nil {
   202  		return
   203  	}
   204  
   205  	return
   206  }
   207  
   208  func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
   209  	proxyApp := proxy.NewAppConns(clientCreator)
   210  	proxyApp.SetLogger(logger.With("module", "proxy"))
   211  	if err := proxyApp.Start(); err != nil {
   212  		return nil, fmt.Errorf("error starting proxy app connections: %v", err)
   213  	}
   214  	return proxyApp, nil
   215  }
   216  
   217  func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
   218  	eventBus := types.NewEventBus()
   219  	eventBus.SetLogger(logger.With("module", "events"))
   220  	if err := eventBus.Start(); err != nil {
   221  		return nil, err
   222  	}
   223  	return eventBus, nil
   224  }
   225  
   226  func createAndStartIndexerService(config *cfg.Config, dbProvider DBProvider,
   227  	eventBus *types.EventBus, logger log.Logger) (*txindex.IndexerService, txindex.TxIndexer, error) {
   228  
   229  	var txIndexer txindex.TxIndexer
   230  	switch config.TxIndex.Indexer {
   231  	case "kv":
   232  		store, err := dbProvider(&DBContext{"tx_index", config})
   233  		if err != nil {
   234  			return nil, nil, err
   235  		}
   236  		switch {
   237  		case config.TxIndex.IndexKeys != "":
   238  			txIndexer = kv.NewTxIndex(store, kv.IndexEvents(splitAndTrimEmpty(config.TxIndex.IndexKeys, ",", " ")))
   239  		case config.TxIndex.IndexAllKeys:
   240  			txIndexer = kv.NewTxIndex(store, kv.IndexAllEvents())
   241  		default:
   242  			txIndexer = kv.NewTxIndex(store)
   243  		}
   244  	default:
   245  		txIndexer = &null.TxIndex{}
   246  	}
   247  
   248  	indexerService := txindex.NewIndexerService(txIndexer, eventBus)
   249  	indexerService.SetLogger(logger.With("module", "txindex"))
   250  	if err := indexerService.Start(); err != nil {
   251  		return nil, nil, err
   252  	}
   253  	return indexerService, txIndexer, nil
   254  }
   255  
   256  func startCustomIndexerService(txIndexer txindex.TxIndexer, eventBus *types.EventBus,
   257  	logger log.Logger) (*txindex.IndexerService, error) {
   258  	indexerService := txindex.NewIndexerService(txIndexer, eventBus)
   259  	indexerService.SetLogger(logger.With("module", "txindex"))
   260  	if err := indexerService.Start(); err != nil {
   261  		return nil, err
   262  	}
   263  	return indexerService, nil
   264  }
   265  
   266  func doHandshake(
   267  	stateDB dbm.DB,
   268  	state sm.State,
   269  	blockStore sm.BlockStore,
   270  	genDoc *types.GenesisDoc,
   271  	eventBus types.BlockEventPublisher,
   272  	proxyApp proxy.AppConns,
   273  	consensusLogger log.Logger, indexer txindex.TxIndexer) error {
   274  
   275  	handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc)
   276  	handshaker.SetLogger(consensusLogger)
   277  	handshaker.SetEventBus(eventBus)
   278  	if err := handshaker.Handshake(proxyApp, indexer); err != nil {
   279  		return fmt.Errorf("error during handshake: %v", err)
   280  	}
   281  	return nil
   282  }
   283  
   284  func logNodeStartupInfo(state sm.State, pubKeys []crypto.PubKey, logger, consensusLogger log.Logger) {
   285  	// Log the version info.
   286  	logger.Info("Version info",
   287  		"software", version.TMCoreSemVer,
   288  		"block", version.BlockProtocol,
   289  		"p2p", version.P2PProtocol,
   290  	)
   291  
   292  	// If the state and software differ in block version, at least log it.
   293  	if state.Version.Consensus.Block != version.BlockProtocol {
   294  		logger.Info("Software and state have different block protocols",
   295  			"software", version.BlockProtocol,
   296  			"state", state.Version.Consensus.Block,
   297  		)
   298  	}
   299  
   300  	for _, pubKey := range pubKeys {
   301  		addr := pubKey.Address()
   302  		// Log whether this node is a validator or an observer
   303  		if state.Validators.HasAddress(addr) {
   304  			consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey)
   305  		} else {
   306  			consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
   307  		}
   308  	}
   309  }
   310  
   311  func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
   312  	if state.Validators.Size() > 1 {
   313  		return false
   314  	}
   315  	addr, _ := state.Validators.GetByIndex(0)
   316  	return bytes.Equal(pubKey.Address(), addr)
   317  }
   318  
   319  func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns,
   320  	state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (*mempl.Reactor, *mempl.CListMempool) {
   321  
   322  	mempool := mempl.NewCListMempool(
   323  		config.Mempool,
   324  		proxyApp.Mempool(),
   325  		state.LastBlockHeight,
   326  		mempl.WithMetrics(memplMetrics),
   327  		mempl.WithPreCheck(sm.TxPreCheck(state)),
   328  		mempl.WithPostCheck(sm.TxPostCheck(state)),
   329  	)
   330  	mempoolLogger := logger.With("module", "mempool")
   331  	mempoolReactor := mempl.NewReactor(config.Mempool, mempool)
   332  	mempoolReactor.SetLogger(mempoolLogger)
   333  
   334  	if config.Consensus.WaitForTxs() {
   335  		mempool.EnableTxsAvailable()
   336  	}
   337  	return mempoolReactor, mempool
   338  }
   339  
   340  func CreateEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
   341  	stateDB dbm.DB, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) {
   342  
   343  	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
   344  	if err != nil {
   345  		return nil, nil, err
   346  	}
   347  	evidenceLogger := logger.With("module", "evidence")
   348  	evidencePool := evidence.NewPool(stateDB, evidenceDB)
   349  	evidencePool.SetLogger(evidenceLogger)
   350  	evidenceReactor := evidence.NewReactor(evidencePool)
   351  	evidenceReactor.SetLogger(evidenceLogger)
   352  	return evidenceReactor, evidencePool, nil
   353  }
   354  
   355  func createBlockchainReactor(config *cfg.Config,
   356  	state sm.State,
   357  	blockExec *sm.BlockExecutor,
   358  	blockStore *store.BlockStore,
   359  	fastSync bool,
   360  	logger log.Logger) (bcReactor p2p.Reactor, err error) {
   361  
   362  	switch config.FastSync.Version {
   363  	case "v0":
   364  		bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   365  	case "v1":
   366  		bcReactor = bcv1.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   367  	case "v2":
   368  		bcReactor = bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   369  	default:
   370  		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
   371  	}
   372  
   373  	bcReactor.SetLogger(logger.With("module", "blockchain"))
   374  	return bcReactor, nil
   375  }
   376  
   377  func createConsensusReactor(config *cfg.Config,
   378  	state sm.State,
   379  	blockExec *sm.BlockExecutor,
   380  	blockStore sm.BlockStore,
   381  	upgradeHeight int64,
   382  	mempool *mempl.CListMempool,
   383  	evidencePool *evidence.Pool,
   384  	privValidator types.PrivValidators,
   385  	csMetrics *cs.Metrics,
   386  	fastSync bool,
   387  	eventBus *types.EventBus,
   388  	consensusLogger log.Logger) (*consensus.Reactor, *consensus.State) {
   389  
   390  	consensusState := cs.NewState(
   391  		config.Consensus,
   392  		state.Copy(),
   393  		upgradeHeight,
   394  		blockExec,
   395  		blockStore,
   396  		mempool,
   397  		evidencePool,
   398  		cs.StateMetrics(csMetrics),
   399  	)
   400  	consensusState.SetLogger(consensusLogger)
   401  	if privValidator != nil {
   402  		consensusState.SetPrivValidators(privValidator)
   403  	}
   404  	consensusReactor := cs.NewReactor(consensusState, fastSync, cs.ReactorMetrics(csMetrics))
   405  	consensusReactor.SetLogger(consensusLogger)
   406  	// services which will be publishing and/or subscribing for messages (events)
   407  	// consensusReactor will set it on consensusState and blockExecutor
   408  	consensusReactor.SetEventBus(eventBus)
   409  	return consensusReactor, consensusState
   410  }
   411  
   412  func createTransport(
   413  	config *cfg.Config,
   414  	nodeInfo p2p.NodeInfo,
   415  	nodeKey *p2p.NodeKey,
   416  	proxyApp proxy.AppConns,
   417  ) (
   418  	*p2p.MultiplexTransport,
   419  	[]p2p.PeerFilterFunc,
   420  ) {
   421  	var (
   422  		mConnConfig = p2p.MConnConfig(config.P2P)
   423  		transport   = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig)
   424  		connFilters = []p2p.ConnFilterFunc{}
   425  		peerFilters = []p2p.PeerFilterFunc{}
   426  	)
   427  
   428  	if !config.P2P.AllowDuplicateIP {
   429  		connFilters = append(connFilters, p2p.ConnDuplicateIPFilter())
   430  	}
   431  
   432  	// Filter peers by addr or pubkey with an ABCI query.
   433  	// If the query return code is OK, add peer.
   434  	if config.FilterPeers {
   435  		connFilters = append(
   436  			connFilters,
   437  			// ABCI query for address filtering.
   438  			func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
   439  				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
   440  					Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
   441  				})
   442  				if err != nil {
   443  					return err
   444  				}
   445  				if res.IsErr() {
   446  					return fmt.Errorf("error querying abci app: %v", res)
   447  				}
   448  
   449  				return nil
   450  			},
   451  		)
   452  
   453  		peerFilters = append(
   454  			peerFilters,
   455  			// ABCI query for ID filtering.
   456  			func(_ p2p.IPeerSet, p p2p.Peer) error {
   457  				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
   458  					Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
   459  				})
   460  				if err != nil {
   461  					return err
   462  				}
   463  				if res.IsErr() {
   464  					return fmt.Errorf("error querying abci app: %v", res)
   465  				}
   466  
   467  				return nil
   468  			},
   469  		)
   470  	}
   471  
   472  	p2p.MultiplexTransportConnFilters(connFilters...)(transport)
   473  
   474  	// Limit the number of incoming connections.
   475  	max := config.P2P.MaxNumInboundPeers + len(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
   476  	p2p.MultiplexTransportMaxIncomingConnections(max)(transport)
   477  
   478  	return transport, peerFilters
   479  }
   480  
   481  func createSwitch(config *cfg.Config,
   482  	transport p2p.Transport,
   483  	p2pMetrics *p2p.Metrics,
   484  	peerFilters []p2p.PeerFilterFunc,
   485  	mempoolReactor *mempl.Reactor,
   486  	bcReactor p2p.Reactor,
   487  	consensusReactor *consensus.Reactor,
   488  	evidenceReactor *evidence.Reactor,
   489  	nodeInfo p2p.NodeInfo,
   490  	nodeKey *p2p.NodeKey,
   491  	p2pLogger log.Logger) *p2p.Switch {
   492  
   493  	sw := p2p.NewSwitch(
   494  		config.P2P,
   495  		transport,
   496  		p2p.WithMetrics(p2pMetrics),
   497  		p2p.SwitchPeerFilters(peerFilters...),
   498  	)
   499  	sw.SetLogger(p2pLogger)
   500  	sw.AddReactor("MEMPOOL", mempoolReactor)
   501  	sw.AddReactor("BLOCKCHAIN", bcReactor)
   502  	sw.AddReactor("CONSENSUS", consensusReactor)
   503  	sw.AddReactor("EVIDENCE", evidenceReactor)
   504  
   505  	sw.SetNodeInfo(nodeInfo)
   506  	sw.SetNodeKey(nodeKey)
   507  
   508  	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())
   509  	return sw
   510  }
   511  
   512  func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
   513  	p2pLogger log.Logger, nodeKey *p2p.NodeKey) (pex.AddrBook, error) {
   514  
   515  	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
   516  	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
   517  
   518  	// Add ourselves to addrbook to prevent dialing ourselves
   519  	if config.P2P.ExternalAddress != "" {
   520  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress))
   521  		if err != nil {
   522  			return nil, errors.Wrap(err, "p2p.external_address is incorrect")
   523  		}
   524  		addrBook.AddOurAddress(addr)
   525  	}
   526  	if config.P2P.ListenAddress != "" {
   527  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress))
   528  		if err != nil {
   529  			return nil, errors.Wrap(err, "p2p.laddr is incorrect")
   530  		}
   531  		addrBook.AddOurAddress(addr)
   532  	}
   533  
   534  	sw.SetAddrBook(addrBook)
   535  
   536  	return addrBook, nil
   537  }
   538  
   539  func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
   540  	sw *p2p.Switch, logger log.Logger) *pex.Reactor {
   541  
   542  	// TODO persistent peers ? so we can have their DNS addrs saved
   543  	pexReactor := pex.NewReactor(addrBook,
   544  		&pex.ReactorConfig{
   545  			Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
   546  			SeedMode: config.P2P.SeedMode,
   547  			// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
   548  			// blocks assuming 10s blocks ~ 28 hours.
   549  			// TODO (melekes): make it dynamic based on the actual block latencies
   550  			// from the live network.
   551  			// https://github.com/tendermint/tendermint/issues/3523
   552  			SeedDisconnectWaitPeriod:     1 * time.Hour,
   553  			PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
   554  		})
   555  	pexReactor.SetLogger(logger.With("module", "pex"))
   556  	sw.AddReactor("PEX", pexReactor)
   557  	return pexReactor
   558  }
   559  
// BaseApp is the interface of the base application that will be passed to
// tendermint. NewNode uses it to hand the app the tx indexer, the block
// store and the constructed node itself.
type BaseApp interface {
	SetTxIndexer(txIndexer txindex.TxIndexer)
	SetBlockstore(bs *store.BlockStore)
	SetTendermintNode(n *Node)
}
   566  
// NewNode returns a new, ready to go, Tendermint Node.
//
// baseApp, when non-nil, receives the tx indexer, the block store and the
// node itself as they are constructed. upgradeHeight is forwarded to the
// consensus state. A nil txIndexer means one is built from the config.
// Options are applied after the node is fully wired.
func NewNode(baseApp BaseApp, config *cfg.Config,
	upgradeHeight int64,
	privValidator types.PrivValidators,
	nodeKey *p2p.NodeKey,
	clientCreator proxy.ClientCreator,
	txIndexer txindex.TxIndexer,
	genesisDocProvider GenesisDocProvider,
	dbProvider DBProvider,
	metricsProvider MetricsProvider,
	logger log.Logger,
	options ...Option) (*Node, error) {

	blockStore, stateDB, err := initDBs(config, dbProvider)
	if err != nil {
		return nil, err
	}

	// The genesis doc provider is only consulted when the state DB is empty.
	state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
	if err != nil {
		return nil, err
	}

	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger)
	if err != nil {
		return nil, err
	}

	// EventBus and IndexerService must be started before the handshake because
	// we might need to index the txs of the replayed block as this might not have happened
	// when the node stopped last time (i.e. the node stopped after it saved the block
	// but before it indexed the txs, or, endblocker panicked)
	eventBus, err := createAndStartEventBus(logger)
	if err != nil {
		return nil, err
	}
	var indexerService *txindex.IndexerService
	// Transaction indexing: use the caller-supplied indexer when given,
	// otherwise build one from the config.
	if txIndexer != nil {
		indexerService, err = startCustomIndexerService(txIndexer, eventBus, logger)
	} else {
		indexerService, txIndexer, err = createAndStartIndexerService(config, dbProvider, eventBus, logger)
	}
	if err != nil {
		return nil, err
	}
	if baseApp != nil {
		baseApp.SetTxIndexer(txIndexer)
		baseApp.SetBlockstore(blockStore)
	}

	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
	// and replays any blocks as necessary to sync tendermint with the app.
	consensusLogger := logger.With("module", "consensus")
	if err := doHandshake(stateDB, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger, txIndexer); err != nil {
		return nil, err
	}

	// Reload the state. It will have the Version.Consensus.App set by the
	// Handshake, and may have other modifications as well (ie. depending on
	// what happened during block replay).
	state = sm.LoadState(stateDB)

	pubKeys, err := privValidator.GetPubKeys()
	if err != nil {
		return nil, errors.Wrap(err, "can't get pubkey")
	}

	logNodeStartupInfo(state, pubKeys, logger, consensusLogger)

	// Decide whether to fast-sync or not
	// We don't fast-sync when the only validator is us.
	fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKeys[0])

	csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID)

	// Make MempoolReactor
	mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)

	// Make Evidence Reactor
	evidenceReactor, evidencePool, err := CreateEvidenceReactor(config, dbProvider, stateDB, logger)
	if err != nil {
		return nil, err
	}

	// make block executor for consensus and blockchain reactors to execute blocks
	blockExec := sm.NewBlockExecutor(
		stateDB,
		logger.With("module", "state"),
		proxyApp.Consensus(),
		mempool,
		evidencePool,
		txIndexer,
		sm.BlockExecutorWithMetrics(smMetrics),
	)

	// Make BlockchainReactor
	bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync, logger)
	if err != nil {
		return nil, errors.Wrap(err, "could not create blockchain reactor")
	}

	// Make ConsensusReactor
	consensusReactor, consensusState := createConsensusReactor(
		config, state, blockExec, blockStore, upgradeHeight, mempool, evidencePool,
		privValidator, csMetrics, fastSync, eventBus, consensusLogger,
	)

	nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state)
	if err != nil {
		return nil, err
	}

	// Setup Transport.
	transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp)

	// Setup Switch.
	p2pLogger := logger.With("module", "p2p")
	sw := createSwitch(
		config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor,
		consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger,
	)

	err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
	if err != nil {
		return nil, errors.Wrap(err, "could not add peers from persistent_peers field")
	}

	err = sw.AddUnconditionalPeerIDs(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
	if err != nil {
		return nil, errors.Wrap(err, "could not add peer ids from unconditional_peer_ids field")
	}

	addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
	if err != nil {
		return nil, errors.Wrap(err, "could not create addrbook")
	}

	// Optionally, start the pex reactor
	//
	// TODO:
	//
	// We need to set Seeds and PersistentPeers on the switch,
	// since it needs to be able to use these (and their DNS names)
	// even if the PEX is off. We can include the DNS name in the NetAddress,
	// but it would still be nice to have a clear list of the current "PersistentPeers"
	// somewhere that we can return with net_info.
	//
	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
	// Note we currently use the addrBook regardless at least for AddOurAddress
	var pexReactor *pex.Reactor
	if config.P2P.PexReactor {
		pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
	}

	// Optional pprof server; securely exposed on a separate port.
	if config.ProfListenAddress != "" {
		go func() {
			logger.Error("Profile server", "err", http.ListenAndServe(config.ProfListenAddress, nil))
		}()
	}

	node := &Node{
		config:        config,
		genesisDoc:    genDoc,
		privValidator: privValidator,

		transport: transport,
		sw:        sw,
		addrBook:  addrBook,
		nodeInfo:  nodeInfo,
		nodeKey:   nodeKey,

		stateDB:          stateDB,
		blockStore:       blockStore,
		bcReactor:        bcReactor,
		mempoolReactor:   mempoolReactor,
		mempool:          mempool,
		consensusState:   consensusState,
		consensusReactor: consensusReactor,
		pexReactor:       pexReactor,
		evidencePool:     evidencePool,
		proxyApp:         proxyApp,
		txIndexer:        txIndexer,
		indexerService:   indexerService,
		eventBus:         eventBus,
	}
	node.BaseService = *service.NewBaseService(logger, "Node", node)

	// Apply caller-supplied options (e.g. CustomReactors) last.
	for _, option := range options {
		option(node)
	}

	if baseApp != nil {
		baseApp.SetTendermintNode(node)
	}

	return node, nil
}
   766  
// OnStart starts the Node. It implements service.Service.
//
// Start order matters: RPC comes up before P2P (so the node can receive txs
// for the first block), then the transport listens, then the switch starts
// and dials the persistent peers.
func (n *Node) OnStart() error {
	// Block until genesis time if it lies in the future.
	now := tmtime.Now()
	genTime := n.genesisDoc.GenesisTime
	if genTime.After(now) {
		n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime)
		time.Sleep(genTime.Sub(now))
	}

	// Add private IDs to addrbook to block those peers being added
	n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))

	// Start the RPC server before the P2P server
	// so we can eg. receive txs for the first block
	if n.config.RPC.ListenAddress != "" {
		listeners, err := n.startRPC()
		if err != nil {
			return err
		}
		n.rpcListeners = listeners
	}

	// Expose Prometheus metrics when instrumentation is configured.
	if n.config.Instrumentation.Prometheus &&
		n.config.Instrumentation.PrometheusListenAddr != "" {
		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
	}

	// Start the transport.
	addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
	if err != nil {
		return err
	}
	if err := n.transport.Listen(*addr); err != nil {
		return err
	}

	n.isListening = true

	// Open the mempool write-ahead log before the switch starts gossiping.
	if n.config.Mempool.WalEnabled() {
		err = n.mempool.InitWAL()
		if err != nil {
			return fmt.Errorf("init mempool WAL: %w", err)
		}
	}

	// Start the switch (the P2P server).
	err = n.sw.Start()
	if err != nil {
		return err
	}

	// Always connect to persistent peers
	err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
	if err != nil {
		return errors.Wrap(err, "could not dial peers from persistent_peers field")
	}

	return nil
}
   826  
// OnStop stops the Node. It implements service.Service.
//
// Teardown runs roughly in reverse of OnStart: internal services first, then
// the switch (reactors), then the transport and the externally visible
// listeners. The return values of the individual Stop calls are not checked;
// shutdown proceeds best-effort so one failing component does not prevent the
// rest from stopping.
func (n *Node) OnStop() {
	n.BaseService.OnStop()

	n.Logger.Info("Stopping Node")

	// first stop the non-reactor services
	n.eventBus.Stop()
	n.indexerService.Stop()

	// now stop the reactors
	n.sw.Stop()

	// stop mempool WAL
	if n.config.Mempool.WalEnabled() {
		n.mempool.CloseWAL()
	}

	if err := n.transport.Close(); err != nil {
		n.Logger.Error("Error closing transport", "err", err)
	}

	// Mirrors the flag set in OnStart once the transport was listening.
	n.isListening = false

	// finally stop the listeners / external services
	for _, l := range n.rpcListeners {
		n.Logger.Info("Closing rpc listener", "listener", l)
		if err := l.Close(); err != nil {
			n.Logger.Error("Error closing listener", "listener", l, "err", err)
		}
	}

	// The priv validator is only a service when it is a remote/socket client;
	// file-based validators need no shutdown.
	if pvsc, ok := n.privValidator.(service.Service); ok {
		pvsc.Stop()
	}

	if n.prometheusSrv != nil {
		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
			// Error from closing listeners, or context timeout:
			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
		}
	}
}
   870  
   871  // ConfigureRPC makes sure RPC has all the objects it needs to operate.
   872  func (n *Node) ConfigureRPC() error {
   873  	pubKeys, err := n.privValidator.GetPubKeys()
   874  	if err != nil {
   875  		return fmt.Errorf("can't get pubkey: %w", err)
   876  	}
   877  	rpccore.SetEnvironment(&rpccore.Environment{
   878  		ProxyAppQuery: n.proxyApp.Query(),
   879  
   880  		StateDB:        n.stateDB,
   881  		BlockStore:     n.blockStore,
   882  		EvidencePool:   n.evidencePool,
   883  		ConsensusState: n.consensusState,
   884  		P2PPeers:       n.sw,
   885  		P2PTransport:   n,
   886  
   887  		PubKey:           pubKeys,
   888  		GenDoc:           n.genesisDoc,
   889  		TxIndexer:        n.txIndexer,
   890  		ConsensusReactor: n.consensusReactor,
   891  		EventBus:         n.eventBus,
   892  		Mempool:          n.mempool,
   893  
   894  		Logger: n.Logger.With("module", "rpc"),
   895  
   896  		Config: *n.config.RPC,
   897  	})
   898  	return nil
   899  }
   900  
   901  func (n *Node) startRPC() ([]net.Listener, error) {
   902  	err := n.ConfigureRPC()
   903  	if err != nil {
   904  		return nil, err
   905  	}
   906  
   907  	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")
   908  	coreCodec := amino.NewCodec()
   909  	ctypes.RegisterAmino(coreCodec)
   910  
   911  	if n.config.RPC.Unsafe {
   912  		rpccore.AddUnsafeRoutes()
   913  	}
   914  
   915  	config := rpcserver.DefaultConfig()
   916  	config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
   917  	config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
   918  	config.MaxOpenConnections = n.config.RPC.MaxOpenConnections
   919  	// If necessary adjust global WriteTimeout to ensure it's greater than
   920  	// TimeoutBroadcastTxCommit.
   921  	// See https://github.com/tendermint/tendermint/issues/3435
   922  	if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
   923  		config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
   924  	}
   925  
   926  	// we may expose the rpc over both a unix and tcp socket
   927  	listeners := make([]net.Listener, len(listenAddrs))
   928  	for i, listenAddr := range listenAddrs {
   929  		mux := http.NewServeMux()
   930  		rpcLogger := n.Logger.With("module", "rpc-server")
   931  		wmLogger := rpcLogger.With("protocol", "websocket")
   932  		wm := rpcserver.NewWebsocketManager(rpccore.Routes, coreCodec,
   933  			rpcserver.OnDisconnect(func(remoteAddr string) {
   934  				err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
   935  				if err != nil && err != tmpubsub.ErrSubscriptionNotFound {
   936  					wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err)
   937  				}
   938  			}),
   939  			rpcserver.ReadLimit(config.MaxBodyBytes),
   940  		)
   941  		wm.SetLogger(wmLogger)
   942  		mux.HandleFunc("/websocket", wm.WebsocketHandler)
   943  		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, coreCodec, rpcLogger)
   944  		listener, err := rpcserver.Listen(
   945  			listenAddr,
   946  			config,
   947  		)
   948  		if err != nil {
   949  			return nil, err
   950  		}
   951  
   952  		var rootHandler http.Handler = mux
   953  		if n.config.RPC.IsCorsEnabled() {
   954  			corsMiddleware := cors.New(cors.Options{
   955  				AllowedOrigins: n.config.RPC.CORSAllowedOrigins,
   956  				AllowedMethods: n.config.RPC.CORSAllowedMethods,
   957  				AllowedHeaders: n.config.RPC.CORSAllowedHeaders,
   958  			})
   959  			rootHandler = corsMiddleware.Handler(mux)
   960  		}
   961  		if n.config.RPC.IsTLSEnabled() {
   962  			go rpcserver.ServeTLS(
   963  				listener,
   964  				rootHandler,
   965  				n.config.RPC.CertFile(),
   966  				n.config.RPC.KeyFile(),
   967  				rpcLogger,
   968  				config,
   969  			)
   970  		} else {
   971  			go rpcserver.Serve(
   972  				listener,
   973  				rootHandler,
   974  				rpcLogger,
   975  				config,
   976  			)
   977  		}
   978  
   979  		listeners[i] = listener
   980  	}
   981  
   982  	// we expose a simplified api over grpc for convenience to app devs
   983  	grpcListenAddr := n.config.RPC.GRPCListenAddress
   984  	if grpcListenAddr != "" {
   985  		config := rpcserver.DefaultConfig()
   986  		config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
   987  		config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
   988  		// NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections
   989  		config.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections
   990  		// If necessary adjust global WriteTimeout to ensure it's greater than
   991  		// TimeoutBroadcastTxCommit.
   992  		// See https://github.com/tendermint/tendermint/issues/3435
   993  		if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
   994  			config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
   995  		}
   996  		listener, err := rpcserver.Listen(grpcListenAddr, config)
   997  		if err != nil {
   998  			return nil, err
   999  		}
  1000  		go grpccore.StartGRPCServer(listener)
  1001  		listeners = append(listeners, listener)
  1002  	}
  1003  
  1004  	return listeners, nil
  1005  }
  1006  
  1007  // startPrometheusServer starts a Prometheus HTTP server, listening for metrics
  1008  // collectors on addr.
  1009  func (n *Node) startPrometheusServer(addr string) *http.Server {
  1010  	srv := &http.Server{
  1011  		Addr: addr,
  1012  		Handler: promhttp.InstrumentMetricHandler(
  1013  			prometheus.DefaultRegisterer, promhttp.HandlerFor(
  1014  				prometheus.DefaultGatherer,
  1015  				promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections},
  1016  			),
  1017  		),
  1018  	}
  1019  	go func() {
  1020  		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
  1021  			// Error starting or closing listener:
  1022  			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
  1023  		}
  1024  	}()
  1025  	return srv
  1026  }
  1027  
// Switch returns the Node's p2p Switch.
func (n *Node) Switch() *p2p.Switch {
	return n.sw
}
  1032  
// BlockStore returns the Node's BlockStore (block persistence layer).
func (n *Node) BlockStore() *store.BlockStore {
	return n.blockStore
}
  1037  
// ConsensusState returns the Node's consensus state machine.
func (n *Node) ConsensusState() *cs.State {
	return n.consensusState
}
  1042  
// ConsensusReactor returns the Node's consensus reactor.
func (n *Node) ConsensusReactor() *cs.Reactor {
	return n.consensusReactor
}
  1047  
// MempoolReactor returns the Node's mempool reactor.
func (n *Node) MempoolReactor() *mempl.Reactor {
	return n.mempoolReactor
}
  1052  
// Mempool returns the Node's mempool.
func (n *Node) Mempool() mempl.Mempool {
	return n.mempool
}
  1057  
// PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled.
func (n *Node) PEXReactor() *pex.Reactor {
	return n.pexReactor
}
  1062  
// TxIndexer returns the Node's transaction indexer.
func (n *Node) TxIndexer() txindex.TxIndexer {
	return n.txIndexer
}
  1066  
// EvidencePool returns the Node's EvidencePool.
func (n *Node) EvidencePool() *evidence.Pool {
	return n.evidencePool
}
  1071  
// EventBus returns the Node's EventBus (pub/sub for node events).
func (n *Node) EventBus() *types.EventBus {
	return n.eventBus
}
  1076  
// PrivValidator returns the Node's PrivValidator.
// XXX: for convenience only!
func (n *Node) PrivValidator() types.PrivValidators {
	return n.privValidator
}
  1082  
// GenesisDoc returns the Node's GenesisDoc.
func (n *Node) GenesisDoc() *types.GenesisDoc {
	return n.genesisDoc
}
  1087  
// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
func (n *Node) ProxyApp() proxy.AppConns {
	return n.proxyApp
}
  1092  
// Config returns the Node's config.
func (n *Node) Config() *cfg.Config {
	return n.config
}
  1097  
  1098  //------------------------------------------------------------------------------
  1099  
// Listeners returns a human-readable description of the node's listener,
// built from the configured P2P external address.
func (n *Node) Listeners() []string {
	return []string{
		fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress),
	}
}
  1105  
// IsListening reports whether the node's P2P transport is listening
// (set in OnStart, cleared in OnStop).
func (n *Node) IsListening() bool {
	return n.isListening
}
  1109  
// NodeInfo returns the Node's Info from the Switch.
func (n *Node) NodeInfo() p2p.NodeInfo {
	return n.nodeInfo
}
  1114  
  1115  func makeNodeInfo(
  1116  	config *cfg.Config,
  1117  	nodeKey *p2p.NodeKey,
  1118  	txIndexer txindex.TxIndexer,
  1119  	genDoc *types.GenesisDoc,
  1120  	state sm.State,
  1121  ) (p2p.NodeInfo, error) {
  1122  	txIndexerStatus := "on"
  1123  	if _, ok := txIndexer.(*null.TxIndex); ok {
  1124  		txIndexerStatus = "off"
  1125  	}
  1126  
  1127  	var bcChannel byte
  1128  	switch config.FastSync.Version {
  1129  	case "v0":
  1130  		bcChannel = bcv0.BlockchainChannel
  1131  	case "v1":
  1132  		bcChannel = bcv1.BlockchainChannel
  1133  	case "v2":
  1134  		bcChannel = bcv2.BlockchainChannel
  1135  	default:
  1136  		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
  1137  	}
  1138  
  1139  	nodeInfo := p2p.DefaultNodeInfo{
  1140  		ProtocolVersion: p2p.NewProtocolVersion(
  1141  			version.P2PProtocol, // global
  1142  			state.Version.Consensus.Block,
  1143  			state.Version.Consensus.App,
  1144  		),
  1145  		DefaultNodeID: nodeKey.ID(),
  1146  		Network:       genDoc.ChainID,
  1147  		Version:       version.TMCoreSemVer,
  1148  		Channels: []byte{
  1149  			bcChannel,
  1150  			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
  1151  			mempl.MempoolChannel,
  1152  			evidence.EvidenceChannel,
  1153  		},
  1154  		Moniker: config.Moniker,
  1155  		Other: p2p.DefaultNodeInfoOther{
  1156  			TxIndex:    txIndexerStatus,
  1157  			RPCAddress: config.RPC.ListenAddress,
  1158  		},
  1159  	}
  1160  
  1161  	if config.P2P.PexReactor {
  1162  		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
  1163  	}
  1164  
  1165  	lAddr := config.P2P.ExternalAddress
  1166  
  1167  	if lAddr == "" {
  1168  		lAddr = config.P2P.ListenAddress
  1169  	}
  1170  
  1171  	nodeInfo.ListenAddr = lAddr
  1172  
  1173  	err := nodeInfo.Validate()
  1174  	return nodeInfo, err
  1175  }
  1176  
  1177  //------------------------------------------------------------------------------
  1178  
var (
	// genesisDocKey is the DB key under which the genesis document is
	// persisted by saveGenesisDoc and read back by loadGenesisDoc.
	genesisDocKey = []byte("genesisDoc")
)
  1182  
  1183  // LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
  1184  // database, or creates one using the given genesisDocProvider and persists the
  1185  // result to the database. On success this also returns the genesis doc loaded
  1186  // through the given provider.
  1187  func LoadStateFromDBOrGenesisDocProvider(
  1188  	stateDB dbm.DB,
  1189  	genesisDocProvider GenesisDocProvider,
  1190  ) (sm.State, *types.GenesisDoc, error) {
  1191  	// Get genesis doc
  1192  	genDoc, err := loadGenesisDoc(stateDB)
  1193  	if err != nil {
  1194  		genDoc, err = genesisDocProvider()
  1195  		if err != nil {
  1196  			return sm.State{}, nil, err
  1197  		}
  1198  		// save genesis doc to prevent a certain class of user errors (e.g. when it
  1199  		// was changed, accidentally or not). Also good for audit trail.
  1200  		saveGenesisDoc(stateDB, genDoc)
  1201  	}
  1202  	state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
  1203  	if err != nil {
  1204  		return sm.State{}, nil, err
  1205  	}
  1206  	return state, genDoc, nil
  1207  }
  1208  
  1209  // panics if failed to unmarshal bytes
  1210  func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
  1211  	b, err := db.Get(genesisDocKey)
  1212  	if err != nil {
  1213  		panic(err)
  1214  	}
  1215  	if len(b) == 0 {
  1216  		return nil, errors.New("genesis doc not found")
  1217  	}
  1218  	var genDoc *types.GenesisDoc
  1219  	err = cdc.UnmarshalJSON(b, &genDoc)
  1220  	if err != nil {
  1221  		panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b))
  1222  	}
  1223  	return genDoc, nil
  1224  }
  1225  
  1226  // panics if failed to marshal the given genesis document
  1227  func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
  1228  	b, err := cdc.MarshalJSON(genDoc)
  1229  	if err != nil {
  1230  		panic(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
  1231  	}
  1232  	db.SetSync(genesisDocKey, b)
  1233  }
  1234  
  1235  func createAndStartPrivValidatorSocketClient(
  1236  	listenAddr string,
  1237  	logger log.Logger,
  1238  ) (types.PrivValidator, error) {
  1239  	pve, err := privval.NewSignerListener(listenAddr, logger)
  1240  	if err != nil {
  1241  		return nil, fmt.Errorf("failed to start private validator: %w", err)
  1242  	}
  1243  
  1244  	pvsc, err := privval.NewSignerClient(pve)
  1245  	if err != nil {
  1246  		return nil, fmt.Errorf("failed to start private validator: %w", err)
  1247  	}
  1248  
  1249  	// try to get a pubkey from private validate first time
  1250  	_, err = pvsc.GetPubKey()
  1251  	if err != nil {
  1252  		return nil, fmt.Errorf("can't get pubkey: %w", err)
  1253  	}
  1254  
  1255  	const (
  1256  		retries = 50 // 50 * 100ms = 5s total
  1257  		timeout = 100 * time.Millisecond
  1258  	)
  1259  	pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout)
  1260  
  1261  	return pvscWithRetries, nil
  1262  }
  1263  
  1264  // splitAndTrimEmpty slices s into all subslices separated by sep and returns a
  1265  // slice of the string s with all leading and trailing Unicode code points
  1266  // contained in cutset removed. If sep is empty, SplitAndTrim splits after each
  1267  // UTF-8 sequence. First part is equivalent to strings.SplitN with a count of
  1268  // -1.  also filter out empty strings, only return non-empty strings.
  1269  func splitAndTrimEmpty(s, sep, cutset string) []string {
  1270  	if s == "" {
  1271  		return []string{}
  1272  	}
  1273  
  1274  	spl := strings.Split(s, sep)
  1275  	nonEmptyStrings := make([]string, 0, len(spl))
  1276  	for i := 0; i < len(spl); i++ {
  1277  		element := strings.Trim(spl[i], cutset)
  1278  		if element != "" {
  1279  			nonEmptyStrings = append(nonEmptyStrings, element)
  1280  		}
  1281  	}
  1282  	return nonEmptyStrings
  1283  }