github.com/lazyledger/lazyledger-core@v0.35.0-dev.0.20210613111200-4c651f053571/node/node.go (about)

     1  package node
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"errors"
     7  	"fmt"
     8  	"io"
     9  	"net"
    10  	"net/http"
    11  	_ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
    12  	"strings"
    13  	"time"
    14  
    15  	ipld "github.com/ipfs/go-ipld-format"
    16  	"github.com/libp2p/go-libp2p-core/routing"
    17  	"github.com/prometheus/client_golang/prometheus"
    18  	"github.com/prometheus/client_golang/prometheus/promhttp"
    19  	"github.com/rs/cors"
    20  
    21  	abci "github.com/lazyledger/lazyledger-core/abci/types"
    22  	bcv0 "github.com/lazyledger/lazyledger-core/blockchain/v0"
    23  	cfg "github.com/lazyledger/lazyledger-core/config"
    24  	cs "github.com/lazyledger/lazyledger-core/consensus"
    25  	"github.com/lazyledger/lazyledger-core/crypto"
    26  	"github.com/lazyledger/lazyledger-core/evidence"
    27  	"github.com/lazyledger/lazyledger-core/ipfs"
    28  	dbm "github.com/lazyledger/lazyledger-core/libs/db"
    29  	"github.com/lazyledger/lazyledger-core/libs/db/badgerdb"
    30  	tmjson "github.com/lazyledger/lazyledger-core/libs/json"
    31  	"github.com/lazyledger/lazyledger-core/libs/log"
    32  	tmpubsub "github.com/lazyledger/lazyledger-core/libs/pubsub"
    33  	"github.com/lazyledger/lazyledger-core/libs/service"
    34  	"github.com/lazyledger/lazyledger-core/light"
    35  	mempl "github.com/lazyledger/lazyledger-core/mempool"
    36  	"github.com/lazyledger/lazyledger-core/p2p"
    37  	"github.com/lazyledger/lazyledger-core/p2p/pex"
    38  	"github.com/lazyledger/lazyledger-core/privval"
    39  	"github.com/lazyledger/lazyledger-core/proxy"
    40  	rpccore "github.com/lazyledger/lazyledger-core/rpc/core"
    41  	grpccore "github.com/lazyledger/lazyledger-core/rpc/grpc"
    42  	rpcserver "github.com/lazyledger/lazyledger-core/rpc/jsonrpc/server"
    43  	sm "github.com/lazyledger/lazyledger-core/state"
    44  	"github.com/lazyledger/lazyledger-core/state/txindex"
    45  	"github.com/lazyledger/lazyledger-core/state/txindex/kv"
    46  	"github.com/lazyledger/lazyledger-core/state/txindex/null"
    47  	"github.com/lazyledger/lazyledger-core/statesync"
    48  	"github.com/lazyledger/lazyledger-core/store"
    49  	"github.com/lazyledger/lazyledger-core/types"
    50  	tmtime "github.com/lazyledger/lazyledger-core/types/time"
    51  	"github.com/lazyledger/lazyledger-core/version"
    52  )
    53  
    54  //------------------------------------------------------------------------------
    55  
// DBContext specifies config information for loading a new DB.
type DBContext struct {
	ID     string      // name of the database (e.g. "state", "blockstore", "tx_index", "evidence")
	Config *cfg.Config // node config; Config.DBDir() decides where the DB lives on disk
}
    61  
// DBProvider takes a DBContext and returns an instantiated DB.
// Implementations in this file: DefaultDBProvider (on-disk Badger) and
// InMemDBProvider (ephemeral, for tests and light use).
type DBProvider func(*DBContext) (dbm.DB, error)
    64  
// DefaultDBProvider returns a Badger-backed database named ctx.ID and stored
// under the directory given by ctx.Config.DBDir().
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
	// NOTE(review): despite the generic name, this always uses Badger; no
	// other backend is consulted here.
	return badgerdb.NewDB(ctx.ID, ctx.Config.DBDir())
}
    70  
    71  // InMemDBProvider provides an in-memory DB.
    72  func InMemDBProvider(ctx *DBContext) (dbm.DB, error) {
    73  	return badgerdb.NewInMemoryDB()
    74  }
    75  
// GenesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
// A non-nil error aborts node construction (see NewNode).
type GenesisDocProvider func() (*types.GenesisDoc, error)
    80  
// DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
// the GenesisDoc from the config.GenesisFile() on the filesystem.
func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
	return func() (*types.GenesisDoc, error) {
		// Resolved lazily: the file is read each time the provider is invoked,
		// using whatever path config.GenesisFile() reports at that moment.
		return types.GenesisDocFromFile(config.GenesisFile())
	}
}
    88  
// Provider takes a config, an IPFS API provider, and a logger and returns a
// ready to go Node. DefaultNewNode is the canonical implementation.
type Provider func(*cfg.Config, ipfs.APIProvider, log.Logger) (*Node, error)
    91  
    92  // DefaultNewNode returns a Tendermint node with default settings for the
    93  // PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
    94  // It implements NodeProvider.
    95  func DefaultNewNode(config *cfg.Config, ipfs ipfs.APIProvider, logger log.Logger) (*Node, error) {
    96  	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
    97  	if err != nil {
    98  		return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err)
    99  	}
   100  
   101  	pval, err := privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
   102  	if err != nil {
   103  		return nil, err
   104  	}
   105  
   106  	return NewNode(config,
   107  		pval,
   108  		nodeKey,
   109  		proxy.DefaultClientCreator(config.ProxyApp, config.DBDir()),
   110  		DefaultGenesisDocProviderFunc(config),
   111  		DefaultDBProvider,
   112  		ipfs,
   113  		DefaultMetricsProvider(config.Instrumentation),
   114  		logger,
   115  	)
   116  }
   117  
// MetricsProvider returns consensus, p2p, mempool, and state Metrics for the
// given chain ID.
type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics)
   120  
   121  // DefaultMetricsProvider returns Metrics build using Prometheus client library
   122  // if Prometheus is enabled. Otherwise, it returns no-op Metrics.
   123  func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
   124  	return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) {
   125  		if config.Prometheus {
   126  			return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   127  				p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   128  				mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   129  				sm.PrometheusMetrics(config.Namespace, "chain_id", chainID)
   130  		}
   131  		return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics()
   132  	}
   133  }
   134  
// Option sets a parameter for the node. Options are applied by NewNode after
// the node has been constructed but before it is started.
type Option func(*Node)
   137  
// Temporary interface for switching to fast sync, we should get rid of v0.
// See: https://github.com/tendermint/tendermint/issues/4595
//
// Satisfied by the blockchain reactor; startStateSync calls SwitchToFastSync
// after a successful state sync when fast sync is enabled.
type fastSyncReactor interface {
	SwitchToFastSync(sm.State) error
}
   143  
   144  // CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to
   145  // the node's Switch.
   146  //
   147  // WARNING: using any name from the below list of the existing reactors will
   148  // result in replacing it with the custom one.
   149  //
   150  //  - MEMPOOL
   151  //  - BLOCKCHAIN
   152  //  - CONSENSUS
   153  //  - EVIDENCE
   154  //  - PEX
   155  //  - STATESYNC
   156  func CustomReactors(reactors map[string]p2p.Reactor) Option {
   157  	return func(n *Node) {
   158  		for name, reactor := range reactors {
   159  			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
   160  				n.sw.Logger.Info("Replacing existing reactor with a custom one",
   161  					"name", name, "existing", existingReactor, "custom", reactor)
   162  				n.sw.RemoveReactor(name, existingReactor)
   163  			}
   164  			n.sw.AddReactor(name, reactor)
   165  		}
   166  	}
   167  }
   168  
// StateProvider overrides the state provider used by state sync to retrieve trusted app hashes and
// build a State object for bootstrapping the node.
// WARNING: this interface is considered unstable and subject to change.
func StateProvider(stateProvider statesync.StateProvider) Option {
	return func(n *Node) {
		// Consumed by startStateSync; when nil, a light-client-backed
		// provider is constructed there instead.
		n.stateSyncProvider = stateProvider
	}
}
   177  
   178  //------------------------------------------------------------------------------
   179  
// Node is the highest level interface to a full Tendermint node.
// It includes all configuration information and running services.
type Node struct {
	service.BaseService

	// config
	config        *cfg.Config
	genesisDoc    *types.GenesisDoc   // initial validator set
	privValidator types.PrivValidator // local node's validator key

	// network
	transport   *p2p.MultiplexTransport
	sw          *p2p.Switch  // p2p connections
	addrBook    pex.AddrBook // known peers
	nodeInfo    p2p.NodeInfo // our node's info advertised to peers
	nodeKey     p2p.NodeKey  // our node privkey
	isListening bool

	// services
	eventBus          *types.EventBus   // pub/sub for services
	stateStore        sm.Store          // persistent access to committed state
	blockStore        *store.BlockStore // store the blockchain to disk
	bcReactor         p2p.Reactor       // for fast-syncing
	mempoolReactor    *mempl.Reactor    // for gossipping transactions
	mempool           mempl.Mempool     // holds uncommitted transactions
	stateSync         bool                    // whether the node should state sync on startup
	stateSyncReactor  *statesync.Reactor      // for hosting and restoring state sync snapshots
	stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node
	stateSyncGenesis  sm.State                // provides the genesis state for state sync
	consensusState    *cs.State               // latest consensus state
	consensusReactor  *cs.Reactor             // for participating in the consensus
	pexReactor        *pex.Reactor            // for exchanging peer addresses
	evidencePool      *evidence.Pool          // tracking evidence
	proxyApp          proxy.AppConns          // connection to the application
	rpcListeners      []net.Listener          // rpc servers
	txIndexer         txindex.TxIndexer       // indexes txs for later queries
	indexerService    *txindex.IndexerService // feeds txIndexer from the event bus
	prometheusSrv     *http.Server            // serves Prometheus metrics when enabled

	ipfsClose io.Closer // releases the embedded IPFS node's resources
}
   221  
   222  func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
   223  	proxyApp := proxy.NewAppConns(clientCreator)
   224  	proxyApp.SetLogger(logger.With("module", "proxy"))
   225  	if err := proxyApp.Start(); err != nil {
   226  		return nil, fmt.Errorf("error starting proxy app connections: %v", err)
   227  	}
   228  	return proxyApp, nil
   229  }
   230  
   231  func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
   232  	eventBus := types.NewEventBus()
   233  	eventBus.SetLogger(logger.With("module", "events"))
   234  	if err := eventBus.Start(); err != nil {
   235  		return nil, err
   236  	}
   237  	return eventBus, nil
   238  }
   239  
   240  func createAndStartIndexerService(config *cfg.Config, dbProvider DBProvider,
   241  	eventBus *types.EventBus, logger log.Logger) (*txindex.IndexerService, txindex.TxIndexer, error) {
   242  
   243  	var txIndexer txindex.TxIndexer
   244  	switch config.TxIndex.Indexer {
   245  	case "kv":
   246  		store, err := dbProvider(&DBContext{"tx_index", config})
   247  		if err != nil {
   248  			return nil, nil, err
   249  		}
   250  		txIndexer = kv.NewTxIndex(store)
   251  	default:
   252  		txIndexer = &null.TxIndex{}
   253  	}
   254  
   255  	indexerService := txindex.NewIndexerService(txIndexer, eventBus)
   256  	indexerService.SetLogger(logger.With("module", "txindex"))
   257  	if err := indexerService.Start(); err != nil {
   258  		return nil, nil, err
   259  	}
   260  	return indexerService, txIndexer, nil
   261  }
   262  
   263  func doHandshake(
   264  	stateStore sm.Store,
   265  	state sm.State,
   266  	blockStore sm.BlockStore,
   267  	genDoc *types.GenesisDoc,
   268  	eventBus types.BlockEventPublisher,
   269  	proxyApp proxy.AppConns,
   270  	consensusLogger log.Logger) error {
   271  
   272  	handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc)
   273  	handshaker.SetLogger(consensusLogger)
   274  	handshaker.SetEventBus(eventBus)
   275  	if err := handshaker.Handshake(proxyApp); err != nil {
   276  		return fmt.Errorf("error during handshake: %v", err)
   277  	}
   278  	return nil
   279  }
   280  
   281  func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) {
   282  	// Log the version info.
   283  	logger.Info("Version info",
   284  		"software", version.TMCoreSemVer,
   285  		"block", version.BlockProtocol,
   286  		"p2p", version.P2PProtocol,
   287  	)
   288  
   289  	// If the state and software differ in block version, at least log it.
   290  	if state.Version.Consensus.Block != version.BlockProtocol {
   291  		logger.Info("Software and state have different block protocols",
   292  			"software", version.BlockProtocol,
   293  			"state", state.Version.Consensus.Block,
   294  		)
   295  	}
   296  
   297  	addr := pubKey.Address()
   298  	// Log whether this node is a validator or an observer
   299  	if state.Validators.HasAddress(addr) {
   300  		consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey)
   301  	} else {
   302  		consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
   303  	}
   304  }
   305  
   306  func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
   307  	if state.Validators.Size() > 1 {
   308  		return false
   309  	}
   310  	addr, _ := state.Validators.GetByIndex(0)
   311  	return bytes.Equal(pubKey.Address(), addr)
   312  }
   313  
   314  func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns,
   315  	state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (*mempl.Reactor, *mempl.CListMempool) {
   316  
   317  	mempool := mempl.NewCListMempool(
   318  		config.Mempool,
   319  		proxyApp.Mempool(),
   320  		state.LastBlockHeight,
   321  		mempl.WithMetrics(memplMetrics),
   322  		mempl.WithPreCheck(sm.TxPreCheck(state)),
   323  		mempl.WithPostCheck(sm.TxPostCheck(state)),
   324  	)
   325  	mempoolLogger := logger.With("module", "mempool")
   326  	mempoolReactor := mempl.NewReactor(config.Mempool, mempool)
   327  	mempoolReactor.SetLogger(mempoolLogger)
   328  
   329  	if config.Consensus.WaitForTxs() {
   330  		mempool.EnableTxsAvailable()
   331  	}
   332  	return mempoolReactor, mempool
   333  }
   334  
   335  func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
   336  	stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) {
   337  
   338  	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
   339  	if err != nil {
   340  		return nil, nil, err
   341  	}
   342  	evidenceLogger := logger.With("module", "evidence")
   343  	evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB), blockStore)
   344  	if err != nil {
   345  		return nil, nil, err
   346  	}
   347  	evidenceReactor := evidence.NewReactor(evidencePool)
   348  	evidenceReactor.SetLogger(evidenceLogger)
   349  	return evidenceReactor, evidencePool, nil
   350  }
   351  
   352  func createBlockchainReactor(config *cfg.Config,
   353  	state sm.State,
   354  	blockExec *sm.BlockExecutor,
   355  	blockStore *store.BlockStore,
   356  	fastSync bool,
   357  	logger log.Logger) (bcReactor p2p.Reactor, err error) {
   358  
   359  	switch config.FastSync.Version {
   360  	case "v0":
   361  		bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   362  	// case "v2":
   363  	//	bcReactor = bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   364  	default:
   365  		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
   366  	}
   367  
   368  	bcReactor.SetLogger(logger.With("module", "blockchain"))
   369  	return bcReactor, nil
   370  }
   371  
   372  func createConsensusReactor(
   373  	config *cfg.Config,
   374  	state sm.State,
   375  	blockExec *sm.BlockExecutor,
   376  	blockStore sm.BlockStore,
   377  	mempool *mempl.CListMempool,
   378  	evidencePool *evidence.Pool,
   379  	privValidator types.PrivValidator,
   380  	csMetrics *cs.Metrics,
   381  	waitSync bool,
   382  	eventBus *types.EventBus,
   383  	dag ipld.DAGService,
   384  	croute routing.ContentRouting,
   385  	consensusLogger log.Logger) (*cs.Reactor, *cs.State) {
   386  
   387  	consensusState := cs.NewState(
   388  		config.Consensus,
   389  		state.Copy(),
   390  		blockExec,
   391  		blockStore,
   392  		mempool,
   393  		dag,
   394  		croute,
   395  		evidencePool,
   396  		cs.StateMetrics(csMetrics),
   397  	)
   398  	consensusState.SetLogger(consensusLogger)
   399  	if privValidator != nil {
   400  		consensusState.SetPrivValidator(privValidator)
   401  	}
   402  	consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics))
   403  	consensusReactor.SetLogger(consensusLogger)
   404  	// services which will be publishing and/or subscribing for messages (events)
   405  	// consensusReactor will set it on consensusState and blockExecutor
   406  	consensusReactor.SetEventBus(eventBus)
   407  	return consensusReactor, consensusState
   408  }
   409  
   410  func createTransport(
   411  	config *cfg.Config,
   412  	nodeInfo p2p.NodeInfo,
   413  	nodeKey p2p.NodeKey,
   414  	proxyApp proxy.AppConns,
   415  ) (
   416  	*p2p.MultiplexTransport,
   417  	[]p2p.PeerFilterFunc,
   418  ) {
   419  	var (
   420  		mConnConfig = p2p.MConnConfig(config.P2P)
   421  		transport   = p2p.NewMultiplexTransport(nodeInfo, nodeKey, mConnConfig)
   422  		connFilters = []p2p.ConnFilterFunc{}
   423  		peerFilters = []p2p.PeerFilterFunc{}
   424  	)
   425  
   426  	if !config.P2P.AllowDuplicateIP {
   427  		connFilters = append(connFilters, p2p.ConnDuplicateIPFilter())
   428  	}
   429  
   430  	// Filter peers by addr or pubkey with an ABCI query.
   431  	// If the query return code is OK, add peer.
   432  	if config.FilterPeers {
   433  		connFilters = append(
   434  			connFilters,
   435  			// ABCI query for address filtering.
   436  			func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
   437  				res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{
   438  					Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
   439  				})
   440  				if err != nil {
   441  					return err
   442  				}
   443  				if res.IsErr() {
   444  					return fmt.Errorf("error querying abci app: %v", res)
   445  				}
   446  
   447  				return nil
   448  			},
   449  		)
   450  
   451  		peerFilters = append(
   452  			peerFilters,
   453  			// ABCI query for ID filtering.
   454  			func(_ p2p.IPeerSet, p p2p.Peer) error {
   455  				res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{
   456  					Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
   457  				})
   458  				if err != nil {
   459  					return err
   460  				}
   461  				if res.IsErr() {
   462  					return fmt.Errorf("error querying abci app: %v", res)
   463  				}
   464  
   465  				return nil
   466  			},
   467  		)
   468  	}
   469  
   470  	p2p.MultiplexTransportConnFilters(connFilters...)(transport)
   471  
   472  	// Limit the number of incoming connections.
   473  	max := config.P2P.MaxNumInboundPeers + len(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
   474  	p2p.MultiplexTransportMaxIncomingConnections(max)(transport)
   475  
   476  	return transport, peerFilters
   477  }
   478  
   479  func createSwitch(config *cfg.Config,
   480  	transport p2p.Transport,
   481  	p2pMetrics *p2p.Metrics,
   482  	peerFilters []p2p.PeerFilterFunc,
   483  	mempoolReactor *mempl.Reactor,
   484  	bcReactor p2p.Reactor,
   485  	stateSyncReactor *p2p.ReactorShim,
   486  	consensusReactor *cs.Reactor,
   487  	evidenceReactor *evidence.Reactor,
   488  	nodeInfo p2p.NodeInfo,
   489  	nodeKey p2p.NodeKey,
   490  	p2pLogger log.Logger) *p2p.Switch {
   491  
   492  	sw := p2p.NewSwitch(
   493  		config.P2P,
   494  		transport,
   495  		p2p.WithMetrics(p2pMetrics),
   496  		p2p.SwitchPeerFilters(peerFilters...),
   497  	)
   498  	sw.SetLogger(p2pLogger)
   499  	sw.AddReactor("MEMPOOL", mempoolReactor)
   500  	sw.AddReactor("BLOCKCHAIN", bcReactor)
   501  	sw.AddReactor("CONSENSUS", consensusReactor)
   502  	sw.AddReactor("EVIDENCE", evidenceReactor)
   503  	sw.AddReactor("STATESYNC", stateSyncReactor)
   504  
   505  	sw.SetNodeInfo(nodeInfo)
   506  	sw.SetNodeKey(nodeKey)
   507  
   508  	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID, "file", config.NodeKeyFile())
   509  	return sw
   510  }
   511  
   512  func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
   513  	p2pLogger log.Logger, nodeKey p2p.NodeKey) (pex.AddrBook, error) {
   514  
   515  	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
   516  	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
   517  
   518  	// Add ourselves to addrbook to prevent dialing ourselves
   519  	if config.P2P.ExternalAddress != "" {
   520  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID, config.P2P.ExternalAddress))
   521  		if err != nil {
   522  			return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err)
   523  		}
   524  		addrBook.AddOurAddress(addr)
   525  	}
   526  	if config.P2P.ListenAddress != "" {
   527  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID, config.P2P.ListenAddress))
   528  		if err != nil {
   529  			return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err)
   530  		}
   531  		addrBook.AddOurAddress(addr)
   532  	}
   533  
   534  	sw.SetAddrBook(addrBook)
   535  
   536  	return addrBook, nil
   537  }
   538  
   539  func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
   540  	sw *p2p.Switch, logger log.Logger) *pex.Reactor {
   541  
   542  	// TODO persistent peers ? so we can have their DNS addrs saved
   543  	pexReactor := pex.NewReactor(addrBook,
   544  		&pex.ReactorConfig{
   545  			Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
   546  			SeedMode: config.P2P.SeedMode,
   547  			// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
   548  			// blocks assuming 10s blocks ~ 28 hours.
   549  			// TODO (melekes): make it dynamic based on the actual block latencies
   550  			// from the live network.
   551  			// https://github.com/tendermint/tendermint/issues/3523
   552  			SeedDisconnectWaitPeriod:     28 * time.Hour,
   553  			PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
   554  		})
   555  	pexReactor.SetLogger(logger.With("module", "pex"))
   556  	sw.AddReactor("PEX", pexReactor)
   557  	return pexReactor
   558  }
   559  
// startStateSync starts an asynchronous state sync process, then switches to fast sync mode.
func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor,
	stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool,
	stateStore sm.Store, blockStore *store.BlockStore, state sm.State) error {
	ssR.Logger.Info("Starting state sync")

	// No provider injected (see the StateProvider node option): build a
	// light-client-backed one from the configured RPC servers and trust
	// options. Setup is bounded to 10s via the context timeout.
	if stateProvider == nil {
		var err error
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		stateProvider, err = statesync.NewLightClientStateProvider(
			ctx,
			state.ChainID, state.Version, state.InitialHeight,
			config.RPCServers, light.TrustOptions{
				Period: config.TrustPeriod,
				Height: config.TrustHeight,
				Hash:   config.TrustHashBytes(),
			}, ssR.Logger.With("module", "light"))
		if err != nil {
			return fmt.Errorf("failed to set up light client state provider: %w", err)
		}
	}

	// The sync itself runs in the background: failures are logged and abort
	// the goroutine; they are not reported to the caller.
	go func() {
		state, commit, err := ssR.Sync(stateProvider, config.DiscoveryTime)
		if err != nil {
			ssR.Logger.Error("State sync failed", "err", err)
			return
		}
		// Persist the synced state and its commit before handing off.
		err = stateStore.Bootstrap(state)
		if err != nil {
			ssR.Logger.Error("Failed to bootstrap node with new state", "err", err)
			return
		}
		err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit)
		if err != nil {
			ssR.Logger.Error("Failed to store last seen commit", "err", err)
			return
		}

		if fastSync {
			// FIXME Very ugly to have these metrics bleed through here.
			conR.Metrics.StateSyncing.Set(0)
			conR.Metrics.FastSyncing.Set(1)
			err = bcR.SwitchToFastSync(state)
			if err != nil {
				ssR.Logger.Error("Failed to switch to fast sync", "err", err)
				return
			}
		} else {
			// Skip fast sync and enter consensus directly from the synced state.
			conR.SwitchToConsensus(state, true)
		}
	}()
	return nil
}
   615  
   616  // NewNode returns a new, ready to go, Tendermint Node.
   617  func NewNode(config *cfg.Config,
   618  	privValidator types.PrivValidator,
   619  	nodeKey p2p.NodeKey,
   620  	clientCreator proxy.ClientCreator,
   621  	genesisDocProvider GenesisDocProvider,
   622  	dbProvider DBProvider,
   623  	ipfsProvider ipfs.APIProvider,
   624  	metricsProvider MetricsProvider,
   625  	logger log.Logger,
   626  	options ...Option) (*Node, error) {
   627  
   628  	stateDB, err := dbProvider(&DBContext{"state", config})
   629  	if err != nil {
   630  		return nil, err
   631  	}
   632  
   633  	stateStore := sm.NewStore(stateDB)
   634  
   635  	state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
   636  	if err != nil {
   637  		return nil, err
   638  	}
   639  
   640  	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
   641  	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger)
   642  	if err != nil {
   643  		return nil, err
   644  	}
   645  
   646  	// EventBus and IndexerService must be started before the handshake because
   647  	// we might need to index the txs of the replayed block as this might not have happened
   648  	// when the node stopped last time (i.e. the node stopped after it saved the block
   649  	// but before it indexed the txs, or, endblocker panicked)
   650  	eventBus, err := createAndStartEventBus(logger)
   651  	if err != nil {
   652  		return nil, err
   653  	}
   654  
   655  	// Transaction indexing
   656  	indexerService, txIndexer, err := createAndStartIndexerService(config, dbProvider, eventBus, logger)
   657  	if err != nil {
   658  		return nil, err
   659  	}
   660  
   661  	// If an address is provided, listen on the socket for a connection from an
   662  	// external signing process.
   663  	if config.PrivValidatorListenAddr != "" {
   664  		// FIXME: we should start services inside OnStart
   665  		privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, genDoc.ChainID, logger)
   666  		if err != nil {
   667  			return nil, fmt.Errorf("error with private validator socket client: %w", err)
   668  		}
   669  	}
   670  
   671  	pubKey, err := privValidator.GetPubKey()
   672  	if err != nil {
   673  		return nil, fmt.Errorf("can't get pubkey: %w", err)
   674  	}
   675  
   676  	// Determine whether we should attempt state sync.
   677  	stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey)
   678  	if stateSync && state.LastBlockHeight > 0 {
   679  		logger.Info("Found local state with non-zero height, skipping state sync")
   680  		stateSync = false
   681  	}
   682  
   683  	dag, ipfsNode, err := ipfsProvider()
   684  	if err != nil {
   685  		return nil, err
   686  	}
   687  
   688  	blockStoreDB, err := dbProvider(&DBContext{"blockstore", config})
   689  	if err != nil {
   690  		return nil, err
   691  	}
   692  
   693  	blockStore := store.NewBlockStore(blockStoreDB, dag)
   694  
   695  	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
   696  	// and replays any blocks as necessary to sync tendermint with the app.
   697  	consensusLogger := logger.With("module", "consensus")
   698  	if !stateSync {
   699  		if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil {
   700  			return nil, err
   701  		}
   702  
   703  		// Reload the state. It will have the Version.Consensus.App set by the
   704  		// Handshake, and may have other modifications as well (ie. depending on
   705  		// what happened during block replay).
   706  		state, err = stateStore.Load()
   707  		if err != nil {
   708  			return nil, fmt.Errorf("cannot load state: %w", err)
   709  		}
   710  	}
   711  
   712  	// Determine whether we should do fast sync. This must happen after the handshake, since the
   713  	// app may modify the validator set, specifying ourself as the only validator.
   714  	fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey)
   715  
   716  	logNodeStartupInfo(state, pubKey, logger, consensusLogger)
   717  
   718  	csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID)
   719  
   720  	// Make MempoolReactor
   721  	mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)
   722  
   723  	// Make Evidence Reactor
   724  	evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger)
   725  	if err != nil {
   726  		return nil, err
   727  	}
   728  
   729  	// make block executor for consensus and blockchain reactors to execute blocks
   730  	blockExec := sm.NewBlockExecutor(
   731  		stateStore,
   732  		logger.With("module", "state"),
   733  		proxyApp.Consensus(),
   734  		mempool,
   735  		evidencePool,
   736  		sm.BlockExecutorWithMetrics(smMetrics),
   737  	)
   738  
   739  	// Make BlockchainReactor. Don't start fast sync if we're doing a state sync first.
   740  	bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync && !stateSync, logger)
   741  	if err != nil {
   742  		return nil, fmt.Errorf("could not create blockchain reactor: %w", err)
   743  	}
   744  
   745  	// Make ConsensusReactor. Don't enable fully if doing a state sync and/or fast sync first.
   746  	// FIXME We need to update metrics here, since other reactors don't have access to them.
   747  	if stateSync {
   748  		csMetrics.StateSyncing.Set(1)
   749  	} else if fastSync {
   750  		csMetrics.FastSyncing.Set(1)
   751  	}
   752  	consensusReactor, consensusState := createConsensusReactor(
   753  		config, state, blockExec, blockStore, mempool, evidencePool,
   754  		privValidator, csMetrics, stateSync || fastSync, eventBus, dag, ipfsNode.Routing, consensusLogger,
   755  	)
   756  
   757  	// Set up state sync reactor, and schedule a sync if requested.
   758  	// FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy,
   759  	// we should clean this whole thing up. See:
   760  	// https://github.com/tendermint/tendermint/issues/4644
   761  	stateSyncReactorShim := p2p.NewReactorShim("StateSyncShim", statesync.ChannelShims)
   762  	stateSyncReactorShim.SetLogger(logger.With("module", "statesync"))
   763  
   764  	stateSyncReactor := statesync.NewReactor(
   765  		stateSyncReactorShim.Logger,
   766  		proxyApp.Snapshot(),
   767  		proxyApp.Query(),
   768  		stateSyncReactorShim.GetChannel(statesync.SnapshotChannel),
   769  		stateSyncReactorShim.GetChannel(statesync.ChunkChannel),
   770  		stateSyncReactorShim.PeerUpdates,
   771  		config.StateSync.TempDir,
   772  	)
   773  
   774  	nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state)
   775  	if err != nil {
   776  		return nil, err
   777  	}
   778  
   779  	// Setup Transport.
   780  	transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp)
   781  
   782  	// Setup Switch.
   783  	p2pLogger := logger.With("module", "p2p")
   784  	sw := createSwitch(
   785  		config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor,
   786  		stateSyncReactorShim, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger,
   787  	)
   788  
   789  	err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
   790  	if err != nil {
   791  		return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err)
   792  	}
   793  
   794  	err = sw.AddUnconditionalPeerIDs(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
   795  	if err != nil {
   796  		return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
   797  	}
   798  
   799  	addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
   800  	if err != nil {
   801  		return nil, fmt.Errorf("could not create addrbook: %w", err)
   802  	}
   803  
   804  	// Optionally, start the pex reactor
   805  	//
   806  	// TODO:
   807  	//
   808  	// We need to set Seeds and PersistentPeers on the switch,
   809  	// since it needs to be able to use these (and their DNS names)
   810  	// even if the PEX is off. We can include the DNS name in the NetAddress,
   811  	// but it would still be nice to have a clear list of the current "PersistentPeers"
   812  	// somewhere that we can return with net_info.
   813  	//
   814  	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
   815  	// Note we currently use the addrBook regardless at least for AddOurAddress
   816  	var pexReactor *pex.Reactor
   817  	if config.P2P.PexReactor {
   818  		pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
   819  	}
   820  
   821  	if config.RPC.PprofListenAddress != "" {
   822  		go func() {
   823  			logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress)
   824  			logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil))
   825  		}()
   826  	}
   827  
   828  	node := &Node{
   829  		config:        config,
   830  		genesisDoc:    genDoc,
   831  		privValidator: privValidator,
   832  
   833  		transport: transport,
   834  		sw:        sw,
   835  		addrBook:  addrBook,
   836  		nodeInfo:  nodeInfo,
   837  		nodeKey:   nodeKey,
   838  
   839  		stateStore:       stateStore,
   840  		blockStore:       blockStore,
   841  		bcReactor:        bcReactor,
   842  		mempoolReactor:   mempoolReactor,
   843  		mempool:          mempool,
   844  		consensusState:   consensusState,
   845  		consensusReactor: consensusReactor,
   846  		stateSyncReactor: stateSyncReactor,
   847  		stateSync:        stateSync,
   848  		stateSyncGenesis: state, // Shouldn't be necessary, but need a way to pass the genesis state
   849  		pexReactor:       pexReactor,
   850  		evidencePool:     evidencePool,
   851  		proxyApp:         proxyApp,
   852  		txIndexer:        txIndexer,
   853  		indexerService:   indexerService,
   854  		eventBus:         eventBus,
   855  		ipfsClose:        ipfsNode,
   856  	}
   857  	node.BaseService = *service.NewBaseService(logger, "Node", node)
   858  
   859  	for _, option := range options {
   860  		option(node)
   861  	}
   862  
   863  	return node, nil
   864  }
   865  
// OnStart starts the Node. It implements service.Service.
//
// Startup order matters: RPC first (so txs can arrive before block 1),
// then the P2P transport, mempool WAL, switch, state sync reactor, and
// finally peer dialing and the optional state sync run.
func (n *Node) OnStart() error {
	// Block until the configured genesis time; starting consensus earlier
	// would produce blocks timestamped before genesis.
	now := tmtime.Now()
	genTime := n.genesisDoc.GenesisTime
	if genTime.After(now) {
		n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime)
		time.Sleep(genTime.Sub(now))
	}

	// Add private IDs to addrbook to block those peers being added
	n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))

	// Start the RPC server before the P2P server
	// so we can eg. receive txs for the first block
	if n.config.RPC.ListenAddress != "" {
		listeners, err := n.startRPC()
		if err != nil {
			return err
		}
		n.rpcListeners = listeners
	}

	if n.config.Instrumentation.Prometheus &&
		n.config.Instrumentation.PrometheusListenAddr != "" {
		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
	}

	// Start the transport.
	addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID, n.config.P2P.ListenAddress))
	if err != nil {
		return err
	}
	if err := n.transport.Listen(*addr); err != nil {
		return err
	}

	n.isListening = true

	// The mempool WAL must be open before the switch starts feeding txs in.
	if n.config.Mempool.WalEnabled() {
		err = n.mempool.InitWAL()
		if err != nil {
			return fmt.Errorf("init mempool WAL: %w", err)
		}
	}

	// Start the switch (the P2P server).
	err = n.sw.Start()
	if err != nil {
		return err
	}

	// Start the real state sync reactor separately since the switch uses the shim.
	if err := n.stateSyncReactor.Start(); err != nil {
		return err
	}

	// Always connect to persistent peers
	err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
	if err != nil {
		return fmt.Errorf("could not dial peers from persistent_peers field: %w", err)
	}

	// Run state sync. Requires a blockchain reactor that can take over once
	// the state snapshot is restored (fastSyncReactor).
	if n.stateSync {
		bcR, ok := n.bcReactor.(fastSyncReactor)
		if !ok {
			return fmt.Errorf("this blockchain reactor does not support switching from state sync")
		}
		err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider,
			n.config.StateSync, n.config.FastSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis)
		if err != nil {
			return fmt.Errorf("failed to start state sync: %w", err)
		}
	}

	return nil
}
   943  
   944  // OnStop stops the Node. It implements service.Service.
   945  func (n *Node) OnStop() {
   946  	n.BaseService.OnStop()
   947  
   948  	n.Logger.Info("Stopping Node")
   949  
   950  	// first stop the non-reactor services
   951  	if err := n.eventBus.Stop(); err != nil {
   952  		n.Logger.Error("Error closing eventBus", "err", err)
   953  	}
   954  	if err := n.indexerService.Stop(); err != nil {
   955  		n.Logger.Error("Error closing indexerService", "err", err)
   956  	}
   957  
   958  	// now stop the reactors
   959  	if err := n.sw.Stop(); err != nil {
   960  		n.Logger.Error("Error closing switch", "err", err)
   961  	}
   962  
   963  	// Stop the real state sync reactor separately since the switch uses the shim.
   964  	if err := n.stateSyncReactor.Stop(); err != nil {
   965  		n.Logger.Error("failed to stop state sync service", "err", err)
   966  	}
   967  
   968  	// stop mempool WAL
   969  	if n.config.Mempool.WalEnabled() {
   970  		n.mempool.CloseWAL()
   971  	}
   972  
   973  	if err := n.transport.Close(); err != nil {
   974  		n.Logger.Error("Error closing transport", "err", err)
   975  	}
   976  
   977  	n.isListening = false
   978  
   979  	// finally stop the listeners / external services
   980  	for _, l := range n.rpcListeners {
   981  		n.Logger.Info("Closing rpc listener", "listener", l)
   982  		if err := l.Close(); err != nil {
   983  			n.Logger.Error("Error closing listener", "listener", l, "err", err)
   984  		}
   985  	}
   986  
   987  	if pvsc, ok := n.privValidator.(service.Service); ok {
   988  		if err := pvsc.Stop(); err != nil {
   989  			n.Logger.Error("Error closing private validator", "err", err)
   990  		}
   991  	}
   992  
   993  	if n.prometheusSrv != nil {
   994  		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
   995  			// Error from closing listeners, or context timeout:
   996  			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
   997  		}
   998  	}
   999  
  1000  	if err := n.ipfsClose.Close(); err != nil {
  1001  		n.Logger.Error("ipfsClose.Close()", err)
  1002  	}
  1003  }
  1004  
// ConfigureRPC makes sure RPC has all the objects it needs to operate.
//
// It populates the package-global rpccore environment with handles to the
// node's stores, reactors and services so RPC handlers can serve requests.
// It returns an error only when the validator pubkey cannot be retrieved
// (e.g. a remote signer is unreachable).
func (n *Node) ConfigureRPC() error {
	pubKey, err := n.privValidator.GetPubKey()
	if err != nil {
		return fmt.Errorf("can't get pubkey: %w", err)
	}
	rpccore.SetEnvironment(&rpccore.Environment{
		ProxyAppQuery:   n.proxyApp.Query(),
		ProxyAppMempool: n.proxyApp.Mempool(),

		StateStore:     n.stateStore,
		BlockStore:     n.blockStore,
		EvidencePool:   n.evidencePool,
		ConsensusState: n.consensusState,
		P2PPeers:       n.sw,
		P2PTransport:   n,

		PubKey:           pubKey,
		GenDoc:           n.genesisDoc,
		TxIndexer:        n.txIndexer,
		ConsensusReactor: n.consensusReactor,
		EventBus:         n.eventBus,
		Mempool:          n.mempool,

		Logger: n.Logger.With("module", "rpc"),

		// Config is copied by value; later mutations of n.config.RPC are
		// not reflected in the RPC environment.
		Config: *n.config.RPC,
	})
	return nil
}
  1035  
// startRPC configures the RPC environment and starts the JSON-RPC/websocket
// servers (one per configured listen address) plus, optionally, the gRPC
// broadcast API. It returns the listeners so OnStop can close them; the
// servers themselves run in goroutines until their listener is closed.
func (n *Node) startRPC() ([]net.Listener, error) {
	err := n.ConfigureRPC()
	if err != nil {
		return nil, err
	}

	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")

	if n.config.RPC.Unsafe {
		rpccore.AddUnsafeRoutes()
	}

	config := rpcserver.DefaultConfig()
	config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
	config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
	config.MaxOpenConnections = n.config.RPC.MaxOpenConnections
	// If necessary adjust global WriteTimeout to ensure it's greater than
	// TimeoutBroadcastTxCommit.
	// See https://github.com/tendermint/tendermint/issues/3435
	if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
		config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
	}

	// we may expose the rpc over both a unix and tcp socket
	listeners := make([]net.Listener, len(listenAddrs))
	for i, listenAddr := range listenAddrs {
		mux := http.NewServeMux()
		rpcLogger := n.Logger.With("module", "rpc-server")
		wmLogger := rpcLogger.With("protocol", "websocket")
		wm := rpcserver.NewWebsocketManager(rpccore.Routes,
			// Drop all of a client's event subscriptions when its websocket
			// disconnects, so the event bus doesn't accumulate dead subscribers.
			rpcserver.OnDisconnect(func(remoteAddr string) {
				err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
				if err != nil && err != tmpubsub.ErrSubscriptionNotFound {
					wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err)
				}
			}),
			rpcserver.ReadLimit(config.MaxBodyBytes),
		)
		wm.SetLogger(wmLogger)
		mux.HandleFunc("/websocket", wm.WebsocketHandler)
		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger)
		listener, err := rpcserver.Listen(
			listenAddr,
			config,
		)
		if err != nil {
			return nil, err
		}

		// Optionally wrap the mux in a CORS middleware before serving.
		var rootHandler http.Handler = mux
		if n.config.RPC.IsCorsEnabled() {
			corsMiddleware := cors.New(cors.Options{
				AllowedOrigins: n.config.RPC.CORSAllowedOrigins,
				AllowedMethods: n.config.RPC.CORSAllowedMethods,
				AllowedHeaders: n.config.RPC.CORSAllowedHeaders,
			})
			rootHandler = corsMiddleware.Handler(mux)
		}
		if n.config.RPC.IsTLSEnabled() {
			go func() {
				if err := rpcserver.ServeTLS(
					listener,
					rootHandler,
					n.config.RPC.CertFile(),
					n.config.RPC.KeyFile(),
					rpcLogger,
					config,
				); err != nil {
					n.Logger.Error("Error serving server with TLS", "err", err)
				}
			}()
		} else {
			go func() {
				if err := rpcserver.Serve(
					listener,
					rootHandler,
					rpcLogger,
					config,
				); err != nil {
					n.Logger.Error("Error serving server", "err", err)
				}
			}()
		}

		listeners[i] = listener
	}

	// we expose a simplified api over grpc for convenience to app devs
	grpcListenAddr := n.config.RPC.GRPCListenAddress
	if grpcListenAddr != "" {
		// Fresh config: the gRPC server has its own connection limit.
		config := rpcserver.DefaultConfig()
		config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
		config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
		// NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections
		config.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections
		// If necessary adjust global WriteTimeout to ensure it's greater than
		// TimeoutBroadcastTxCommit.
		// See https://github.com/tendermint/tendermint/issues/3435
		if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
			config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
		}
		listener, err := rpcserver.Listen(grpcListenAddr, config)
		if err != nil {
			return nil, err
		}
		go func() {
			if err := grpccore.StartGRPCServer(listener); err != nil {
				n.Logger.Error("Error starting gRPC server", "err", err)
			}
		}()
		listeners = append(listeners, listener)

	}

	return listeners, nil

}
  1153  
  1154  // startPrometheusServer starts a Prometheus HTTP server, listening for metrics
  1155  // collectors on addr.
  1156  func (n *Node) startPrometheusServer(addr string) *http.Server {
  1157  	srv := &http.Server{
  1158  		Addr: addr,
  1159  		Handler: promhttp.InstrumentMetricHandler(
  1160  			prometheus.DefaultRegisterer, promhttp.HandlerFor(
  1161  				prometheus.DefaultGatherer,
  1162  				promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections},
  1163  			),
  1164  		),
  1165  	}
  1166  	go func() {
  1167  		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
  1168  			// Error starting or closing listener:
  1169  			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
  1170  		}
  1171  	}()
  1172  	return srv
  1173  }
  1174  
// Switch returns the Node's Switch.
func (n *Node) Switch() *p2p.Switch {
	return n.sw
}

// BlockStore returns the Node's BlockStore.
func (n *Node) BlockStore() *store.BlockStore {
	return n.blockStore
}

// ConsensusState returns the Node's ConsensusState.
func (n *Node) ConsensusState() *cs.State {
	return n.consensusState
}

// ConsensusReactor returns the Node's ConsensusReactor.
func (n *Node) ConsensusReactor() *cs.Reactor {
	return n.consensusReactor
}

// MempoolReactor returns the Node's mempool reactor.
func (n *Node) MempoolReactor() *mempl.Reactor {
	return n.mempoolReactor
}

// Mempool returns the Node's mempool.
func (n *Node) Mempool() mempl.Mempool {
	return n.mempool
}

// PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled.
func (n *Node) PEXReactor() *pex.Reactor {
	return n.pexReactor
}

// EvidencePool returns the Node's EvidencePool.
func (n *Node) EvidencePool() *evidence.Pool {
	return n.evidencePool
}

// EventBus returns the Node's EventBus.
func (n *Node) EventBus() *types.EventBus {
	return n.eventBus
}

// PrivValidator returns the Node's PrivValidator.
// XXX: for convenience only!
func (n *Node) PrivValidator() types.PrivValidator {
	return n.privValidator
}

// GenesisDoc returns the Node's GenesisDoc.
func (n *Node) GenesisDoc() *types.GenesisDoc {
	return n.genesisDoc
}

// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
func (n *Node) ProxyApp() proxy.AppConns {
	return n.proxyApp
}

// Config returns the Node's config.
func (n *Node) Config() *cfg.Config {
	return n.config
}

//------------------------------------------------------------------------------

// Listeners returns a human-readable description of the node's external
// P2P address; used by the RPC net_info endpoint.
func (n *Node) Listeners() []string {
	return []string{
		fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress),
	}
}

// IsListening reports whether the P2P transport is currently listening;
// set true in OnStart and false in OnStop.
func (n *Node) IsListening() bool {
	return n.isListening
}

// NodeInfo returns the Node's Info from the Switch.
func (n *Node) NodeInfo() p2p.NodeInfo {
	return n.nodeInfo
}
  1257  
  1258  func makeNodeInfo(
  1259  	config *cfg.Config,
  1260  	nodeKey p2p.NodeKey,
  1261  	txIndexer txindex.TxIndexer,
  1262  	genDoc *types.GenesisDoc,
  1263  	state sm.State,
  1264  ) (p2p.NodeInfo, error) {
  1265  	txIndexerStatus := "on"
  1266  	if _, ok := txIndexer.(*null.TxIndex); ok {
  1267  		txIndexerStatus = "off"
  1268  	}
  1269  
  1270  	var bcChannel byte
  1271  	switch config.FastSync.Version {
  1272  	case "v0":
  1273  		bcChannel = bcv0.BlockchainChannel
  1274  	// case "v2":
  1275  	//	bcChannel = bcv2.BlockchainChannel
  1276  	default:
  1277  		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
  1278  	}
  1279  
  1280  	nodeInfo := p2p.DefaultNodeInfo{
  1281  		ProtocolVersion: p2p.NewProtocolVersion(
  1282  			version.P2PProtocol, // global
  1283  			state.Version.Consensus.Block,
  1284  			state.Version.Consensus.App,
  1285  		),
  1286  		DefaultNodeID: nodeKey.ID,
  1287  		Network:       genDoc.ChainID,
  1288  		Version:       version.TMCoreSemVer,
  1289  		Channels: []byte{
  1290  			bcChannel,
  1291  			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
  1292  			mempl.MempoolChannel,
  1293  			evidence.EvidenceChannel,
  1294  			byte(statesync.SnapshotChannel), byte(statesync.ChunkChannel),
  1295  		},
  1296  		Moniker: config.Moniker,
  1297  		Other: p2p.DefaultNodeInfoOther{
  1298  			TxIndex:    txIndexerStatus,
  1299  			RPCAddress: config.RPC.ListenAddress,
  1300  		},
  1301  	}
  1302  
  1303  	if config.P2P.PexReactor {
  1304  		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
  1305  	}
  1306  
  1307  	lAddr := config.P2P.ExternalAddress
  1308  
  1309  	if lAddr == "" {
  1310  		lAddr = config.P2P.ListenAddress
  1311  	}
  1312  
  1313  	nodeInfo.ListenAddr = lAddr
  1314  
  1315  	err := nodeInfo.Validate()
  1316  	return nodeInfo, err
  1317  }
  1318  
  1319  //------------------------------------------------------------------------------
  1320  
var (
	// genesisDocKey is the database key under which the genesis document
	// is persisted (see loadGenesisDoc / saveGenesisDoc).
	genesisDocKey = []byte("genesisDoc")
)
  1324  
  1325  // LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
  1326  // database, or creates one using the given genesisDocProvider. On success this also
  1327  // returns the genesis doc loaded through the given provider.
  1328  func LoadStateFromDBOrGenesisDocProvider(
  1329  	stateDB dbm.DB,
  1330  	genesisDocProvider GenesisDocProvider,
  1331  ) (sm.State, *types.GenesisDoc, error) {
  1332  	// Get genesis doc
  1333  	genDoc, err := loadGenesisDoc(stateDB)
  1334  	if err != nil {
  1335  		genDoc, err = genesisDocProvider()
  1336  		if err != nil {
  1337  			return sm.State{}, nil, err
  1338  		}
  1339  		// save genesis doc to prevent a certain class of user errors (e.g. when it
  1340  		// was changed, accidentally or not). Also good for audit trail.
  1341  		if err := saveGenesisDoc(stateDB, genDoc); err != nil {
  1342  			return sm.State{}, nil, err
  1343  		}
  1344  	}
  1345  	stateStore := sm.NewStore(stateDB)
  1346  	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
  1347  	if err != nil {
  1348  		return sm.State{}, nil, err
  1349  	}
  1350  	return state, genDoc, nil
  1351  }
  1352  
  1353  // panics if failed to unmarshal bytes
  1354  func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
  1355  	b, err := db.Get(genesisDocKey)
  1356  	if err != nil {
  1357  		panic(err)
  1358  	}
  1359  	if len(b) == 0 {
  1360  		return nil, errors.New("genesis doc not found")
  1361  	}
  1362  	var genDoc *types.GenesisDoc
  1363  	err = tmjson.Unmarshal(b, &genDoc)
  1364  	if err != nil {
  1365  		panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b))
  1366  	}
  1367  	return genDoc, nil
  1368  }
  1369  
  1370  // panics if failed to marshal the given genesis document
  1371  func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) error {
  1372  	b, err := tmjson.Marshal(genDoc)
  1373  	if err != nil {
  1374  		return fmt.Errorf("failed to save genesis doc due to marshaling error: %w", err)
  1375  	}
  1376  	if err := db.SetSync(genesisDocKey, b); err != nil {
  1377  		return err
  1378  	}
  1379  
  1380  	return nil
  1381  }
  1382  
  1383  func createAndStartPrivValidatorSocketClient(
  1384  	listenAddr,
  1385  	chainID string,
  1386  	logger log.Logger,
  1387  ) (types.PrivValidator, error) {
  1388  	pve, err := privval.NewSignerListener(listenAddr, logger)
  1389  	if err != nil {
  1390  		return nil, fmt.Errorf("failed to start private validator: %w", err)
  1391  	}
  1392  
  1393  	pvsc, err := privval.NewSignerClient(pve, chainID)
  1394  	if err != nil {
  1395  		return nil, fmt.Errorf("failed to start private validator: %w", err)
  1396  	}
  1397  
  1398  	// try to get a pubkey from private validate first time
  1399  	_, err = pvsc.GetPubKey()
  1400  	if err != nil {
  1401  		return nil, fmt.Errorf("can't get pubkey: %w", err)
  1402  	}
  1403  
  1404  	const (
  1405  		retries = 50 // 50 * 200ms = 10s total
  1406  		timeout = 200 * time.Millisecond
  1407  	)
  1408  	pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout)
  1409  
  1410  	return pvscWithRetries, nil
  1411  }
  1412  
  1413  // splitAndTrimEmpty slices s into all subslices separated by sep and returns a
  1414  // slice of the string s with all leading and trailing Unicode code points
  1415  // contained in cutset removed. If sep is empty, SplitAndTrim splits after each
  1416  // UTF-8 sequence. First part is equivalent to strings.SplitN with a count of
  1417  // -1.  also filter out empty strings, only return non-empty strings.
  1418  func splitAndTrimEmpty(s, sep, cutset string) []string {
  1419  	if s == "" {
  1420  		return []string{}
  1421  	}
  1422  
  1423  	spl := strings.Split(s, sep)
  1424  	nonEmptyStrings := make([]string, 0, len(spl))
  1425  	for i := 0; i < len(spl); i++ {
  1426  		element := strings.Trim(spl[i], cutset)
  1427  		if element != "" {
  1428  			nonEmptyStrings = append(nonEmptyStrings, element)
  1429  		}
  1430  	}
  1431  	return nonEmptyStrings
  1432  }