github.com/noirx94/tendermintmp@v0.0.1/test/maverick/node/node.go (about)

     1  package node
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"errors"
     7  	"fmt"
     8  	"net"
     9  	"net/http"
    10  	_ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
    11  	"strconv"
    12  	"strings"
    13  	"time"
    14  
    15  	"github.com/prometheus/client_golang/prometheus"
    16  	"github.com/prometheus/client_golang/prometheus/promhttp"
    17  	"github.com/rs/cors"
    18  
    19  	dbm "github.com/tendermint/tm-db"
    20  
    21  	abci "github.com/tendermint/tendermint/abci/types"
    22  	bcv0 "github.com/tendermint/tendermint/blockchain/v0"
    23  	bcv1 "github.com/tendermint/tendermint/blockchain/v1"
    24  	bcv2 "github.com/tendermint/tendermint/blockchain/v2"
    25  	cfg "github.com/tendermint/tendermint/config"
    26  	"github.com/tendermint/tendermint/consensus"
    27  	"github.com/tendermint/tendermint/crypto"
    28  	"github.com/tendermint/tendermint/evidence"
    29  	tmjson "github.com/tendermint/tendermint/libs/json"
    30  	"github.com/tendermint/tendermint/libs/log"
    31  	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
    32  	"github.com/tendermint/tendermint/libs/service"
    33  	"github.com/tendermint/tendermint/light"
    34  	mempl "github.com/tendermint/tendermint/mempool"
    35  	"github.com/tendermint/tendermint/p2p"
    36  	"github.com/tendermint/tendermint/p2p/pex"
    37  	"github.com/tendermint/tendermint/privval"
    38  	"github.com/tendermint/tendermint/proxy"
    39  	rpccore "github.com/tendermint/tendermint/rpc/core"
    40  	grpccore "github.com/tendermint/tendermint/rpc/grpc"
    41  	rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server"
    42  	sm "github.com/tendermint/tendermint/state"
    43  	"github.com/tendermint/tendermint/state/indexer"
    44  	blockidxkv "github.com/tendermint/tendermint/state/indexer/block/kv"
    45  	blockidxnull "github.com/tendermint/tendermint/state/indexer/block/null"
    46  	"github.com/tendermint/tendermint/state/txindex"
    47  	"github.com/tendermint/tendermint/state/txindex/kv"
    48  	"github.com/tendermint/tendermint/state/txindex/null"
    49  	"github.com/tendermint/tendermint/statesync"
    50  	"github.com/tendermint/tendermint/store"
    51  	cs "github.com/tendermint/tendermint/test/maverick/consensus"
    52  	"github.com/tendermint/tendermint/types"
    53  	tmtime "github.com/tendermint/tendermint/types/time"
    54  	"github.com/tendermint/tendermint/version"
    55  )
    56  
    57  //------------------------------------------------------------------------------
    58  
    59  // ParseMisbehaviors is a util function that converts a comma separated string into
    60  // a map of misbehaviors to be executed by the maverick node
    61  func ParseMisbehaviors(str string) (map[int64]cs.Misbehavior, error) {
    62  	// check if string is empty in which case we run a normal node
    63  	var misbehaviors = make(map[int64]cs.Misbehavior)
    64  	if str == "" {
    65  		return misbehaviors, nil
    66  	}
    67  	strs := strings.Split(str, ",")
    68  	if len(strs)%2 != 0 {
    69  		return misbehaviors, errors.New("missing either height or misbehavior name in the misbehavior flag")
    70  	}
    71  OUTER_LOOP:
    72  	for i := 0; i < len(strs); i += 2 {
    73  		height, err := strconv.ParseInt(strs[i+1], 10, 64)
    74  		if err != nil {
    75  			return misbehaviors, fmt.Errorf("failed to parse misbehavior height: %w", err)
    76  		}
    77  		for key, misbehavior := range cs.MisbehaviorList {
    78  			if key == strs[i] {
    79  				misbehaviors[height] = misbehavior
    80  				continue OUTER_LOOP
    81  			}
    82  		}
    83  		return misbehaviors, fmt.Errorf("received unknown misbehavior: %s. Did you forget to add it?", strs[i])
    84  	}
    85  
    86  	return misbehaviors, nil
    87  }
    88  
// DBContext specifies config information for loading a new DB.
type DBContext struct {
	ID     string      // name of the database, e.g. "blockstore", "state", "tx_index", "evidence"
	Config *cfg.Config // node configuration; used for the backend type and data directory
}

// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (dbm.DB, error)
    97  
    98  // DefaultDBProvider returns a database using the DBBackend and DBDir
    99  // specified in the ctx.Config.
   100  func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
   101  	dbType := dbm.BackendType(ctx.Config.DBBackend)
   102  	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
   103  }
   104  
// GenesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
// Implementations should return an error rather than a partial document.
type GenesisDocProvider func() (*types.GenesisDoc, error)
   109  
   110  // DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
   111  // the GenesisDoc from the config.GenesisFile() on the filesystem.
   112  func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
   113  	return func() (*types.GenesisDoc, error) {
   114  		return types.GenesisDocFromFile(config.GenesisFile())
   115  	}
   116  }
   117  
// Provider takes a config and a logger and returns a ready to go Node.
// DefaultNewNode (partially applied with misbehaviors) is the canonical implementation.
type Provider func(*cfg.Config, log.Logger) (*Node, error)
   120  
   121  // DefaultNewNode returns a Tendermint node with default settings for the
   122  // PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
   123  // It implements NodeProvider.
   124  func DefaultNewNode(config *cfg.Config, logger log.Logger, misbehaviors map[int64]cs.Misbehavior) (*Node, error) {
   125  	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
   126  	if err != nil {
   127  		return nil, fmt.Errorf("failed to load or gen node key %s, err: %w", config.NodeKeyFile(), err)
   128  	}
   129  
   130  	return NewNode(config,
   131  		LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
   132  		nodeKey,
   133  		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
   134  		DefaultGenesisDocProviderFunc(config),
   135  		DefaultDBProvider,
   136  		DefaultMetricsProvider(config.Instrumentation),
   137  		logger,
   138  		misbehaviors,
   139  	)
   140  
   141  }
   142  
// MetricsProvider returns a consensus, p2p and mempool Metrics.
// The chainID is attached as a label when Prometheus metrics are enabled.
type MetricsProvider func(chainID string) (*consensus.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics)
   145  
   146  // DefaultMetricsProvider returns Metrics build using Prometheus client library
   147  // if Prometheus is enabled. Otherwise, it returns no-op Metrics.
   148  func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
   149  	return func(chainID string) (*consensus.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) {
   150  		if config.Prometheus {
   151  			return consensus.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   152  				p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   153  				mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   154  				sm.PrometheusMetrics(config.Namespace, "chain_id", chainID)
   155  		}
   156  		return consensus.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics()
   157  	}
   158  }
   159  
// Option sets a parameter for the node.
// Options are applied by NewNode after all core services are constructed.
type Option func(*Node)
   162  
// Temporary interface for switching to fast sync, we should get rid of v0 and v1 reactors.
// See: https://github.com/tendermint/tendermint/issues/4595
// Satisfied by the blockchain reactors; used by startStateSync to hand off
// once state sync completes.
type fastSyncReactor interface {
	SwitchToFastSync(sm.State) error
}
   168  
   169  // CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to
   170  // the node's Switch.
   171  //
   172  // WARNING: using any name from the below list of the existing reactors will
   173  // result in replacing it with the custom one.
   174  //
   175  //  - MEMPOOL
   176  //  - BLOCKCHAIN
   177  //  - CONSENSUS
   178  //  - EVIDENCE
   179  //  - PEX
   180  //  - STATESYNC
   181  func CustomReactors(reactors map[string]p2p.Reactor) Option {
   182  	return func(n *Node) {
   183  		for name, reactor := range reactors {
   184  			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
   185  				n.sw.Logger.Info("Replacing existing reactor with a custom one",
   186  					"name", name, "existing", existingReactor, "custom", reactor)
   187  				n.sw.RemoveReactor(name, existingReactor)
   188  			}
   189  			n.sw.AddReactor(name, reactor)
   190  		}
   191  	}
   192  }
   193  
// CustomReactorsAsConstructors allows you to add custom reactors to the
// node's Switch, like CustomReactors, except each reactor is produced by a
// constructor that receives the Node, so it can reference other node
// components when being built.
//
// WARNING: as with CustomReactors, reusing the name of an existing reactor
// (MEMPOOL, BLOCKCHAIN, CONSENSUS, EVIDENCE, PEX, STATESYNC) replaces it.
func CustomReactorsAsConstructors(reactors map[string]func(n *Node) p2p.Reactor) Option {
	return func(n *Node) {
		for name, customReactor := range reactors {
			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
				n.sw.Logger.Info("Replacing existing reactor with a custom one",
					"name", name)
				n.sw.RemoveReactor(name, existingReactor)
			}
			n.sw.AddReactor(name, customReactor(n))
		}
	}
}
   206  
   207  // StateProvider overrides the state provider used by state sync to retrieve trusted app hashes and
   208  // build a State object for bootstrapping the node.
   209  // WARNING: this interface is considered unstable and subject to change.
   210  func StateProvider(stateProvider statesync.StateProvider) Option {
   211  	return func(n *Node) {
   212  		n.stateSyncProvider = stateProvider
   213  	}
   214  }
   215  
   216  //------------------------------------------------------------------------------
   217  
// Node is the highest level interface to a full Tendermint node.
// It includes all configuration information and running services.
type Node struct {
	service.BaseService

	// config
	config        *cfg.Config
	genesisDoc    *types.GenesisDoc   // initial validator set
	privValidator types.PrivValidator // local node's validator key

	// network
	transport   *p2p.MultiplexTransport // multiplex transport carrying all p2p traffic
	sw          *p2p.Switch  // p2p connections
	addrBook    pex.AddrBook // known peers
	nodeInfo    p2p.NodeInfo // our node info (see createTransport/createSwitch)
	nodeKey     *p2p.NodeKey // our node privkey
	isListening bool         // NOTE(review): presumably tracks transport listen state; set outside this chunk

	// services
	eventBus          *types.EventBus // pub/sub for services
	stateStore        sm.Store        // persistent consensus state (see initDBs)
	blockStore        *store.BlockStore // store the blockchain to disk
	bcReactor         p2p.Reactor       // for fast-syncing
	mempoolReactor    *mempl.Reactor    // for gossipping transactions
	mempool           mempl.Mempool     // pending transactions awaiting inclusion
	stateSync         bool                    // whether the node should state sync on startup
	stateSyncReactor  *statesync.Reactor      // for hosting and restoring state sync snapshots
	stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node
	stateSyncGenesis  sm.State                // provides the genesis state for state sync
	consensusState    *cs.State               // latest consensus state
	consensusReactor  *cs.Reactor             // for participating in the consensus
	pexReactor        *pex.Reactor            // for exchanging peer addresses
	evidencePool      *evidence.Pool          // tracking evidence
	proxyApp          proxy.AppConns          // connection to the application
	rpcListeners      []net.Listener          // rpc servers
	txIndexer         txindex.TxIndexer       // indexes txs from the event bus (see createAndStartIndexerService)
	blockIndexer      indexer.BlockIndexer    // indexes block events
	indexerService    *txindex.IndexerService // drives tx/block indexing off the event bus
	prometheusSrv     *http.Server            // Prometheus metrics HTTP server, if enabled
}
   258  
   259  func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
   260  	var blockStoreDB dbm.DB
   261  	blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
   262  	if err != nil {
   263  		return
   264  	}
   265  	blockStore = store.NewBlockStore(blockStoreDB)
   266  
   267  	stateDB, err = dbProvider(&DBContext{"state", config})
   268  	if err != nil {
   269  		return
   270  	}
   271  
   272  	return
   273  }
   274  
   275  func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
   276  	proxyApp := proxy.NewAppConns(clientCreator)
   277  	proxyApp.SetLogger(logger.With("module", "proxy"))
   278  	if err := proxyApp.Start(); err != nil {
   279  		return nil, fmt.Errorf("error starting proxy app connections: %v", err)
   280  	}
   281  	return proxyApp, nil
   282  }
   283  
   284  func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
   285  	eventBus := types.NewEventBus()
   286  	eventBus.SetLogger(logger.With("module", "events"))
   287  	if err := eventBus.Start(); err != nil {
   288  		return nil, err
   289  	}
   290  	return eventBus, nil
   291  }
   292  
   293  func createAndStartIndexerService(
   294  	config *cfg.Config,
   295  	dbProvider DBProvider,
   296  	eventBus *types.EventBus,
   297  	logger log.Logger,
   298  ) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) {
   299  
   300  	var (
   301  		txIndexer    txindex.TxIndexer
   302  		blockIndexer indexer.BlockIndexer
   303  	)
   304  
   305  	switch config.TxIndex.Indexer {
   306  	case "kv":
   307  		store, err := dbProvider(&DBContext{"tx_index", config})
   308  		if err != nil {
   309  			return nil, nil, nil, err
   310  		}
   311  
   312  		txIndexer = kv.NewTxIndex(store)
   313  		blockIndexer = blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events")))
   314  	default:
   315  		txIndexer = &null.TxIndex{}
   316  		blockIndexer = &blockidxnull.BlockerIndexer{}
   317  	}
   318  
   319  	indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus)
   320  	indexerService.SetLogger(logger.With("module", "txindex"))
   321  
   322  	if err := indexerService.Start(); err != nil {
   323  		return nil, nil, nil, err
   324  	}
   325  
   326  	return indexerService, txIndexer, blockIndexer, nil
   327  }
   328  
   329  func doHandshake(
   330  	stateStore sm.Store,
   331  	state sm.State,
   332  	blockStore sm.BlockStore,
   333  	genDoc *types.GenesisDoc,
   334  	eventBus types.BlockEventPublisher,
   335  	proxyApp proxy.AppConns,
   336  	consensusLogger log.Logger) error {
   337  
   338  	handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc)
   339  	handshaker.SetLogger(consensusLogger)
   340  	handshaker.SetEventBus(eventBus)
   341  	if err := handshaker.Handshake(proxyApp); err != nil {
   342  		return fmt.Errorf("error during handshake: %v", err)
   343  	}
   344  	return nil
   345  }
   346  
   347  func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) {
   348  	// Log the version info.
   349  	logger.Info("Version info",
   350  		"software", version.TMCoreSemVer,
   351  		"block", version.BlockProtocol,
   352  		"p2p", version.P2PProtocol,
   353  	)
   354  
   355  	// If the state and software differ in block version, at least log it.
   356  	if state.Version.Consensus.Block != version.BlockProtocol {
   357  		logger.Info("Software and state have different block protocols",
   358  			"software", version.BlockProtocol,
   359  			"state", state.Version.Consensus.Block,
   360  		)
   361  	}
   362  
   363  	addr := pubKey.Address()
   364  	// Log whether this node is a validator or an observer
   365  	if state.Validators.HasAddress(addr) {
   366  		consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey)
   367  	} else {
   368  		consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
   369  	}
   370  }
   371  
   372  func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
   373  	if state.Validators.Size() > 1 {
   374  		return false
   375  	}
   376  	addr, _ := state.Validators.GetByIndex(0)
   377  	return bytes.Equal(pubKey.Address(), addr)
   378  }
   379  
   380  func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns,
   381  	state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (*mempl.Reactor, *mempl.CListMempool) {
   382  
   383  	mempool := mempl.NewCListMempool(
   384  		config.Mempool,
   385  		proxyApp.Mempool(),
   386  		state.LastBlockHeight,
   387  		mempl.WithMetrics(memplMetrics),
   388  		mempl.WithPreCheck(sm.TxPreCheck(state)),
   389  		mempl.WithPostCheck(sm.TxPostCheck(state)),
   390  	)
   391  	mempoolLogger := logger.With("module", "mempool")
   392  	mempoolReactor := mempl.NewReactor(config.Mempool, mempool)
   393  	mempoolReactor.SetLogger(mempoolLogger)
   394  
   395  	if config.Consensus.WaitForTxs() {
   396  		mempool.EnableTxsAvailable()
   397  	}
   398  	return mempoolReactor, mempool
   399  }
   400  
   401  func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
   402  	stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) {
   403  
   404  	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
   405  	if err != nil {
   406  		return nil, nil, err
   407  	}
   408  	evidenceLogger := logger.With("module", "evidence")
   409  	evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB), blockStore)
   410  	if err != nil {
   411  		return nil, nil, err
   412  	}
   413  	evidenceReactor := evidence.NewReactor(evidencePool)
   414  	evidenceReactor.SetLogger(evidenceLogger)
   415  	return evidenceReactor, evidencePool, nil
   416  }
   417  
   418  func createBlockchainReactor(config *cfg.Config,
   419  	state sm.State,
   420  	blockExec *sm.BlockExecutor,
   421  	blockStore *store.BlockStore,
   422  	fastSync bool,
   423  	logger log.Logger) (bcReactor p2p.Reactor, err error) {
   424  
   425  	switch config.FastSync.Version {
   426  	case "v0":
   427  		bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   428  	case "v1":
   429  		bcReactor = bcv1.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   430  	case "v2":
   431  		bcReactor = bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   432  	default:
   433  		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
   434  	}
   435  
   436  	bcReactor.SetLogger(logger.With("module", "blockchain"))
   437  	return bcReactor, nil
   438  }
   439  
   440  func createConsensusReactor(config *cfg.Config,
   441  	state sm.State,
   442  	blockExec *sm.BlockExecutor,
   443  	blockStore sm.BlockStore,
   444  	mempool *mempl.CListMempool,
   445  	evidencePool *evidence.Pool,
   446  	privValidator types.PrivValidator,
   447  	csMetrics *consensus.Metrics,
   448  	waitSync bool,
   449  	eventBus *types.EventBus,
   450  	consensusLogger log.Logger,
   451  	misbehaviors map[int64]cs.Misbehavior) (*cs.Reactor, *cs.State) {
   452  
   453  	consensusState := cs.NewState(
   454  		config.Consensus,
   455  		state.Copy(),
   456  		blockExec,
   457  		blockStore,
   458  		mempool,
   459  		evidencePool,
   460  		misbehaviors,
   461  		cs.StateMetrics(csMetrics),
   462  	)
   463  	consensusState.SetLogger(consensusLogger)
   464  	if privValidator != nil {
   465  		consensusState.SetPrivValidator(privValidator)
   466  	}
   467  	consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics))
   468  	consensusReactor.SetLogger(consensusLogger)
   469  	// services which will be publishing and/or subscribing for messages (events)
   470  	// consensusReactor will set it on consensusState and blockExecutor
   471  	consensusReactor.SetEventBus(eventBus)
   472  	return consensusReactor, consensusState
   473  }
   474  
// createTransport builds the multiplex P2P transport plus the peer filter
// functions derived from config. When config.FilterPeers is set, connections
// and peers are additionally vetted by ABCI queries against the
// /p2p/filter/addr/... and /p2p/filter/id/... paths.
func createTransport(
	config *cfg.Config,
	nodeInfo p2p.NodeInfo,
	nodeKey *p2p.NodeKey,
	proxyApp proxy.AppConns,
) (
	*p2p.MultiplexTransport,
	[]p2p.PeerFilterFunc,
) {
	var (
		mConnConfig = p2p.MConnConfig(config.P2P)
		transport   = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig)
		connFilters = []p2p.ConnFilterFunc{}
		peerFilters = []p2p.PeerFilterFunc{}
	)

	if !config.P2P.AllowDuplicateIP {
		connFilters = append(connFilters, p2p.ConnDuplicateIPFilter())
	}

	// Filter peers by addr or pubkey with an ABCI query.
	// If the query return code is OK, add peer.
	if config.FilterPeers {
		connFilters = append(
			connFilters,
			// ABCI query for address filtering.
			func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
					Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
				})
				if err != nil {
					return err
				}
				// A non-OK response code from the app rejects the connection.
				if res.IsErr() {
					return fmt.Errorf("error querying abci app: %v", res)
				}

				return nil
			},
		)

		peerFilters = append(
			peerFilters,
			// ABCI query for ID filtering.
			func(_ p2p.IPeerSet, p p2p.Peer) error {
				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
					Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
				})
				if err != nil {
					return err
				}
				if res.IsErr() {
					return fmt.Errorf("error querying abci app: %v", res)
				}

				return nil
			},
		)
	}

	p2p.MultiplexTransportConnFilters(connFilters...)(transport)

	// Limit the number of incoming connections.
	// NOTE(review): "max" shadows the Go 1.21+ builtin of the same name; harmless here.
	max := config.P2P.MaxNumInboundPeers + len(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
	p2p.MultiplexTransportMaxIncomingConnections(max)(transport)

	return transport, peerFilters
}
   543  
   544  func createSwitch(config *cfg.Config,
   545  	transport p2p.Transport,
   546  	p2pMetrics *p2p.Metrics,
   547  	peerFilters []p2p.PeerFilterFunc,
   548  	mempoolReactor *mempl.Reactor,
   549  	bcReactor p2p.Reactor,
   550  	stateSyncReactor *statesync.Reactor,
   551  	consensusReactor *cs.Reactor,
   552  	evidenceReactor *evidence.Reactor,
   553  	nodeInfo p2p.NodeInfo,
   554  	nodeKey *p2p.NodeKey,
   555  	p2pLogger log.Logger) *p2p.Switch {
   556  
   557  	sw := p2p.NewSwitch(
   558  		config.P2P,
   559  		transport,
   560  		p2p.WithMetrics(p2pMetrics),
   561  		p2p.SwitchPeerFilters(peerFilters...),
   562  	)
   563  	sw.SetLogger(p2pLogger)
   564  	sw.AddReactor("MEMPOOL", mempoolReactor)
   565  	sw.AddReactor("BLOCKCHAIN", bcReactor)
   566  	sw.AddReactor("CONSENSUS", consensusReactor)
   567  	sw.AddReactor("EVIDENCE", evidenceReactor)
   568  	sw.AddReactor("STATESYNC", stateSyncReactor)
   569  
   570  	sw.SetNodeInfo(nodeInfo)
   571  	sw.SetNodeKey(nodeKey)
   572  
   573  	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())
   574  	return sw
   575  }
   576  
   577  func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
   578  	p2pLogger log.Logger, nodeKey *p2p.NodeKey) (pex.AddrBook, error) {
   579  
   580  	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
   581  	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
   582  
   583  	// Add ourselves to addrbook to prevent dialing ourselves
   584  	if config.P2P.ExternalAddress != "" {
   585  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress))
   586  		if err != nil {
   587  			return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err)
   588  		}
   589  		addrBook.AddOurAddress(addr)
   590  	}
   591  	if config.P2P.ListenAddress != "" {
   592  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress))
   593  		if err != nil {
   594  			return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err)
   595  		}
   596  		addrBook.AddOurAddress(addr)
   597  	}
   598  
   599  	sw.SetAddrBook(addrBook)
   600  
   601  	return addrBook, nil
   602  }
   603  
   604  func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
   605  	sw *p2p.Switch, logger log.Logger) *pex.Reactor {
   606  
   607  	// TODO persistent peers ? so we can have their DNS addrs saved
   608  	pexReactor := pex.NewReactor(addrBook,
   609  		&pex.ReactorConfig{
   610  			Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
   611  			SeedMode: config.P2P.SeedMode,
   612  			// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
   613  			// blocks assuming 10s blocks ~ 28 hours.
   614  			// TODO (melekes): make it dynamic based on the actual block latencies
   615  			// from the live network.
   616  			// https://github.com/tendermint/tendermint/issues/3523
   617  			SeedDisconnectWaitPeriod:     28 * time.Hour,
   618  			PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
   619  		})
   620  	pexReactor.SetLogger(logger.With("module", "pex"))
   621  	sw.AddReactor("PEX", pexReactor)
   622  	return pexReactor
   623  }
   624  
// startStateSync starts an asynchronous state sync process, then switches to fast sync mode.
//
// If stateProvider is nil, a light-client-backed provider is built from the
// configured RPC servers and trust options. The sync itself runs in a
// background goroutine: on success the synced state is bootstrapped into the
// state store, the matching seen-commit is persisted, and the node hands off
// to either fast sync or consensus depending on the fastSync flag. Errors
// inside the goroutine are only logged — they are not returned to the caller.
func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor,
	stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool,
	stateStore sm.Store, blockStore *store.BlockStore, state sm.State) error {
	ssR.Logger.Info("Starting state sync")

	if stateProvider == nil {
		var err error
		// Bound light-client provider setup to 10s.
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		stateProvider, err = statesync.NewLightClientStateProvider(
			ctx,
			state.ChainID, state.Version, state.InitialHeight,
			config.RPCServers, light.TrustOptions{
				Period: config.TrustPeriod,
				Height: config.TrustHeight,
				Hash:   config.TrustHashBytes(),
			}, ssR.Logger.With("module", "light"))
		if err != nil {
			return fmt.Errorf("failed to set up light client state provider: %w", err)
		}
	}

	go func() {
		// Note: this local `state` shadows the outer parameter with the synced state.
		state, commit, err := ssR.Sync(stateProvider, config.DiscoveryTime)
		if err != nil {
			ssR.Logger.Error("State sync failed", "err", err)
			return
		}
		err = stateStore.Bootstrap(state)
		if err != nil {
			ssR.Logger.Error("Failed to bootstrap node with new state", "err", err)
			return
		}
		err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit)
		if err != nil {
			ssR.Logger.Error("Failed to store last seen commit", "err", err)
			return
		}

		if fastSync {
			// FIXME Very ugly to have these metrics bleed through here.
			conR.Metrics.StateSyncing.Set(0)
			conR.Metrics.FastSyncing.Set(1)
			err = bcR.SwitchToFastSync(state)
			if err != nil {
				ssR.Logger.Error("Failed to switch to fast sync", "err", err)
				return
			}
		} else {
			conR.SwitchToConsensus(state, true)
		}
	}()
	return nil
}
   680  
   681  // NewNode returns a new, ready to go, Tendermint Node.
   682  func NewNode(config *cfg.Config,
   683  	privValidator types.PrivValidator,
   684  	nodeKey *p2p.NodeKey,
   685  	clientCreator proxy.ClientCreator,
   686  	genesisDocProvider GenesisDocProvider,
   687  	dbProvider DBProvider,
   688  	metricsProvider MetricsProvider,
   689  	logger log.Logger,
   690  	misbehaviors map[int64]cs.Misbehavior,
   691  	options ...Option) (*Node, error) {
   692  
   693  	blockStore, stateDB, err := initDBs(config, dbProvider)
   694  	if err != nil {
   695  		return nil, err
   696  	}
   697  
   698  	stateStore := sm.NewStore(stateDB)
   699  
   700  	state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
   701  	if err != nil {
   702  		return nil, err
   703  	}
   704  
   705  	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
   706  	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger)
   707  	if err != nil {
   708  		return nil, err
   709  	}
   710  
   711  	// EventBus and IndexerService must be started before the handshake because
   712  	// we might need to index the txs of the replayed block as this might not have happened
   713  	// when the node stopped last time (i.e. the node stopped after it saved the block
   714  	// but before it indexed the txs, or, endblocker panicked)
   715  	eventBus, err := createAndStartEventBus(logger)
   716  	if err != nil {
   717  		return nil, err
   718  	}
   719  
   720  	indexerService, txIndexer, blockIndexer, err := createAndStartIndexerService(config, dbProvider, eventBus, logger)
   721  	if err != nil {
   722  		return nil, err
   723  	}
   724  
   725  	// If an address is provided, listen on the socket for a connection from an
   726  	// external signing process.
   727  	if config.PrivValidatorListenAddr != "" {
   728  		// FIXME: we should start services inside OnStart
   729  		privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, genDoc.ChainID, logger)
   730  		if err != nil {
   731  			return nil, fmt.Errorf("error with private validator socket client: %w", err)
   732  		}
   733  	}
   734  
   735  	pubKey, err := privValidator.GetPubKey()
   736  	if err != nil {
   737  		return nil, fmt.Errorf("can't get pubkey: %w", err)
   738  	}
   739  
   740  	// Determine whether we should do state and/or fast sync.
   741  	// We don't fast-sync when the only validator is us.
   742  	fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey)
   743  	stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey)
   744  	if stateSync && state.LastBlockHeight > 0 {
   745  		logger.Info("Found local state with non-zero height, skipping state sync")
   746  		stateSync = false
   747  	}
   748  
   749  	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
   750  	// and replays any blocks as necessary to sync tendermint with the app.
   751  	consensusLogger := logger.With("module", "consensus")
   752  	if !stateSync {
   753  		if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil {
   754  			return nil, err
   755  		}
   756  
   757  		// Reload the state. It will have the Version.Consensus.App set by the
   758  		// Handshake, and may have other modifications as well (ie. depending on
   759  		// what happened during block replay).
   760  		state, err = stateStore.Load()
   761  		if err != nil {
   762  			return nil, fmt.Errorf("cannot load state: %w", err)
   763  		}
   764  	}
   765  
   766  	logNodeStartupInfo(state, pubKey, logger, consensusLogger)
   767  
   768  	csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID)
   769  
   770  	// Make MempoolReactor
   771  	mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)
   772  
   773  	// Make Evidence Reactor
   774  	evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger)
   775  	if err != nil {
   776  		return nil, err
   777  	}
   778  
   779  	// make block executor for consensus and blockchain reactors to execute blocks
   780  	blockExec := sm.NewBlockExecutor(
   781  		stateStore,
   782  		logger.With("module", "state"),
   783  		proxyApp.Consensus(),
   784  		mempool,
   785  		evidencePool,
   786  		sm.BlockExecutorWithMetrics(smMetrics),
   787  	)
   788  
   789  	// Make BlockchainReactor. Don't start fast sync if we're doing a state sync first.
   790  	bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync && !stateSync, logger)
   791  	if err != nil {
   792  		return nil, fmt.Errorf("could not create blockchain reactor: %w", err)
   793  	}
   794  
   795  	// Make ConsensusReactor. Don't enable fully if doing a state sync and/or fast sync first.
   796  	// FIXME We need to update metrics here, since other reactors don't have access to them.
   797  	if stateSync {
   798  		csMetrics.StateSyncing.Set(1)
   799  	} else if fastSync {
   800  		csMetrics.FastSyncing.Set(1)
   801  	}
   802  
   803  	logger.Info("Setting up maverick consensus reactor", "Misbehaviors", misbehaviors)
   804  	consensusReactor, consensusState := createConsensusReactor(
   805  		config, state, blockExec, blockStore, mempool, evidencePool,
   806  		privValidator, csMetrics, stateSync || fastSync, eventBus, consensusLogger, misbehaviors)
   807  
   808  	// Set up state sync reactor, and schedule a sync if requested.
   809  	// FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy,
   810  	// we should clean this whole thing up. See:
   811  	// https://github.com/tendermint/tendermint/issues/4644
   812  	stateSyncReactor := statesync.NewReactor(
   813  		*config.StateSync,
   814  		proxyApp.Snapshot(),
   815  		proxyApp.Query(),
   816  		config.StateSync.TempDir,
   817  	)
   818  	stateSyncReactor.SetLogger(logger.With("module", "statesync"))
   819  
   820  	nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state)
   821  	if err != nil {
   822  		return nil, err
   823  	}
   824  
   825  	// Setup Transport.
   826  	transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp)
   827  
   828  	// Setup Switch.
   829  	p2pLogger := logger.With("module", "p2p")
   830  	sw := createSwitch(
   831  		config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor,
   832  		stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger,
   833  	)
   834  
   835  	err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
   836  	if err != nil {
   837  		return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err)
   838  	}
   839  
   840  	err = sw.AddUnconditionalPeerIDs(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
   841  	if err != nil {
   842  		return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
   843  	}
   844  
   845  	addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
   846  	if err != nil {
   847  		return nil, fmt.Errorf("could not create addrbook: %w", err)
   848  	}
   849  
   850  	// Optionally, start the pex reactor
   851  	//
   852  	// TODO:
   853  	//
   854  	// We need to set Seeds and PersistentPeers on the switch,
   855  	// since it needs to be able to use these (and their DNS names)
   856  	// even if the PEX is off. We can include the DNS name in the NetAddress,
   857  	// but it would still be nice to have a clear list of the current "PersistentPeers"
   858  	// somewhere that we can return with net_info.
   859  	//
   860  	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
   861  	// Note we currently use the addrBook regardless at least for AddOurAddress
   862  	var pexReactor *pex.Reactor
   863  	if config.P2P.PexReactor {
   864  		pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
   865  	}
   866  
   867  	if config.RPC.PprofListenAddress != "" {
   868  		go func() {
   869  			logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress)
   870  			logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil))
   871  		}()
   872  	}
   873  
   874  	node := &Node{
   875  		config:        config,
   876  		genesisDoc:    genDoc,
   877  		privValidator: privValidator,
   878  
   879  		transport: transport,
   880  		sw:        sw,
   881  		addrBook:  addrBook,
   882  		nodeInfo:  nodeInfo,
   883  		nodeKey:   nodeKey,
   884  
   885  		stateStore:       stateStore,
   886  		blockStore:       blockStore,
   887  		bcReactor:        bcReactor,
   888  		mempoolReactor:   mempoolReactor,
   889  		mempool:          mempool,
   890  		consensusState:   consensusState,
   891  		consensusReactor: consensusReactor,
   892  		stateSyncReactor: stateSyncReactor,
   893  		stateSync:        stateSync,
   894  		stateSyncGenesis: state, // Shouldn't be necessary, but need a way to pass the genesis state
   895  		pexReactor:       pexReactor,
   896  		evidencePool:     evidencePool,
   897  		proxyApp:         proxyApp,
   898  		txIndexer:        txIndexer,
   899  		indexerService:   indexerService,
   900  		blockIndexer:     blockIndexer,
   901  		eventBus:         eventBus,
   902  	}
   903  	node.BaseService = *service.NewBaseService(logger, "Node", node)
   904  
   905  	for _, option := range options {
   906  		option(node)
   907  	}
   908  
   909  	return node, nil
   910  }
   911  
// OnStart starts the Node. It implements service.Service.
//
// Ordering here matters: the RPC server is started before the P2P layer so
// the node can already accept transactions for the first block, the mempool
// WAL is initialized before the switch starts feeding it transactions, and
// state sync (when enabled) begins only after the switch is dialing peers.
func (n *Node) OnStart() error {
	// Don't start before genesis time; consensus must not produce blocks
	// timestamped earlier than genesis.
	now := tmtime.Now()
	genTime := n.genesisDoc.GenesisTime
	if genTime.After(now) {
		n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime)
		time.Sleep(genTime.Sub(now))
	}

	// Add private IDs to addrbook to block those peers being added
	n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))

	// Start the RPC server before the P2P server
	// so we can eg. receive txs for the first block
	if n.config.RPC.ListenAddress != "" {
		listeners, err := n.startRPC()
		if err != nil {
			return err
		}
		// Keep the listeners so OnStop can close them.
		n.rpcListeners = listeners
	}

	if n.config.Instrumentation.Prometheus &&
		n.config.Instrumentation.PrometheusListenAddr != "" {
		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
	}

	// Start the transport.
	addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
	if err != nil {
		return err
	}
	if err := n.transport.Listen(*addr); err != nil {
		return err
	}

	n.isListening = true

	// The mempool WAL must exist before the switch can deliver txs to us.
	if n.config.Mempool.WalEnabled() {
		err = n.mempool.InitWAL()
		if err != nil {
			return fmt.Errorf("init mempool WAL: %w", err)
		}
	}

	// Start the switch (the P2P server).
	err = n.sw.Start()
	if err != nil {
		return err
	}

	// Always connect to persistent peers
	err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
	if err != nil {
		return fmt.Errorf("could not dial peers from persistent_peers field: %w", err)
	}

	// Run state sync
	if n.stateSync {
		// The blockchain reactor must be able to take over once state sync
		// completes; reactors lacking that capability are rejected here.
		bcR, ok := n.bcReactor.(fastSyncReactor)
		if !ok {
			return fmt.Errorf("this blockchain reactor does not support switching from state sync")
		}
		err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider,
			n.config.StateSync, n.config.FastSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis)
		if err != nil {
			return fmt.Errorf("failed to start state sync: %w", err)
		}
	}

	return nil
}
   984  
// OnStop stops the Node. It implements service.Service.
//
// Shutdown proceeds roughly in reverse start order: non-reactor services
// first, then the reactors (via the switch), then the transport, the RPC
// listeners, and finally external helpers (remote signer, Prometheus).
// Errors are logged rather than returned so every step gets a chance to run.
func (n *Node) OnStop() {
	n.BaseService.OnStop()

	n.Logger.Info("Stopping Node")

	// first stop the non-reactor services
	if err := n.eventBus.Stop(); err != nil {
		n.Logger.Error("Error closing eventBus", "err", err)
	}
	if err := n.indexerService.Stop(); err != nil {
		n.Logger.Error("Error closing indexerService", "err", err)
	}

	// now stop the reactors
	if err := n.sw.Stop(); err != nil {
		n.Logger.Error("Error closing switch", "err", err)
	}

	// stop mempool WAL
	if n.config.Mempool.WalEnabled() {
		n.mempool.CloseWAL()
	}

	if err := n.transport.Close(); err != nil {
		n.Logger.Error("Error closing transport", "err", err)
	}

	n.isListening = false

	// finally stop the listeners / external services
	for _, l := range n.rpcListeners {
		n.Logger.Info("Closing rpc listener", "listener", l)
		if err := l.Close(); err != nil {
			n.Logger.Error("Error closing listener", "listener", l, "err", err)
		}
	}

	// Stop the remote signer client, if the private validator runs as a service.
	if pvsc, ok := n.privValidator.(service.Service); ok {
		if err := pvsc.Stop(); err != nil {
			n.Logger.Error("Error closing private validator", "err", err)
		}
	}

	if n.prometheusSrv != nil {
		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
			// Error from closing listeners, or context timeout:
			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
		}
	}
}
  1036  
// ConfigureRPC makes sure RPC has all the objects it needs to operate.
// It installs this node's stores, pools, reactors, and config into the
// package-global rpccore environment; it must run before any RPC server
// is started (startRPC calls it first).
func (n *Node) ConfigureRPC() error {
	pubKey, err := n.privValidator.GetPubKey()
	if err != nil {
		return fmt.Errorf("can't get pubkey: %w", err)
	}
	rpccore.SetEnvironment(&rpccore.Environment{
		ProxyAppQuery:   n.proxyApp.Query(),
		ProxyAppMempool: n.proxyApp.Mempool(),

		StateStore:     n.stateStore,
		BlockStore:     n.blockStore,
		EvidencePool:   n.evidencePool,
		ConsensusState: n.consensusState,
		P2PPeers:       n.sw,
		P2PTransport:   n,

		PubKey:           pubKey,
		GenDoc:           n.genesisDoc,
		TxIndexer:        n.txIndexer,
		BlockIndexer:     n.blockIndexer,
		// NOTE(review): an empty standard consensus.Reactor is installed here
		// instead of this node's reactor, so RPC endpoints backed by it will
		// see zero values — presumably because the maverick reactor is not
		// the standard type; confirm this is intended.
		ConsensusReactor: &consensus.Reactor{},
		EventBus:         n.eventBus,
		Mempool:          n.mempool,

		Logger: n.Logger.With("module", "rpc"),

		Config: *n.config.RPC,
	})
	return nil
}
  1068  
// startRPC configures the rpccore environment and starts one JSON-RPC /
// WebSocket server per configured listen address (a node may expose both a
// unix and a tcp socket). If a gRPC listen address is configured, a
// simplified gRPC API is started as well. All servers run in background
// goroutines; the returned listeners are kept by the caller so OnStop can
// close them.
func (n *Node) startRPC() ([]net.Listener, error) {
	err := n.ConfigureRPC()
	if err != nil {
		return nil, err
	}

	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")

	if n.config.RPC.Unsafe {
		rpccore.AddUnsafeRoutes()
	}

	config := rpcserver.DefaultConfig()
	config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
	config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
	config.MaxOpenConnections = n.config.RPC.MaxOpenConnections
	// If necessary adjust global WriteTimeout to ensure it's greater than
	// TimeoutBroadcastTxCommit.
	// See https://github.com/tendermint/tendermint/issues/3435
	if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
		config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
	}

	// we may expose the rpc over both a unix and tcp socket
	listeners := make([]net.Listener, len(listenAddrs))
	for i, listenAddr := range listenAddrs {
		mux := http.NewServeMux()
		rpcLogger := n.Logger.With("module", "rpc-server")
		wmLogger := rpcLogger.With("protocol", "websocket")
		// Drop a client's event subscriptions when its websocket disconnects;
		// "subscription not found" is expected for clients that never subscribed.
		wm := rpcserver.NewWebsocketManager(rpccore.Routes,
			rpcserver.OnDisconnect(func(remoteAddr string) {
				err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
				if err != nil && err != tmpubsub.ErrSubscriptionNotFound {
					wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err)
				}
			}),
			rpcserver.ReadLimit(config.MaxBodyBytes),
		)
		wm.SetLogger(wmLogger)
		mux.HandleFunc("/websocket", wm.WebsocketHandler)
		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger)
		listener, err := rpcserver.Listen(
			listenAddr,
			config,
		)
		if err != nil {
			return nil, err
		}

		// Optionally wrap the mux in CORS middleware.
		var rootHandler http.Handler = mux
		if n.config.RPC.IsCorsEnabled() {
			corsMiddleware := cors.New(cors.Options{
				AllowedOrigins: n.config.RPC.CORSAllowedOrigins,
				AllowedMethods: n.config.RPC.CORSAllowedMethods,
				AllowedHeaders: n.config.RPC.CORSAllowedHeaders,
			})
			rootHandler = corsMiddleware.Handler(mux)
		}
		// Serve in the background; serve errors are logged, not returned,
		// since the server outlives this call.
		if n.config.RPC.IsTLSEnabled() {
			go func() {
				if err := rpcserver.ServeTLS(
					listener,
					rootHandler,
					n.config.RPC.CertFile(),
					n.config.RPC.KeyFile(),
					rpcLogger,
					config,
				); err != nil {
					n.Logger.Error("Error serving server with TLS", "err", err)
				}
			}()
		} else {
			go func() {
				if err := rpcserver.Serve(
					listener,
					rootHandler,
					rpcLogger,
					config,
				); err != nil {
					n.Logger.Error("Error serving server", "err", err)
				}
			}()
		}

		listeners[i] = listener
	}

	// we expose a simplified api over grpc for convenience to app devs
	grpcListenAddr := n.config.RPC.GRPCListenAddress
	if grpcListenAddr != "" {
		// The gRPC server gets its own config: it uses GRPCMaxOpenConnections
		// and must not share connection limits with the JSON-RPC servers.
		config := rpcserver.DefaultConfig()
		config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
		config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
		// NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections
		config.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections
		// If necessary adjust global WriteTimeout to ensure it's greater than
		// TimeoutBroadcastTxCommit.
		// See https://github.com/tendermint/tendermint/issues/3435
		if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
			config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
		}
		listener, err := rpcserver.Listen(grpcListenAddr, config)
		if err != nil {
			return nil, err
		}
		go func() {
			if err := grpccore.StartGRPCServer(listener); err != nil {
				n.Logger.Error("Error starting gRPC server", "err", err)
			}
		}()
		listeners = append(listeners, listener)
	}

	return listeners, nil
}
  1184  
  1185  // startPrometheusServer starts a Prometheus HTTP server, listening for metrics
  1186  // collectors on addr.
  1187  func (n *Node) startPrometheusServer(addr string) *http.Server {
  1188  	srv := &http.Server{
  1189  		Addr: addr,
  1190  		Handler: promhttp.InstrumentMetricHandler(
  1191  			prometheus.DefaultRegisterer, promhttp.HandlerFor(
  1192  				prometheus.DefaultGatherer,
  1193  				promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections},
  1194  			),
  1195  		),
  1196  	}
  1197  	go func() {
  1198  		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
  1199  			// Error starting or closing listener:
  1200  			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
  1201  		}
  1202  	}()
  1203  	return srv
  1204  }
  1205  
// Switch returns the Node's p2p Switch (the peer connection manager).
func (n *Node) Switch() *p2p.Switch {
	return n.sw
}
  1210  
// BlockStore returns the Node's BlockStore, which holds committed blocks.
func (n *Node) BlockStore() *store.BlockStore {
	return n.blockStore
}
  1215  
// ConsensusState returns the Node's consensus state machine.
func (n *Node) ConsensusState() *cs.State {
	return n.consensusState
}
  1220  
// ConsensusReactor returns the Node's consensus reactor.
func (n *Node) ConsensusReactor() *cs.Reactor {
	return n.consensusReactor
}
  1225  
// MempoolReactor returns the Node's mempool reactor.
func (n *Node) MempoolReactor() *mempl.Reactor {
	return n.mempoolReactor
}
  1230  
// Mempool returns the Node's mempool of pending transactions.
func (n *Node) Mempool() mempl.Mempool {
	return n.mempool
}
  1235  
// PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled
// (see the config.P2P.PexReactor check during node construction).
func (n *Node) PEXReactor() *pex.Reactor {
	return n.pexReactor
}
  1240  
// EvidencePool returns the Node's EvidencePool of misbehavior evidence.
func (n *Node) EvidencePool() *evidence.Pool {
	return n.evidencePool
}
  1245  
// EventBus returns the Node's EventBus used for pub/sub of node events.
func (n *Node) EventBus() *types.EventBus {
	return n.eventBus
}
  1250  
// PrivValidator returns the Node's PrivValidator (local or remote signer).
// XXX: for convenience only!
func (n *Node) PrivValidator() types.PrivValidator {
	return n.privValidator
}
  1256  
// GenesisDoc returns the Node's GenesisDoc.
func (n *Node) GenesisDoc() *types.GenesisDoc {
	return n.genesisDoc
}
  1261  
// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
func (n *Node) ProxyApp() proxy.AppConns {
	return n.proxyApp
}
  1266  
// Config returns the Node's full configuration.
func (n *Node) Config() *cfg.Config {
	return n.config
}
  1271  
  1272  //------------------------------------------------------------------------------
  1273  
  1274  func (n *Node) Listeners() []string {
  1275  	return []string{
  1276  		fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress),
  1277  	}
  1278  }
  1279  
// IsListening reports whether the node's p2p transport has started listening
// for inbound peer connections (set in OnStart, cleared in OnStop).
func (n *Node) IsListening() bool {
	return n.isListening
}
  1283  
// NodeInfo returns the Node's p2p NodeInfo, built by makeNodeInfo during
// node construction and advertised to peers.
func (n *Node) NodeInfo() p2p.NodeInfo {
	return n.nodeInfo
}
  1288  
  1289  func makeNodeInfo(
  1290  	config *cfg.Config,
  1291  	nodeKey *p2p.NodeKey,
  1292  	txIndexer txindex.TxIndexer,
  1293  	genDoc *types.GenesisDoc,
  1294  	state sm.State,
  1295  ) (p2p.NodeInfo, error) {
  1296  	txIndexerStatus := "on"
  1297  	if _, ok := txIndexer.(*null.TxIndex); ok {
  1298  		txIndexerStatus = "off"
  1299  	}
  1300  
  1301  	var bcChannel byte
  1302  	switch config.FastSync.Version {
  1303  	case "v0":
  1304  		bcChannel = bcv0.BlockchainChannel
  1305  	case "v1":
  1306  		bcChannel = bcv1.BlockchainChannel
  1307  	case "v2":
  1308  		bcChannel = bcv2.BlockchainChannel
  1309  	default:
  1310  		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
  1311  	}
  1312  
  1313  	nodeInfo := p2p.DefaultNodeInfo{
  1314  		ProtocolVersion: p2p.NewProtocolVersion(
  1315  			version.P2PProtocol, // global
  1316  			state.Version.Consensus.Block,
  1317  			state.Version.Consensus.App,
  1318  		),
  1319  		DefaultNodeID: nodeKey.ID(),
  1320  		Network:       genDoc.ChainID,
  1321  		Version:       version.TMCoreSemVer,
  1322  		Channels: []byte{
  1323  			bcChannel,
  1324  			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
  1325  			mempl.MempoolChannel,
  1326  			evidence.EvidenceChannel,
  1327  			statesync.SnapshotChannel, statesync.ChunkChannel,
  1328  		},
  1329  		Moniker: config.Moniker,
  1330  		Other: p2p.DefaultNodeInfoOther{
  1331  			TxIndex:    txIndexerStatus,
  1332  			RPCAddress: config.RPC.ListenAddress,
  1333  		},
  1334  	}
  1335  
  1336  	if config.P2P.PexReactor {
  1337  		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
  1338  	}
  1339  
  1340  	lAddr := config.P2P.ExternalAddress
  1341  
  1342  	if lAddr == "" {
  1343  		lAddr = config.P2P.ListenAddress
  1344  	}
  1345  
  1346  	nodeInfo.ListenAddr = lAddr
  1347  
  1348  	err := nodeInfo.Validate()
  1349  	return nodeInfo, err
  1350  }
  1351  
  1352  //------------------------------------------------------------------------------
  1353  
  1354  var (
  1355  	genesisDocKey = []byte("genesisDoc")
  1356  )
  1357  
  1358  // LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
  1359  // database, or creates one using the given genesisDocProvider and persists the
  1360  // result to the database. On success this also returns the genesis doc loaded
  1361  // through the given provider.
  1362  func LoadStateFromDBOrGenesisDocProvider(
  1363  	stateDB dbm.DB,
  1364  	genesisDocProvider GenesisDocProvider,
  1365  ) (sm.State, *types.GenesisDoc, error) {
  1366  	// Get genesis doc
  1367  	genDoc, err := loadGenesisDoc(stateDB)
  1368  	if err != nil {
  1369  		genDoc, err = genesisDocProvider()
  1370  		if err != nil {
  1371  			return sm.State{}, nil, err
  1372  		}
  1373  		// save genesis doc to prevent a certain class of user errors (e.g. when it
  1374  		// was changed, accidentally or not). Also good for audit trail.
  1375  		saveGenesisDoc(stateDB, genDoc)
  1376  	}
  1377  	stateStore := sm.NewStore(stateDB)
  1378  	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
  1379  	if err != nil {
  1380  		return sm.State{}, nil, err
  1381  	}
  1382  	return state, genDoc, nil
  1383  }
  1384  
  1385  // panics if failed to unmarshal bytes
  1386  func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
  1387  	b, err := db.Get(genesisDocKey)
  1388  	if err != nil {
  1389  		panic(err)
  1390  	}
  1391  	if len(b) == 0 {
  1392  		return nil, errors.New("genesis doc not found")
  1393  	}
  1394  	var genDoc *types.GenesisDoc
  1395  	err = tmjson.Unmarshal(b, &genDoc)
  1396  	if err != nil {
  1397  		panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b))
  1398  	}
  1399  	return genDoc, nil
  1400  }
  1401  
  1402  // panics if failed to marshal the given genesis document
  1403  func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
  1404  	b, err := tmjson.Marshal(genDoc)
  1405  	if err != nil {
  1406  		panic(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
  1407  	}
  1408  	if err := db.SetSync(genesisDocKey, b); err != nil {
  1409  		panic(fmt.Sprintf("Failed to save genesis doc: %v", err))
  1410  	}
  1411  }
  1412  
  1413  func createAndStartPrivValidatorSocketClient(
  1414  	listenAddr,
  1415  	chainID string,
  1416  	logger log.Logger,
  1417  ) (types.PrivValidator, error) {
  1418  	pve, err := privval.NewSignerListener(listenAddr, logger)
  1419  	if err != nil {
  1420  		return nil, fmt.Errorf("failed to start private validator: %w", err)
  1421  	}
  1422  
  1423  	pvsc, err := privval.NewSignerClient(pve, chainID)
  1424  	if err != nil {
  1425  		return nil, fmt.Errorf("failed to start private validator: %w", err)
  1426  	}
  1427  
  1428  	// try to get a pubkey from private validate first time
  1429  	_, err = pvsc.GetPubKey()
  1430  	if err != nil {
  1431  		return nil, fmt.Errorf("can't get pubkey: %w", err)
  1432  	}
  1433  
  1434  	const (
  1435  		retries = 50 // 50 * 100ms = 5s total
  1436  		timeout = 100 * time.Millisecond
  1437  	)
  1438  	pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout)
  1439  
  1440  	return pvscWithRetries, nil
  1441  }
  1442  
  1443  // splitAndTrimEmpty slices s into all subslices separated by sep and returns a
  1444  // slice of the string s with all leading and trailing Unicode code points
  1445  // contained in cutset removed. If sep is empty, SplitAndTrim splits after each
  1446  // UTF-8 sequence. First part is equivalent to strings.SplitN with a count of
  1447  // -1.  also filter out empty strings, only return non-empty strings.
  1448  func splitAndTrimEmpty(s, sep, cutset string) []string {
  1449  	if s == "" {
  1450  		return []string{}
  1451  	}
  1452  
  1453  	spl := strings.Split(s, sep)
  1454  	nonEmptyStrings := make([]string, 0, len(spl))
  1455  	for i := 0; i < len(spl); i++ {
  1456  		element := strings.Trim(spl[i], cutset)
  1457  		if element != "" {
  1458  			nonEmptyStrings = append(nonEmptyStrings, element)
  1459  		}
  1460  	}
  1461  	return nonEmptyStrings
  1462  }