github.com/soomindae/tendermint@v0.0.5-0.20210528140126-84a0c70c8162/test/maverick/node/node.go

     1  package node
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"errors"
     7  	"fmt"
     8  	"net"
     9  	"net/http"
    10  	_ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
    11  	"strconv"
    12  	"strings"
    13  	"time"
    14  
    15  	"github.com/prometheus/client_golang/prometheus"
    16  	"github.com/prometheus/client_golang/prometheus/promhttp"
    17  	"github.com/rs/cors"
    18  
    19  	dbm "github.com/soomindae/tm-db"
    20  
    21  	abci "github.com/soomindae/tendermint/abci/types"
    22  	bcv0 "github.com/soomindae/tendermint/blockchain/v0"
    23  	bcv1 "github.com/soomindae/tendermint/blockchain/v1"
    24  	bcv2 "github.com/soomindae/tendermint/blockchain/v2"
    25  	cfg "github.com/soomindae/tendermint/config"
    26  	"github.com/soomindae/tendermint/consensus"
    27  	"github.com/soomindae/tendermint/crypto"
    28  	"github.com/soomindae/tendermint/evidence"
    29  	tmjson "github.com/soomindae/tendermint/libs/json"
    30  	"github.com/soomindae/tendermint/libs/log"
    31  	tmpubsub "github.com/soomindae/tendermint/libs/pubsub"
    32  	"github.com/soomindae/tendermint/libs/service"
    33  	"github.com/soomindae/tendermint/light"
    34  	mempl "github.com/soomindae/tendermint/mempool"
    35  	"github.com/soomindae/tendermint/p2p"
    36  	"github.com/soomindae/tendermint/p2p/pex"
    37  	"github.com/soomindae/tendermint/privval"
    38  	"github.com/soomindae/tendermint/proxy"
    39  	rpccore "github.com/soomindae/tendermint/rpc/core"
    40  	grpccore "github.com/soomindae/tendermint/rpc/grpc"
    41  	rpcserver "github.com/soomindae/tendermint/rpc/jsonrpc/server"
    42  	sm "github.com/soomindae/tendermint/state"
    43  	"github.com/soomindae/tendermint/state/indexer"
    44  	blockidxkv "github.com/soomindae/tendermint/state/indexer/block/kv"
    45  	blockidxnull "github.com/soomindae/tendermint/state/indexer/block/null"
    46  	"github.com/soomindae/tendermint/state/txindex"
    47  	"github.com/soomindae/tendermint/state/txindex/kv"
    48  	"github.com/soomindae/tendermint/state/txindex/null"
    49  	"github.com/soomindae/tendermint/statesync"
    50  	"github.com/soomindae/tendermint/store"
    51  	cs "github.com/soomindae/tendermint/test/maverick/consensus"
    52  	"github.com/soomindae/tendermint/types"
    53  	tmtime "github.com/soomindae/tendermint/types/time"
    54  	"github.com/soomindae/tendermint/version"
    55  )
    56  
    57  //------------------------------------------------------------------------------
    58  
    59  // ParseMisbehaviors is a utility function that converts a comma-separated string
    60  // into a map of misbehaviors to be executed by the maverick node.
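        //
        // A minimal usage sketch (hypothetical; it assumes cs.MisbehaviorList contains a
        // "double-prevote" entry and that the misbehavior should trigger at height 10):
        //
        //   misbehaviors, err := ParseMisbehaviors("double-prevote,10")
        //   // misbehaviors[10] now holds the double-prevote misbehavior; err reports a
        //   // malformed pair, e.g. a missing height or an unknown misbehavior name.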
    61  func ParseMisbehaviors(str string) (map[int64]cs.Misbehavior, error) {
    62  	// if the string is empty, we run a normal node with no misbehaviors
    63  	var misbehaviors = make(map[int64]cs.Misbehavior)
    64  	if str == "" {
    65  		return misbehaviors, nil
    66  	}
    67  	strs := strings.Split(str, ",")
    68  	if len(strs)%2 != 0 {
    69  		return misbehaviors, errors.New("missing either height or misbehavior name in the misbehavior flag")
    70  	}
    71  OUTER_LOOP:
    72  	for i := 0; i < len(strs); i += 2 {
    73  		height, err := strconv.ParseInt(strs[i+1], 10, 64)
    74  		if err != nil {
    75  			return misbehaviors, fmt.Errorf("failed to parse misbehavior height: %w", err)
    76  		}
    77  		for key, misbehavior := range cs.MisbehaviorList {
    78  			if key == strs[i] {
    79  				misbehaviors[height] = misbehavior
    80  				continue OUTER_LOOP
    81  			}
    82  		}
    83  		return misbehaviors, fmt.Errorf("received unknown misbehavior: %s. Did you forget to add it?", strs[i])
    84  	}
    85  
    86  	return misbehaviors, nil
    87  }
    88  
    89  // DBContext specifies config information for loading a new DB.
    90  type DBContext struct {
    91  	ID     string
    92  	Config *cfg.Config
    93  }
    94  
    95  // DBProvider takes a DBContext and returns an instantiated DB.
    96  type DBProvider func(*DBContext) (dbm.DB, error)
    97  
    98  // DefaultDBProvider returns a database using the DBBackend and DBDir
    99  // specified in the ctx.Config.
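        //
        // For example (mirroring how initDBs below opens the block store):
        //
        //   db, err := DefaultDBProvider(&DBContext{ID: "blockstore", Config: config})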
   100  func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
   101  	dbType := dbm.BackendType(ctx.Config.DBBackend)
   102  	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
   103  }
   104  
   105  // GenesisDocProvider returns a GenesisDoc.
   106  // It allows the GenesisDoc to be pulled from sources other than the
   107  // filesystem, for instance from a distributed key-value store cluster.
   108  type GenesisDocProvider func() (*types.GenesisDoc, error)
   109  
   110  // DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
   111  // the GenesisDoc from the config.GenesisFile() on the filesystem.
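        //
        // For example:
        //
        //   provider := DefaultGenesisDocProviderFunc(config)
        //   genDoc, err := provider()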
   112  func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
   113  	return func() (*types.GenesisDoc, error) {
   114  		return types.GenesisDocFromFile(config.GenesisFile())
   115  	}
   116  }
   117  
   118  // Provider takes a config and a logger and returns a ready to go Node.
   119  type Provider func(*cfg.Config, log.Logger) (*Node, error)
   120  
   121  // DefaultNewNode returns a Tendermint node with default settings for the
   122  // PrivValidator, ClientCreator, GenesisDoc, and DBProvider. It mirrors Provider,
   123  // but additionally takes the misbehaviors map used by the maverick consensus reactor.
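        //
        // A hedged usage sketch (the os import and the misbehavior string are illustrative):
        //
        //   config := cfg.DefaultConfig()
        //   logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
        //   misbehaviors, _ := ParseMisbehaviors("double-prevote,10")
        //   node, err := DefaultNewNode(config, logger, misbehaviors)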
   124  func DefaultNewNode(config *cfg.Config, logger log.Logger, misbehaviors map[int64]cs.Misbehavior) (*Node, error) {
   125  	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
   126  	if err != nil {
   127  		return nil, fmt.Errorf("failed to load or gen node key %s, err: %w", config.NodeKeyFile(), err)
   128  	}
   129  
   130  	return NewNode(config,
   131  		LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
   132  		nodeKey,
   133  		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
   134  		DefaultGenesisDocProviderFunc(config),
   135  		DefaultDBProvider,
   136  		DefaultMetricsProvider(config.Instrumentation),
   137  		logger,
   138  		misbehaviors,
   139  	)
   140  
   141  }
   142  
   143  // MetricsProvider returns consensus, p2p, mempool and state Metrics.
   144  type MetricsProvider func(chainID string) (*consensus.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics)
   145  
   146  // DefaultMetricsProvider returns Metrics built using the Prometheus client
   147  // library if Prometheus is enabled. Otherwise, it returns no-op Metrics.
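        //
        // For example (this mirrors how NewNode wires metrics further down):
        //
        //   provider := DefaultMetricsProvider(config.Instrumentation)
        //   csMetrics, p2pMetrics, memplMetrics, smMetrics := provider(genDoc.ChainID)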
   148  func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
   149  	return func(chainID string) (*consensus.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) {
   150  		if config.Prometheus {
   151  			return consensus.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   152  				p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   153  				mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID),
   154  				sm.PrometheusMetrics(config.Namespace, "chain_id", chainID)
   155  		}
   156  		return consensus.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics()
   157  	}
   158  }
   159  
   160  // Option sets a parameter for the node.
   161  type Option func(*Node)
   162  
   163  // Temporary interface for switching to fast sync; we should get rid of the v0 and v1 reactors.
   164  // See: https://github.com/soomindae/tendermint/issues/4595
   165  type fastSyncReactor interface {
   166  	SwitchToFastSync(sm.State) error
   167  }
   168  
   169  // CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to
   170  // the node's Switch.
   171  //
   172  // WARNING: using any name from the list of existing reactors below will
   173  // result in replacing that reactor with the custom one.
   174  //
   175  //  - MEMPOOL
   176  //  - BLOCKCHAIN
   177  //  - CONSENSUS
   178  //  - EVIDENCE
   179  //  - PEX
   180  //  - STATESYNC
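        //
        // A hedged usage sketch (pv, genProvider and myMempoolReactor are illustrative):
        //
        //   node, err := NewNode(config, pv, nodeKey, clientCreator, genProvider, dbProvider,
        //       metricsProvider, logger, misbehaviors,
        //       CustomReactors(map[string]p2p.Reactor{"MEMPOOL": myMempoolReactor}))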
   181  func CustomReactors(reactors map[string]p2p.Reactor) Option {
   182  	return func(n *Node) {
   183  		for name, reactor := range reactors {
   184  			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
   185  				n.sw.Logger.Info("Replacing existing reactor with a custom one",
   186  					"name", name, "existing", existingReactor, "custom", reactor)
   187  				n.sw.RemoveReactor(name, existingReactor)
   188  			}
   189  			n.sw.AddReactor(name, reactor)
   190  		}
   191  	}
   192  }
   193  
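        // CustomReactorsAsConstructors is like CustomReactors, but takes reactor
        // constructors instead of instances, so each custom reactor can be built with a
        // reference to the Node it is installed on.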
   194  func CustomReactorsAsConstructors(reactors map[string]func(n *Node) p2p.Reactor) Option {
   195  	return func(n *Node) {
   196  		for name, customReactor := range reactors {
   197  			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
   198  				n.sw.Logger.Info("Replacing existing reactor with a custom one",
   199  					"name", name)
   200  				n.sw.RemoveReactor(name, existingReactor)
   201  			}
   202  			n.sw.AddReactor(name, customReactor(n))
   203  		}
   204  	}
   205  }
   206  
   207  // StateProvider overrides the state provider used by state sync to retrieve trusted app hashes and
   208  // build a State object for bootstrapping the node.
   209  // WARNING: this interface is considered unstable and subject to change.
   210  func StateProvider(stateProvider statesync.StateProvider) Option {
   211  	return func(n *Node) {
   212  		n.stateSyncProvider = stateProvider
   213  	}
   214  }
   215  
   216  //------------------------------------------------------------------------------
   217  
   218  // Node is the highest level interface to a full Tendermint node.
   219  // It includes all configuration information and running services.
   220  type Node struct {
   221  	service.BaseService
   222  
   223  	// config
   224  	config        *cfg.Config
   225  	genesisDoc    *types.GenesisDoc   // initial validator set
   226  	privValidator types.PrivValidator // local node's validator key
   227  
   228  	// network
   229  	transport   *p2p.MultiplexTransport
   230  	sw          *p2p.Switch  // p2p connections
   231  	addrBook    pex.AddrBook // known peers
   232  	nodeInfo    p2p.NodeInfo
   233  	nodeKey     *p2p.NodeKey // our node privkey
   234  	isListening bool
   235  
   236  	// services
   237  	eventBus          *types.EventBus // pub/sub for services
   238  	stateStore        sm.Store
   239  	blockStore        *store.BlockStore // store the blockchain to disk
   240  	bcReactor         p2p.Reactor       // for fast-syncing
   241  	mempoolReactor    *mempl.Reactor    // for gossiping transactions
   242  	mempool           mempl.Mempool
   243  	stateSync         bool                    // whether the node should state sync on startup
   244  	stateSyncReactor  *statesync.Reactor      // for hosting and restoring state sync snapshots
   245  	stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node
   246  	stateSyncGenesis  sm.State                // provides the genesis state for state sync
   247  	consensusState    *cs.State               // latest consensus state
   248  	consensusReactor  *cs.Reactor             // for participating in the consensus
   249  	pexReactor        *pex.Reactor            // for exchanging peer addresses
   250  	evidencePool      *evidence.Pool          // tracking evidence
   251  	proxyApp          proxy.AppConns          // connection to the application
   252  	rpcListeners      []net.Listener          // rpc servers
   253  	txIndexer         txindex.TxIndexer
   254  	blockIndexer      indexer.BlockIndexer
   255  	indexerService    *txindex.IndexerService
   256  	prometheusSrv     *http.Server
   257  }
   258  
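        // initDBs opens the blockstore and state databases via the given DBProvider and
        // wraps the blockstore DB in a store.BlockStore.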
   259  func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
   260  	var blockStoreDB dbm.DB
   261  	blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
   262  	if err != nil {
   263  		return
   264  	}
   265  	blockStore = store.NewBlockStore(blockStoreDB)
   266  
   267  	stateDB, err = dbProvider(&DBContext{"state", config})
   268  	if err != nil {
   269  		return
   270  	}
   271  
   272  	return
   273  }
   274  
   275  func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
   276  	proxyApp := proxy.NewAppConns(clientCreator)
   277  	proxyApp.SetLogger(logger.With("module", "proxy"))
   278  	if err := proxyApp.Start(); err != nil {
   279  		return nil, fmt.Errorf("error starting proxy app connections: %v", err)
   280  	}
   281  	return proxyApp, nil
   282  }
   283  
   284  func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
   285  	eventBus := types.NewEventBus()
   286  	eventBus.SetLogger(logger.With("module", "events"))
   287  	if err := eventBus.Start(); err != nil {
   288  		return nil, err
   289  	}
   290  	return eventBus, nil
   291  }
   292  
   293  func createAndStartIndexerService(
   294  	config *cfg.Config,
   295  	dbProvider DBProvider,
   296  	eventBus *types.EventBus,
   297  	logger log.Logger,
   298  ) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) {
   299  
   300  	var (
   301  		txIndexer    txindex.TxIndexer
   302  		blockIndexer indexer.BlockIndexer
   303  	)
   304  
   305  	switch config.TxIndex.Indexer {
   306  	case "kv":
   307  		store, err := dbProvider(&DBContext{"tx_index", config})
   308  		if err != nil {
   309  			return nil, nil, nil, err
   310  		}
   311  
   312  		txIndexer = kv.NewTxIndex(store)
   313  		blockIndexer = blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events")))
   314  	default:
   315  		txIndexer = &null.TxIndex{}
   316  		blockIndexer = &blockidxnull.BlockerIndexer{}
   317  	}
   318  
   319  	indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus)
   320  	indexerService.SetLogger(logger.With("module", "txindex"))
   321  
   322  	if err := indexerService.Start(); err != nil {
   323  		return nil, nil, nil, err
   324  	}
   325  
   326  	return indexerService, txIndexer, blockIndexer, nil
   327  }
   328  
   329  func doHandshake(
   330  	stateStore sm.Store,
   331  	state sm.State,
   332  	blockStore sm.BlockStore,
   333  	genDoc *types.GenesisDoc,
   334  	eventBus types.BlockEventPublisher,
   335  	proxyApp proxy.AppConns,
   336  	consensusLogger log.Logger) error {
   337  
   338  	handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc)
   339  	handshaker.SetLogger(consensusLogger)
   340  	handshaker.SetEventBus(eventBus)
   341  	if err := handshaker.Handshake(proxyApp); err != nil {
   342  		return fmt.Errorf("error during handshake: %v", err)
   343  	}
   344  	return nil
   345  }
   346  
   347  func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) {
   348  	// Log the version info.
   349  	logger.Info("Version info",
   350  		"software", version.TMCoreSemVer,
   351  		"block", version.BlockProtocol,
   352  		"p2p", version.P2PProtocol,
   353  	)
   354  
   355  	// If the state and software differ in block version, at least log it.
   356  	if state.Version.Consensus.Block != version.BlockProtocol {
   357  		logger.Info("Software and state have different block protocols",
   358  			"software", version.BlockProtocol,
   359  			"state", state.Version.Consensus.Block,
   360  		)
   361  	}
   362  
   363  	addr := pubKey.Address()
   364  	// Log whether this node is a validator or an observer
   365  	if state.Validators.HasAddress(addr) {
   366  		consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey)
   367  	} else {
   368  		consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
   369  	}
   370  }
   371  
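        // onlyValidatorIsUs reports whether the current validator set consists of a
        // single validator whose address matches our public key.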
   372  func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
   373  	if state.Validators.Size() > 1 {
   374  		return false
   375  	}
   376  	addr, _ := state.Validators.GetByIndex(0)
   377  	return bytes.Equal(pubKey.Address(), addr)
   378  }
   379  
   380  func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns,
   381  	state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (*mempl.Reactor, *mempl.CListMempool) {
   382  
   383  	mempool := mempl.NewCListMempool(
   384  		config.Mempool,
   385  		proxyApp.Mempool(),
   386  		state.LastBlockHeight,
   387  		mempl.WithMetrics(memplMetrics),
   388  		mempl.WithPreCheck(sm.TxPreCheck(state)),
   389  		mempl.WithPostCheck(sm.TxPostCheck(state)),
   390  	)
   391  	mempoolLogger := logger.With("module", "mempool")
   392  	mempoolReactor := mempl.NewReactor(config.Mempool, mempool)
   393  	mempoolReactor.SetLogger(mempoolLogger)
   394  
   395  	if config.Consensus.WaitForTxs() {
   396  		mempool.EnableTxsAvailable()
   397  	}
   398  	return mempoolReactor, mempool
   399  }
   400  
   401  func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
   402  	stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) {
   403  
   404  	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
   405  	if err != nil {
   406  		return nil, nil, err
   407  	}
   408  	evidenceLogger := logger.With("module", "evidence")
   409  	evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB), blockStore)
   410  	if err != nil {
   411  		return nil, nil, err
   412  	}
   413  	evidenceReactor := evidence.NewReactor(evidencePool)
   414  	evidenceReactor.SetLogger(evidenceLogger)
   415  	return evidenceReactor, evidencePool, nil
   416  }
   417  
   418  func createBlockchainReactor(config *cfg.Config,
   419  	state sm.State,
   420  	blockExec *sm.BlockExecutor,
   421  	blockStore *store.BlockStore,
   422  	fastSync bool,
   423  	logger log.Logger) (bcReactor p2p.Reactor, err error) {
   424  
   425  	switch config.FastSync.Version {
   426  	case "v0":
   427  		bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   428  	case "v1":
   429  		bcReactor = bcv1.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   430  	case "v2":
   431  		bcReactor = bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
   432  	default:
   433  		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
   434  	}
   435  
   436  	bcReactor.SetLogger(logger.With("module", "blockchain"))
   437  	return bcReactor, nil
   438  }
   439  
   440  func createConsensusReactor(config *cfg.Config,
   441  	state sm.State,
   442  	blockExec *sm.BlockExecutor,
   443  	blockStore sm.BlockStore,
   444  	mempool *mempl.CListMempool,
   445  	evidencePool *evidence.Pool,
   446  	privValidator types.PrivValidator,
   447  	csMetrics *consensus.Metrics,
   448  	waitSync bool,
   449  	eventBus *types.EventBus,
   450  	consensusLogger log.Logger,
   451  	misbehaviors map[int64]cs.Misbehavior) (*cs.Reactor, *cs.State) {
   452  
   453  	consensusState := cs.NewState(
   454  		config.Consensus,
   455  		state.Copy(),
   456  		blockExec,
   457  		blockStore,
   458  		mempool,
   459  		evidencePool,
   460  		misbehaviors,
   461  		cs.StateMetrics(csMetrics),
   462  	)
   463  	consensusState.SetLogger(consensusLogger)
   464  	if privValidator != nil {
   465  		consensusState.SetPrivValidator(privValidator)
   466  	}
   467  	consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics))
   468  	consensusReactor.SetLogger(consensusLogger)
   469  	// services which will be publishing and/or subscribing for messages (events)
   470  	// consensusReactor will set it on consensusState and blockExecutor
   471  	consensusReactor.SetEventBus(eventBus)
   472  	return consensusReactor, consensusState
   473  }
   474  
   475  func createTransport(
   476  	config *cfg.Config,
   477  	nodeInfo p2p.NodeInfo,
   478  	nodeKey *p2p.NodeKey,
   479  	proxyApp proxy.AppConns,
   480  ) (
   481  	*p2p.MultiplexTransport,
   482  	[]p2p.PeerFilterFunc,
   483  ) {
   484  	var (
   485  		mConnConfig = p2p.MConnConfig(config.P2P)
   486  		transport   = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig)
   487  		connFilters = []p2p.ConnFilterFunc{}
   488  		peerFilters = []p2p.PeerFilterFunc{}
   489  	)
   490  
   491  	if !config.P2P.AllowDuplicateIP {
   492  		connFilters = append(connFilters, p2p.ConnDuplicateIPFilter())
   493  	}
   494  
   495  	// Filter peers by addr or pubkey with an ABCI query.
   496  	// If the query return code is OK, add peer.
   497  	if config.FilterPeers {
   498  		connFilters = append(
   499  			connFilters,
   500  			// ABCI query for address filtering.
   501  			func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
   502  				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
   503  					Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
   504  				})
   505  				if err != nil {
   506  					return err
   507  				}
   508  				if res.IsErr() {
   509  					return fmt.Errorf("error querying abci app: %v", res)
   510  				}
   511  
   512  				return nil
   513  			},
   514  		)
   515  
   516  		peerFilters = append(
   517  			peerFilters,
   518  			// ABCI query for ID filtering.
   519  			func(_ p2p.IPeerSet, p p2p.Peer) error {
   520  				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
   521  					Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
   522  				})
   523  				if err != nil {
   524  					return err
   525  				}
   526  				if res.IsErr() {
   527  					return fmt.Errorf("error querying abci app: %v", res)
   528  				}
   529  
   530  				return nil
   531  			},
   532  		)
   533  	}
   534  
   535  	p2p.MultiplexTransportConnFilters(connFilters...)(transport)
   536  
   537  	// Limit the number of incoming connections.
   538  	max := config.P2P.MaxNumInboundPeers + len(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
   539  	p2p.MultiplexTransportMaxIncomingConnections(max)(transport)
   540  
   541  	return transport, peerFilters
   542  }
   543  
   544  func createSwitch(config *cfg.Config,
   545  	transport p2p.Transport,
   546  	p2pMetrics *p2p.Metrics,
   547  	peerFilters []p2p.PeerFilterFunc,
   548  	mempoolReactor *mempl.Reactor,
   549  	bcReactor p2p.Reactor,
   550  	stateSyncReactor *statesync.Reactor,
   551  	consensusReactor *cs.Reactor,
   552  	evidenceReactor *evidence.Reactor,
   553  	nodeInfo p2p.NodeInfo,
   554  	nodeKey *p2p.NodeKey,
   555  	p2pLogger log.Logger) *p2p.Switch {
   556  
   557  	sw := p2p.NewSwitch(
   558  		config.P2P,
   559  		transport,
   560  		p2p.WithMetrics(p2pMetrics),
   561  		p2p.SwitchPeerFilters(peerFilters...),
   562  	)
   563  	sw.SetLogger(p2pLogger)
   564  	sw.AddReactor("MEMPOOL", mempoolReactor)
   565  	sw.AddReactor("BLOCKCHAIN", bcReactor)
   566  	sw.AddReactor("CONSENSUS", consensusReactor)
   567  	sw.AddReactor("EVIDENCE", evidenceReactor)
   568  	sw.AddReactor("STATESYNC", stateSyncReactor)
   569  
   570  	sw.SetNodeInfo(nodeInfo)
   571  	sw.SetNodeKey(nodeKey)
   572  
   573  	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())
   574  	return sw
   575  }
   576  
   577  func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
   578  	p2pLogger log.Logger, nodeKey *p2p.NodeKey) (pex.AddrBook, error) {
   579  
   580  	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
   581  	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
   582  
   583  	// Add ourselves to addrbook to prevent dialing ourselves
   584  	if config.P2P.ExternalAddress != "" {
   585  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress))
   586  		if err != nil {
   587  			return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err)
   588  		}
   589  		addrBook.AddOurAddress(addr)
   590  	}
   591  	if config.P2P.ListenAddress != "" {
   592  		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress))
   593  		if err != nil {
   594  			return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err)
   595  		}
   596  		addrBook.AddOurAddress(addr)
   597  	}
   598  
   599  	sw.SetAddrBook(addrBook)
   600  
   601  	return addrBook, nil
   602  }
   603  
   604  func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
   605  	sw *p2p.Switch, logger log.Logger) *pex.Reactor {
   606  
   607  	// TODO: persistent peers? So we can have their DNS addrs saved.
   608  	pexReactor := pex.NewReactor(addrBook,
   609  		&pex.ReactorConfig{
   610  			Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
   611  			SeedMode: config.P2P.SeedMode,
   612  			// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer is 10000
   613  			// blocks; assuming 10s blocks, that is ~28 hours.
   614  			// TODO (melekes): make it dynamic based on the actual block latencies
   615  			// from the live network.
   616  			// https://github.com/soomindae/tendermint/issues/3523
   617  			SeedDisconnectWaitPeriod:     28 * time.Hour,
   618  			PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
   619  		})
   620  	pexReactor.SetLogger(logger.With("module", "pex"))
   621  	sw.AddReactor("PEX", pexReactor)
   622  	return pexReactor
   623  }
   624  
   625  // startStateSync starts an asynchronous state sync process, then switches to fast sync or consensus mode once the sync completes.
   626  func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor,
   627  	stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool,
   628  	stateStore sm.Store, blockStore *store.BlockStore, state sm.State) error {
   629  	ssR.Logger.Info("Starting state sync")
   630  
   631  	if stateProvider == nil {
   632  		var err error
   633  		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
   634  		defer cancel()
   635  		stateProvider, err = statesync.NewLightClientStateProvider(
   636  			ctx,
   637  			state.ChainID, state.Version, state.InitialHeight,
   638  			config.RPCServers, light.TrustOptions{
   639  				Period: config.TrustPeriod,
   640  				Height: config.TrustHeight,
   641  				Hash:   config.TrustHashBytes(),
   642  			}, ssR.Logger.With("module", "light"))
   643  		if err != nil {
   644  			return fmt.Errorf("failed to set up light client state provider: %w", err)
   645  		}
   646  	}
   647  
   648  	go func() {
   649  		state, commit, err := ssR.Sync(stateProvider, config.DiscoveryTime)
   650  		if err != nil {
   651  			ssR.Logger.Error("State sync failed", "err", err)
   652  			return
   653  		}
   654  		err = stateStore.Bootstrap(state)
   655  		if err != nil {
   656  			ssR.Logger.Error("Failed to bootstrap node with new state", "err", err)
   657  			return
   658  		}
   659  		err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit)
   660  		if err != nil {
   661  			ssR.Logger.Error("Failed to store last seen commit", "err", err)
   662  			return
   663  		}
   664  
   665  		if fastSync {
   666  			// FIXME Very ugly to have these metrics bleed through here.
   667  			conR.Metrics.StateSyncing.Set(0)
   668  			conR.Metrics.FastSyncing.Set(1)
   669  			err = bcR.SwitchToFastSync(state)
   670  			if err != nil {
   671  				ssR.Logger.Error("Failed to switch to fast sync", "err", err)
   672  				return
   673  			}
   674  		} else {
   675  			conR.SwitchToConsensus(state, true)
   676  		}
   677  	}()
   678  	return nil
   679  }
   680  
   681  // NewNode returns a new, ready-to-go Tendermint Node.
   682  func NewNode(config *cfg.Config,
   683  	privValidator types.PrivValidator,
   684  	nodeKey *p2p.NodeKey,
   685  	clientCreator proxy.ClientCreator,
   686  	genesisDocProvider GenesisDocProvider,
   687  	dbProvider DBProvider,
   688  	metricsProvider MetricsProvider,
   689  	logger log.Logger,
   690  	misbehaviors map[int64]cs.Misbehavior,
   691  	options ...Option) (*Node, error) {
   692  
   693  	blockStore, stateDB, err := initDBs(config, dbProvider)
   694  	if err != nil {
   695  		return nil, err
   696  	}
   697  
   698  	stateStore := sm.NewStore(stateDB)
   699  
   700  	state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
   701  	if err != nil {
   702  		return nil, err
   703  	}
   704  
   705  	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
   706  	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger)
   707  	if err != nil {
   708  		return nil, err
   709  	}
   710  
   711  	// EventBus and IndexerService must be started before the handshake because
   712  	// we might need to index the txs of the replayed block, as this might not have
   713  	// happened when the node stopped last time (i.e. the node stopped after it saved
   714  	// the block but before it indexed the txs, or the EndBlocker panicked).
   715  	eventBus, err := createAndStartEventBus(logger)
   716  	if err != nil {
   717  		return nil, err
   718  	}
   719  
   720  	indexerService, txIndexer, blockIndexer, err := createAndStartIndexerService(config, dbProvider, eventBus, logger)
   721  	if err != nil {
   722  		return nil, err
   723  	}
   724  
   725  	// If an address is provided, listen on the socket for a connection from an
   726  	// external signing process.
   727  	if config.PrivValidatorListenAddr != "" {
   728  		// FIXME: we should start services inside OnStart
   729  		privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, genDoc.ChainID, logger)
   730  		if err != nil {
   731  			return nil, fmt.Errorf("error with private validator socket client: %w", err)
   732  		}
   733  	}
   734  
   735  	pubKey, err := privValidator.GetPubKey()
   736  	if err != nil {
   737  		return nil, fmt.Errorf("can't get pubkey: %w", err)
   738  	}
   739  
   740  	// Determine whether we should do state and/or fast sync.
   741  	// We don't fast-sync when the only validator is us.
   742  	fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey)
   743  	stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey)
   744  	if stateSync && state.LastBlockHeight > 0 {
   745  		logger.Info("Found local state with non-zero height, skipping state sync")
   746  		stateSync = false
   747  	}
   748  
   749  	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
   750  	// and replays any blocks as necessary to sync tendermint with the app.
   751  	consensusLogger := logger.With("module", "consensus")
   752  	if !stateSync {
   753  		if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil {
   754  			return nil, err
   755  		}
   756  
   757  		// Reload the state. It will have the Version.Consensus.App set by the
   758  		// Handshake, and may have other modifications as well (i.e. depending on
   759  		// what happened during block replay).
   760  		state, err = stateStore.Load()
   761  		if err != nil {
   762  			return nil, fmt.Errorf("cannot load state: %w", err)
   763  		}
   764  	}
   765  
   766  	logNodeStartupInfo(state, pubKey, logger, consensusLogger)
   767  
   768  	csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID)
   769  
   770  	// Make MempoolReactor
   771  	mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)
   772  
   773  	// Make Evidence Reactor
   774  	evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger)
   775  	if err != nil {
   776  		return nil, err
   777  	}
   778  
   779  	// make block executor for consensus and blockchain reactors to execute blocks
   780  	blockExec := sm.NewBlockExecutor(
   781  		stateStore,
   782  		logger.With("module", "state"),
   783  		proxyApp.Consensus(),
   784  		mempool,
   785  		evidencePool,
   786  		sm.BlockExecutorWithMetrics(smMetrics),
   787  	)
   788  
   789  	// Make BlockchainReactor. Don't start fast sync if we're doing a state sync first.
   790  	bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync && !stateSync, logger)
   791  	if err != nil {
   792  		return nil, fmt.Errorf("could not create blockchain reactor: %w", err)
   793  	}
   794  
   795  	// Make ConsensusReactor. Don't enable fully if doing a state sync and/or fast sync first.
   796  	// FIXME We need to update metrics here, since other reactors don't have access to them.
   797  	if stateSync {
   798  		csMetrics.StateSyncing.Set(1)
   799  	} else if fastSync {
   800  		csMetrics.FastSyncing.Set(1)
   801  	}
   802  
   803  	logger.Info("Setting up maverick consensus reactor", "Misbehaviors", misbehaviors)
   804  	consensusReactor, consensusState := createConsensusReactor(
   805  		config, state, blockExec, blockStore, mempool, evidencePool,
   806  		privValidator, csMetrics, stateSync || fastSync, eventBus, consensusLogger, misbehaviors)
   807  
   808  	// Set up state sync reactor, and schedule a sync if requested.
   809  	// FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy,
   810  	// we should clean this whole thing up. See:
   811  	// https://github.com/soomindae/tendermint/issues/4644
   812  	stateSyncReactor := statesync.NewReactor(proxyApp.Snapshot(), proxyApp.Query(),
   813  		config.StateSync.TempDir)
   814  	stateSyncReactor.SetLogger(logger.With("module", "statesync"))
   815  
   816  	nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state)
   817  	if err != nil {
   818  		return nil, err
   819  	}
   820  
   821  	// Setup Transport.
   822  	transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp)
   823  
   824  	// Setup Switch.
   825  	p2pLogger := logger.With("module", "p2p")
   826  	sw := createSwitch(
   827  		config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor,
   828  		stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger,
   829  	)
   830  
   831  	err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
   832  	if err != nil {
   833  		return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err)
   834  	}
   835  
   836  	err = sw.AddUnconditionalPeerIDs(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " "))
   837  	if err != nil {
   838  		return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
   839  	}
   840  
   841  	addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
   842  	if err != nil {
   843  		return nil, fmt.Errorf("could not create addrbook: %w", err)
   844  	}
   845  
   846  	// Optionally, start the pex reactor
   847  	//
   848  	// TODO:
   849  	//
   850  	// We need to set Seeds and PersistentPeers on the switch,
   851  	// since it needs to be able to use these (and their DNS names)
   852  	// even if the PEX is off. We can include the DNS name in the NetAddress,
   853  	// but it would still be nice to have a clear list of the current "PersistentPeers"
   854  	// somewhere that we can return with net_info.
   855  	//
   856  	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
   857  	// Note we currently use the addrBook regardless, at least for AddOurAddress.
   858  	var pexReactor *pex.Reactor
   859  	if config.P2P.PexReactor {
   860  		pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
   861  	}
   862  
   863  	if config.RPC.PprofListenAddress != "" {
   864  		go func() {
   865  			logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress)
   866  			logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil))
   867  		}()
   868  	}
   869  
   870  	node := &Node{
   871  		config:        config,
   872  		genesisDoc:    genDoc,
   873  		privValidator: privValidator,
   874  
   875  		transport: transport,
   876  		sw:        sw,
   877  		addrBook:  addrBook,
   878  		nodeInfo:  nodeInfo,
   879  		nodeKey:   nodeKey,
   880  
   881  		stateStore:       stateStore,
   882  		blockStore:       blockStore,
   883  		bcReactor:        bcReactor,
   884  		mempoolReactor:   mempoolReactor,
   885  		mempool:          mempool,
   886  		consensusState:   consensusState,
   887  		consensusReactor: consensusReactor,
   888  		stateSyncReactor: stateSyncReactor,
   889  		stateSync:        stateSync,
   890  		stateSyncGenesis: state, // Shouldn't be necessary, but need a way to pass the genesis state
   891  		pexReactor:       pexReactor,
   892  		evidencePool:     evidencePool,
   893  		proxyApp:         proxyApp,
   894  		txIndexer:        txIndexer,
   895  		indexerService:   indexerService,
   896  		blockIndexer:     blockIndexer,
   897  		eventBus:         eventBus,
   898  	}
   899  	node.BaseService = *service.NewBaseService(logger, "Node", node)
   900  
   901  	for _, option := range options {
   902  		option(node)
   903  	}
   904  
   905  	return node, nil
   906  }
   907  
   908  // OnStart starts the Node. It implements service.Service.
   909  func (n *Node) OnStart() error {
   910  	now := tmtime.Now()
   911  	genTime := n.genesisDoc.GenesisTime
   912  	if genTime.After(now) {
   913  		n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime)
   914  		time.Sleep(genTime.Sub(now))
   915  	}
   916  
   917  	// Add private IDs to the addrbook to block those peers from being added
   918  	n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))
   919  
   920  	// Start the RPC server before the P2P server
   921  	// so we can e.g. receive txs for the first block
   922  	if n.config.RPC.ListenAddress != "" {
   923  		listeners, err := n.startRPC()
   924  		if err != nil {
   925  			return err
   926  		}
   927  		n.rpcListeners = listeners
   928  	}
   929  
   930  	if n.config.Instrumentation.Prometheus &&
   931  		n.config.Instrumentation.PrometheusListenAddr != "" {
   932  		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
   933  	}
   934  
   935  	// Start the transport.
   936  	addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
   937  	if err != nil {
   938  		return err
   939  	}
   940  	if err := n.transport.Listen(*addr); err != nil {
   941  		return err
   942  	}
   943  
   944  	n.isListening = true
   945  
   946  	if n.config.Mempool.WalEnabled() {
   947  		err = n.mempool.InitWAL()
   948  		if err != nil {
   949  			return fmt.Errorf("init mempool WAL: %w", err)
   950  		}
   951  	}
   952  
   953  	// Start the switch (the P2P server).
   954  	err = n.sw.Start()
   955  	if err != nil {
   956  		return err
   957  	}
   958  
   959  	// Always connect to persistent peers
   960  	err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
   961  	if err != nil {
   962  		return fmt.Errorf("could not dial peers from persistent_peers field: %w", err)
   963  	}
   964  
   965  	// Run state sync
   966  	if n.stateSync {
   967  		bcR, ok := n.bcReactor.(fastSyncReactor)
   968  		if !ok {
   969  			return fmt.Errorf("this blockchain reactor does not support switching from state sync")
   970  		}
   971  		err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider,
   972  			n.config.StateSync, n.config.FastSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis)
   973  		if err != nil {
   974  			return fmt.Errorf("failed to start state sync: %w", err)
   975  		}
   976  	}
   977  
   978  	return nil
   979  }
   980  
   981  // OnStop stops the Node. It implements service.Service.
   982  func (n *Node) OnStop() {
   983  	n.BaseService.OnStop()
   984  
   985  	n.Logger.Info("Stopping Node")
   986  
   987  	// first stop the non-reactor services
   988  	if err := n.eventBus.Stop(); err != nil {
   989  		n.Logger.Error("Error closing eventBus", "err", err)
   990  	}
   991  	if err := n.indexerService.Stop(); err != nil {
   992  		n.Logger.Error("Error closing indexerService", "err", err)
   993  	}
   994  
   995  	// now stop the reactors
   996  	if err := n.sw.Stop(); err != nil {
   997  		n.Logger.Error("Error closing switch", "err", err)
   998  	}
   999  
  1000  	// stop mempool WAL
  1001  	if n.config.Mempool.WalEnabled() {
  1002  		n.mempool.CloseWAL()
  1003  	}
  1004  
  1005  	if err := n.transport.Close(); err != nil {
  1006  		n.Logger.Error("Error closing transport", "err", err)
  1007  	}
  1008  
  1009  	n.isListening = false
  1010  
  1011  	// finally stop the listeners / external services
  1012  	for _, l := range n.rpcListeners {
  1013  		n.Logger.Info("Closing rpc listener", "listener", l)
  1014  		if err := l.Close(); err != nil {
  1015  			n.Logger.Error("Error closing listener", "listener", l, "err", err)
  1016  		}
  1017  	}
  1018  
  1019  	if pvsc, ok := n.privValidator.(service.Service); ok {
  1020  		if err := pvsc.Stop(); err != nil {
  1021  			n.Logger.Error("Error closing private validator", "err", err)
  1022  		}
  1023  	}
  1024  
  1025  	if n.prometheusSrv != nil {
  1026  		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
  1027  			// Error from closing listeners, or context timeout:
  1028  			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
  1029  		}
  1030  	}
  1031  }
  1032  
  1033  // ConfigureRPC makes sure RPC has all the objects it needs to operate.
  1034  func (n *Node) ConfigureRPC() error {
  1035  	pubKey, err := n.privValidator.GetPubKey()
  1036  	if err != nil {
  1037  		return fmt.Errorf("can't get pubkey: %w", err)
  1038  	}
  1039  	rpccore.SetEnvironment(&rpccore.Environment{
  1040  		ProxyAppQuery:   n.proxyApp.Query(),
  1041  		ProxyAppMempool: n.proxyApp.Mempool(),
  1042  
  1043  		StateStore:     n.stateStore,
  1044  		BlockStore:     n.blockStore,
  1045  		EvidencePool:   n.evidencePool,
  1046  		ConsensusState: n.consensusState,
  1047  		P2PPeers:       n.sw,
  1048  		P2PTransport:   n,
  1049  
  1050  		PubKey:           pubKey,
  1051  		GenDoc:           n.genesisDoc,
  1052  		TxIndexer:        n.txIndexer,
  1053  		BlockIndexer:     n.blockIndexer,
  1054  		ConsensusReactor: &consensus.Reactor{},
  1055  		EventBus:         n.eventBus,
  1056  		Mempool:          n.mempool,
  1057  
  1058  		Logger: n.Logger.With("module", "rpc"),
  1059  
  1060  		Config: *n.config.RPC,
  1061  	})
  1062  	return nil
  1063  }
  1064  
  1065  func (n *Node) startRPC() ([]net.Listener, error) {
  1066  	err := n.ConfigureRPC()
  1067  	if err != nil {
  1068  		return nil, err
  1069  	}
  1070  
  1071  	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")
  1072  
  1073  	if n.config.RPC.Unsafe {
  1074  		rpccore.AddUnsafeRoutes()
  1075  	}
  1076  
  1077  	config := rpcserver.DefaultConfig()
  1078  	config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
  1079  	config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
  1080  	config.MaxOpenConnections = n.config.RPC.MaxOpenConnections
  1081  	// If necessary adjust global WriteTimeout to ensure it's greater than
  1082  	// TimeoutBroadcastTxCommit.
  1083  	// See https://github.com/soomindae/tendermint/issues/3435
  1084  	if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
  1085  		config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
  1086  	}
  1087  
  1088  	// we may expose the RPC over both a Unix and a TCP socket
  1089  	listeners := make([]net.Listener, len(listenAddrs))
  1090  	for i, listenAddr := range listenAddrs {
  1091  		mux := http.NewServeMux()
  1092  		rpcLogger := n.Logger.With("module", "rpc-server")
  1093  		wmLogger := rpcLogger.With("protocol", "websocket")
  1094  		wm := rpcserver.NewWebsocketManager(rpccore.Routes,
  1095  			rpcserver.OnDisconnect(func(remoteAddr string) {
  1096  				err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
  1097  				if err != nil && err != tmpubsub.ErrSubscriptionNotFound {
  1098  					wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err)
  1099  				}
  1100  			}),
  1101  			rpcserver.ReadLimit(config.MaxBodyBytes),
  1102  		)
  1103  		wm.SetLogger(wmLogger)
  1104  		mux.HandleFunc("/websocket", wm.WebsocketHandler)
  1105  		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger)
  1106  		listener, err := rpcserver.Listen(
  1107  			listenAddr,
  1108  			config,
  1109  		)
  1110  		if err != nil {
  1111  			return nil, err
  1112  		}
  1113  
  1114  		var rootHandler http.Handler = mux
  1115  		if n.config.RPC.IsCorsEnabled() {
  1116  			corsMiddleware := cors.New(cors.Options{
  1117  				AllowedOrigins: n.config.RPC.CORSAllowedOrigins,
  1118  				AllowedMethods: n.config.RPC.CORSAllowedMethods,
  1119  				AllowedHeaders: n.config.RPC.CORSAllowedHeaders,
  1120  			})
  1121  			rootHandler = corsMiddleware.Handler(mux)
  1122  		}
  1123  		if n.config.RPC.IsTLSEnabled() {
  1124  			go func() {
  1125  				if err := rpcserver.ServeTLS(
  1126  					listener,
  1127  					rootHandler,
  1128  					n.config.RPC.CertFile(),
  1129  					n.config.RPC.KeyFile(),
  1130  					rpcLogger,
  1131  					config,
  1132  				); err != nil {
  1133  					n.Logger.Error("Error serving server with TLS", "err", err)
  1134  				}
  1135  			}()
  1136  		} else {
  1137  			go func() {
  1138  				if err := rpcserver.Serve(
  1139  					listener,
  1140  					rootHandler,
  1141  					rpcLogger,
  1142  					config,
  1143  				); err != nil {
  1144  					n.Logger.Error("Error serving server", "err", err)
  1145  				}
  1146  			}()
  1147  		}
  1148  
  1149  		listeners[i] = listener
  1150  	}
  1151  
  1152  	// we expose a simplified API over gRPC for the convenience of app devs
  1153  	grpcListenAddr := n.config.RPC.GRPCListenAddress
  1154  	if grpcListenAddr != "" {
  1155  		config := rpcserver.DefaultConfig()
  1156  		config.MaxBodyBytes = n.config.RPC.MaxBodyBytes
  1157  		config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes
  1158  		// NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections
  1159  		config.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections
  1160  		// If necessary adjust global WriteTimeout to ensure it's greater than
  1161  		// TimeoutBroadcastTxCommit.
  1162  		// See https://github.com/soomindae/tendermint/issues/3435
  1163  		if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
  1164  			config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
  1165  		}
  1166  		listener, err := rpcserver.Listen(grpcListenAddr, config)
  1167  		if err != nil {
  1168  			return nil, err
  1169  		}
  1170  		go func() {
  1171  			if err := grpccore.StartGRPCServer(listener); err != nil {
  1172  				n.Logger.Error("Error starting gRPC server", "err", err)
  1173  			}
  1174  		}()
  1175  		listeners = append(listeners, listener)
  1176  	}
  1177  
  1178  	return listeners, nil
  1179  }
  1180  
  1181  // startPrometheusServer starts a Prometheus HTTP server, listening for metrics
  1182  // collectors on addr.
  1183  func (n *Node) startPrometheusServer(addr string) *http.Server {
  1184  	srv := &http.Server{
  1185  		Addr: addr,
  1186  		Handler: promhttp.InstrumentMetricHandler(
  1187  			prometheus.DefaultRegisterer, promhttp.HandlerFor(
  1188  				prometheus.DefaultGatherer,
  1189  				promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections},
  1190  			),
  1191  		),
  1192  	}
  1193  	go func() {
  1194  		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
  1195  			// Error starting or closing listener:
  1196  			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
  1197  		}
  1198  	}()
  1199  	return srv
  1200  }
  1201  
  1202  // Switch returns the Node's Switch.
  1203  func (n *Node) Switch() *p2p.Switch {
  1204  	return n.sw
  1205  }
  1206  
  1207  // BlockStore returns the Node's BlockStore.
  1208  func (n *Node) BlockStore() *store.BlockStore {
  1209  	return n.blockStore
  1210  }
  1211  
  1212  // ConsensusState returns the Node's ConsensusState.
  1213  func (n *Node) ConsensusState() *cs.State {
  1214  	return n.consensusState
  1215  }
  1216  
  1217  // ConsensusReactor returns the Node's ConsensusReactor.
  1218  func (n *Node) ConsensusReactor() *cs.Reactor {
  1219  	return n.consensusReactor
  1220  }
  1221  
  1222  // MempoolReactor returns the Node's mempool reactor.
  1223  func (n *Node) MempoolReactor() *mempl.Reactor {
  1224  	return n.mempoolReactor
  1225  }
  1226  
  1227  // Mempool returns the Node's mempool.
  1228  func (n *Node) Mempool() mempl.Mempool {
  1229  	return n.mempool
  1230  }
  1231  
  1232  // PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled.
  1233  func (n *Node) PEXReactor() *pex.Reactor {
  1234  	return n.pexReactor
  1235  }
  1236  
  1237  // EvidencePool returns the Node's EvidencePool.
  1238  func (n *Node) EvidencePool() *evidence.Pool {
  1239  	return n.evidencePool
  1240  }
  1241  
  1242  // EventBus returns the Node's EventBus.
  1243  func (n *Node) EventBus() *types.EventBus {
  1244  	return n.eventBus
  1245  }
  1246  
  1247  // PrivValidator returns the Node's PrivValidator.
  1248  // XXX: for convenience only!
  1249  func (n *Node) PrivValidator() types.PrivValidator {
  1250  	return n.privValidator
  1251  }
  1252  
  1253  // GenesisDoc returns the Node's GenesisDoc.
  1254  func (n *Node) GenesisDoc() *types.GenesisDoc {
  1255  	return n.genesisDoc
  1256  }
  1257  
  1258  // ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
  1259  func (n *Node) ProxyApp() proxy.AppConns {
  1260  	return n.proxyApp
  1261  }
  1262  
  1263  // Config returns the Node's config.
  1264  func (n *Node) Config() *cfg.Config {
  1265  	return n.config
  1266  }
  1267  
  1268  //------------------------------------------------------------------------------
  1269  
  1270  func (n *Node) Listeners() []string {
  1271  	return []string{
  1272  		fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress),
  1273  	}
  1274  }
  1275  
  1276  func (n *Node) IsListening() bool {
  1277  	return n.isListening
  1278  }
  1279  
  1280  // NodeInfo returns the Node's Info from the Switch.
  1281  func (n *Node) NodeInfo() p2p.NodeInfo {
  1282  	return n.nodeInfo
  1283  }
  1284  
  1285  func makeNodeInfo(
  1286  	config *cfg.Config,
  1287  	nodeKey *p2p.NodeKey,
  1288  	txIndexer txindex.TxIndexer,
  1289  	genDoc *types.GenesisDoc,
  1290  	state sm.State,
  1291  ) (p2p.NodeInfo, error) {
  1292  	txIndexerStatus := "on"
  1293  	if _, ok := txIndexer.(*null.TxIndex); ok {
  1294  		txIndexerStatus = "off"
  1295  	}
  1296  
  1297  	var bcChannel byte
  1298  	switch config.FastSync.Version {
  1299  	case "v0":
  1300  		bcChannel = bcv0.BlockchainChannel
  1301  	case "v1":
  1302  		bcChannel = bcv1.BlockchainChannel
  1303  	case "v2":
  1304  		bcChannel = bcv2.BlockchainChannel
  1305  	default:
  1306  		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
  1307  	}
  1308  
  1309  	nodeInfo := p2p.DefaultNodeInfo{
  1310  		ProtocolVersion: p2p.NewProtocolVersion(
  1311  			version.P2PProtocol, // global
  1312  			state.Version.Consensus.Block,
  1313  			state.Version.Consensus.App,
  1314  		),
  1315  		DefaultNodeID: nodeKey.ID(),
  1316  		Network:       genDoc.ChainID,
  1317  		Version:       version.TMCoreSemVer,
  1318  		Channels: []byte{
  1319  			bcChannel,
  1320  			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
  1321  			mempl.MempoolChannel,
  1322  			evidence.EvidenceChannel,
  1323  			statesync.SnapshotChannel, statesync.ChunkChannel,
  1324  		},
  1325  		Moniker: config.Moniker,
  1326  		Other: p2p.DefaultNodeInfoOther{
  1327  			TxIndex:    txIndexerStatus,
  1328  			RPCAddress: config.RPC.ListenAddress,
  1329  		},
  1330  	}
  1331  
  1332  	if config.P2P.PexReactor {
  1333  		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
  1334  	}
  1335  
  1336  	lAddr := config.P2P.ExternalAddress
  1337  
  1338  	if lAddr == "" {
  1339  		lAddr = config.P2P.ListenAddress
  1340  	}
  1341  
  1342  	nodeInfo.ListenAddr = lAddr
  1343  
  1344  	err := nodeInfo.Validate()
  1345  	return nodeInfo, err
  1346  }
  1347  
  1348  //------------------------------------------------------------------------------
  1349  
  1350  var (
  1351  	genesisDocKey = []byte("genesisDoc")
  1352  )
  1353  
  1354  // LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
  1355  // database, or creates one using the given genesisDocProvider and persists the
  1356  // result to the database. On success this also returns the genesis doc loaded
  1357  // through the given provider.
  1358  func LoadStateFromDBOrGenesisDocProvider(
  1359  	stateDB dbm.DB,
  1360  	genesisDocProvider GenesisDocProvider,
  1361  ) (sm.State, *types.GenesisDoc, error) {
  1362  	// Get genesis doc
  1363  	genDoc, err := loadGenesisDoc(stateDB)
  1364  	if err != nil {
  1365  		genDoc, err = genesisDocProvider()
  1366  		if err != nil {
  1367  			return sm.State{}, nil, err
  1368  		}
  1369  		// save genesis doc to prevent a certain class of user errors (e.g. when it
  1370  		// was changed, accidentally or not). Also good for audit trail.
  1371  		saveGenesisDoc(stateDB, genDoc)
  1372  	}
  1373  	stateStore := sm.NewStore(stateDB)
  1374  	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
  1375  	if err != nil {
  1376  		return sm.State{}, nil, err
  1377  	}
  1378  	return state, genDoc, nil
  1379  }
  1380  
  1381  // loadGenesisDoc panics if the DB read fails or if the stored bytes cannot be unmarshaled
  1382  func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
  1383  	b, err := db.Get(genesisDocKey)
  1384  	if err != nil {
  1385  		panic(err)
  1386  	}
  1387  	if len(b) == 0 {
  1388  		return nil, errors.New("genesis doc not found")
  1389  	}
  1390  	var genDoc *types.GenesisDoc
  1391  	err = tmjson.Unmarshal(b, &genDoc)
  1392  	if err != nil {
  1393  		panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b))
  1394  	}
  1395  	return genDoc, nil
  1396  }
  1397  
  1398  // saveGenesisDoc panics if it fails to marshal or persist the given genesis document
  1399  func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
  1400  	b, err := tmjson.Marshal(genDoc)
  1401  	if err != nil {
  1402  		panic(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
  1403  	}
  1404  	if err := db.SetSync(genesisDocKey, b); err != nil {
  1405  		panic(fmt.Sprintf("Failed to save genesis doc: %v", err))
  1406  	}
  1407  }
  1408  
  1409  func createAndStartPrivValidatorSocketClient(
  1410  	listenAddr,
  1411  	chainID string,
  1412  	logger log.Logger,
  1413  ) (types.PrivValidator, error) {
  1414  	pve, err := privval.NewSignerListener(listenAddr, logger)
  1415  	if err != nil {
  1416  		return nil, fmt.Errorf("failed to start private validator: %w", err)
  1417  	}
  1418  
  1419  	pvsc, err := privval.NewSignerClient(pve, chainID)
  1420  	if err != nil {
  1421  		return nil, fmt.Errorf("failed to start private validator: %w", err)
  1422  	}
  1423  
  1424  	// try to get a pubkey from the private validator the first time
  1425  	_, err = pvsc.GetPubKey()
  1426  	if err != nil {
  1427  		return nil, fmt.Errorf("can't get pubkey: %w", err)
  1428  	}
  1429  
  1430  	const (
  1431  		retries = 50 // 50 * 100ms = 5s total
  1432  		timeout = 100 * time.Millisecond
  1433  	)
  1434  	pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout)
  1435  
  1436  	return pvscWithRetries, nil
  1437  }
  1438  
  1439  // splitAndTrimEmpty slices s into all substrings separated by sep and returns a
  1440  // slice of those substrings with all leading and trailing Unicode code points
  1441  // contained in cutset removed. If sep is empty, it splits after each UTF-8
  1442  // sequence. The first part is equivalent to strings.SplitN with a count of -1.
  1443  // Empty strings are filtered out; only non-empty strings are returned.
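        //
        // For example:
        //
        //   splitAndTrimEmpty("a, b,, c", ",", " ")  // returns []string{"a", "b", "c"}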
  1444  func splitAndTrimEmpty(s, sep, cutset string) []string {
  1445  	if s == "" {
  1446  		return []string{}
  1447  	}
  1448  
  1449  	spl := strings.Split(s, sep)
  1450  	nonEmptyStrings := make([]string, 0, len(spl))
  1451  	for i := 0; i < len(spl); i++ {
  1452  		element := strings.Trim(spl[i], cutset)
  1453  		if element != "" {
  1454  			nonEmptyStrings = append(nonEmptyStrings, element)
  1455  		}
  1456  	}
  1457  	return nonEmptyStrings
  1458  }